/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_MM_H
#define _LINUX_SCHED_MM_H

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/gfp.h>
#include <linux/sync_core.h>

/*
 * Routines for handling mm_structs
 */
extern struct mm_struct *mm_alloc(void);

/**
 * mmgrab() - Pin a &struct mm_struct.
 * @mm: The &struct mm_struct to pin.
 *
 * Make sure that @mm will not get freed even after the owning task
 * exits. This doesn't guarantee that the associated address space
 * will still exist later on and mmget_not_zero() has to be used before
 * accessing it.
 *
 * This is a preferred way to pin @mm for a longer/unbounded amount
 * of time.
 *
 * Use mmdrop() to release the reference acquired by mmgrab().
 *
 * See also <Documentation/vm/active_mm.rst> for an in-depth explanation
 * of &mm_struct.mm_count vs &mm_struct.mm_users.
 */
static inline void mmgrab(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_count);
}

extern void __mmdrop(struct mm_struct *mm);

static inline void mmdrop(struct mm_struct *mm)
{
	/*
	 * The implicit full barrier implied by atomic_dec_and_test() is
	 * required by the membarrier system call before returning to
	 * user-space, after storing to rq->curr.
	 */
	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
		__mmdrop(mm);
}
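
/*
 * Illustrative sketch (hypothetical caller, not part of this header's API):
 * code that must keep the mm_struct itself alive across a sleep or an
 * unbounded wait can pair mmgrab()/mmdrop(). Only mmgrab() and mmdrop()
 * below come from this header; the rest is made up for the example.
 *
 *	static void inspect_mm_later(struct mm_struct *mm)
 *	{
 *		mmgrab(mm);		// mm_count pinned; struct won't be freed
 *		do_deferred_work(mm);	// hypothetical; may sleep, task may exit
 *		mmdrop(mm);		// release the mm_count reference
 *	}
 *
 * Note that this only pins the struct, not the address space; use
 * mmget_not_zero() before touching user mappings.
 */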

#ifdef CONFIG_PREEMPT_RT
/*
 * RCU callback for delayed mm drop. Not strictly RCU, but call_rcu() is
 * by far the least expensive way to do that.
 */
static inline void __mmdrop_delayed(struct rcu_head *rhp)
{
	struct mm_struct *mm = container_of(rhp, struct mm_struct, delayed_drop);

	__mmdrop(mm);
}

/*
 * Invoked from finish_task_switch(). Delegates the heavy lifting on RT
 * kernels via RCU.
 */
static inline void mmdrop_sched(struct mm_struct *mm)
{
	/* Provides a full memory barrier. See mmdrop() */
	if (atomic_dec_and_test(&mm->mm_count))
		call_rcu(&mm->delayed_drop, __mmdrop_delayed);
}
#else
static inline void mmdrop_sched(struct mm_struct *mm)
{
	mmdrop(mm);
}
#endif

/**
 * mmget() - Pin the address space associated with a &struct mm_struct.
 * @mm: The address space to pin.
 *
 * Make sure that the address space of the given &struct mm_struct doesn't
 * go away. This does not protect against parts of the address space being
 * modified or freed, however.
 *
 * Never use this function to pin this address space for an
 * unbounded/indefinite amount of time.
 *
 * Use mmput() to release the reference acquired by mmget().
 *
 * See also <Documentation/vm/active_mm.rst> for an in-depth explanation
 * of &mm_struct.mm_count vs &mm_struct.mm_users.
 */
static inline void mmget(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_users);
}

static inline bool mmget_not_zero(struct mm_struct *mm)
{
	return atomic_inc_not_zero(&mm->mm_users);
}
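
/*
 * Illustrative sketch (hypothetical caller, not part of this header): the
 * usual pattern when holding only an mm_count reference is to try to take
 * an mm_users reference before touching the address space, and to give it
 * back with mmput() once done.
 *
 *	static void walk_if_still_alive(struct mm_struct *mm)
 *	{
 *		if (!mmget_not_zero(mm))
 *			return;			// address space already torn down
 *		walk_user_mappings(mm);		// hypothetical, bounded work only
 *		mmput(mm);			// drop the mm_users reference
 *	}
 */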

/* mmput gets rid of the mappings and all user-space */
extern void mmput(struct mm_struct *);
#ifdef CONFIG_MMU
/*
 * Same as above, but performs the slow path from async context. Can
 * also be called from atomic context.
 */
void mmput_async(struct mm_struct *);
#endif

/* Grab a reference to a task's mm, if it is not already going away */
extern struct mm_struct *get_task_mm(struct task_struct *task);
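/*
 * Illustrative sketch (hypothetical use, not part of this header):
 * get_task_mm() takes an mm_users reference on success, so a successful
 * call must be balanced with mmput().
 *
 *	struct mm_struct *mm = get_task_mm(task);
 *
 *	if (mm) {
 *		report_task_vm(task, mm);	// hypothetical read-only use
 *		mmput(mm);
 *	}
 */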
/*
 * Grab a reference to a task's mm, if it is not already going away
 * and ptrace_may_access() with the mode parameter passed to it
 * succeeds.
 */
extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
/* Remove the current task's stale references to the old mm_struct on exit() */
extern void exit_mm_release(struct task_struct *, struct mm_struct *);
/* Remove the current task's stale references to the old mm_struct on exec() */
extern void exec_mm_release(struct task_struct *, struct mm_struct *);

#ifdef CONFIG_MEMCG
extern void mm_update_next_owner(struct mm_struct *mm);
#else
static inline void mm_update_next_owner(struct mm_struct *mm)
{
}
#endif /* CONFIG_MEMCG */

#ifdef CONFIG_MMU
extern void arch_pick_mmap_layout(struct mm_struct *mm,
				  struct rlimit *rlim_stack);
extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
		       unsigned long, unsigned long);
extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
			       unsigned long len, unsigned long pgoff,
			       unsigned long flags);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm,
					 struct rlimit *rlim_stack) {}
#endif

static inline bool in_vfork(struct task_struct *tsk)
{
	bool ret;

	/*
	 * We need RCU to access ->real_parent if CLONE_VM was used along
	 * with CLONE_PARENT.
	 *
	 * We check real_parent->mm == tsk->mm because CLONE_VFORK does not
	 * imply CLONE_VM.
	 *
	 * CLONE_VFORK can be used with CLONE_PARENT/CLONE_THREAD and thus
	 * ->real_parent is not necessarily the task doing vfork(), so in
	 * theory we can't rely on task_lock() if we want to dereference it.
	 *
	 * And in this case we can't trust the real_parent->mm == tsk->mm
	 * check, it can be a false negative. But we do not care, if init or
	 * another oom-unkillable task does this it should blame itself.
	 */
	rcu_read_lock();
	ret = tsk->vfork_done &&
			rcu_dereference(tsk->real_parent)->mm == tsk->mm;
	rcu_read_unlock();

	return ret;
}

/*
 * Applies per-task gfp context to the given allocation flags.
 * PF_MEMALLOC_NOIO implies GFP_NOIO
 * PF_MEMALLOC_NOFS implies GFP_NOFS
 * PF_MEMALLOC_PIN  implies !GFP_MOVABLE
 */
static inline gfp_t current_gfp_context(gfp_t flags)
{
	unsigned int pflags = READ_ONCE(current->flags);

	if (unlikely(pflags & (PF_MEMALLOC_NOIO | PF_MEMALLOC_NOFS | PF_MEMALLOC_PIN))) {
		/*
		 * NOIO implies both NOIO and NOFS and it is a weaker context
		 * so always make sure it takes precedence.
		 */
		if (pflags & PF_MEMALLOC_NOIO)
			flags &= ~(__GFP_IO | __GFP_FS);
		else if (pflags & PF_MEMALLOC_NOFS)
			flags &= ~__GFP_FS;

		if (pflags & PF_MEMALLOC_PIN)
			flags &= ~__GFP_MOVABLE;
	}
	return flags;
}
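
/*
 * Illustrative sketch (hypothetical values, not part of this header): inside
 * a memalloc_noio scope, current_gfp_context() strips both __GFP_IO and
 * __GFP_FS from the caller's flags, so a nominal GFP_KERNEL request is
 * effectively demoted to a NOIO allocation.
 *
 *	unsigned int noio_flags = memalloc_noio_save();
 *	gfp_t effective = current_gfp_context(GFP_KERNEL);
 *
 *	// effective now lacks __GFP_IO and __GFP_FS
 *	memalloc_noio_restore(noio_flags);
 */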

#ifdef CONFIG_LOCKDEP
extern void __fs_reclaim_acquire(unsigned long ip);
extern void __fs_reclaim_release(unsigned long ip);
extern void fs_reclaim_acquire(gfp_t gfp_mask);
extern void fs_reclaim_release(gfp_t gfp_mask);
#else
static inline void __fs_reclaim_acquire(unsigned long ip) { }
static inline void __fs_reclaim_release(unsigned long ip) { }
static inline void fs_reclaim_acquire(gfp_t gfp_mask) { }
static inline void fs_reclaim_release(gfp_t gfp_mask) { }
#endif

/**
 * might_alloc - Mark possible allocation sites
 * @gfp_mask: gfp_t flags that would be used to allocate
 *
 * Similar to might_sleep() and other annotations, this can be used in
 * functions that might allocate, but often don't. Compiles to nothing
 * without CONFIG_LOCKDEP. Includes a conditional might_sleep() if
 * @gfp_mask allows blocking.
 */
static inline void might_alloc(gfp_t gfp_mask)
{
	fs_reclaim_acquire(gfp_mask);
	fs_reclaim_release(gfp_mask);

	might_sleep_if(gfpflags_allow_blocking(gfp_mask));
}
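
/*
 * Illustrative sketch (hypothetical function, not part of this header): a
 * helper that only allocates on a rare slow path can still announce the
 * possibility up front so lockdep and might_sleep() catch bad callers every
 * time, not just when the slow path happens to fire.
 *
 *	static int cache_lookup_or_create(struct some_cache *c, gfp_t gfp)
 *	{
 *		might_alloc(gfp);		// annotate even on the fast path
 *		if (fast_path_hit(c))		// hypothetical
 *			return 0;
 *		return slow_path_alloc(c, gfp);	// hypothetical, may allocate
 *	}
 */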

/**
 * memalloc_noio_save - Marks implicit GFP_NOIO allocation scope.
 *
 * This function marks the beginning of the GFP_NOIO allocation scope.
 * All further allocations will implicitly drop the __GFP_IO flag and so
 * they are safe for the IO critical section from the allocation recursion
 * point of view. Use memalloc_noio_restore() to end the scope with the
 * flags returned by this function.
 *
 * This function is safe to be used from any context.
 */
static inline unsigned int memalloc_noio_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
	current->flags |= PF_MEMALLOC_NOIO;
	return flags;
}

/**
 * memalloc_noio_restore - Ends the implicit GFP_NOIO scope.
 * @flags: Flags to restore.
 *
 * Ends the implicit GFP_NOIO scope started by memalloc_noio_save().
 * Always make sure that @flags is the return value from the pairing
 * memalloc_noio_save() call.
 */
static inline void memalloc_noio_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
}
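
/*
 * Illustrative sketch (hypothetical driver path, not part of this header):
 * the scope nests correctly because the saved flags are restored verbatim.
 *
 *	unsigned int noio_flags = memalloc_noio_save();
 *
 *	// Any allocation in here, including in callees, behaves as GFP_NOIO,
 *	// so it cannot recurse back into the I/O path that called us.
 *	submit_resume_work();			// hypothetical
 *	memalloc_noio_restore(noio_flags);	// restore the previous state
 */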

/**
 * memalloc_nofs_save - Marks implicit GFP_NOFS allocation scope.
 *
 * This function marks the beginning of the GFP_NOFS allocation scope.
 * All further allocations will implicitly drop the __GFP_FS flag and so
 * they are safe for the FS critical section from the allocation recursion
 * point of view. Use memalloc_nofs_restore() to end the scope with the
 * flags returned by this function.
 *
 * This function is safe to be used from any context.
 */
static inline unsigned int memalloc_nofs_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_NOFS;
	current->flags |= PF_MEMALLOC_NOFS;
	return flags;
}

/**
 * memalloc_nofs_restore - Ends the implicit GFP_NOFS scope.
 * @flags: Flags to restore.
 *
 * Ends the implicit GFP_NOFS scope started by memalloc_nofs_save().
 * Always make sure that @flags is the return value from the pairing
 * memalloc_nofs_save() call.
 */
static inline void memalloc_nofs_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC_NOFS) | flags;
}
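
/*
 * Illustrative sketch (hypothetical filesystem path, not part of this
 * header): while a transaction is open, allocations must not recurse into
 * filesystem reclaim, so the whole span is wrapped in a NOFS scope.
 *
 *	unsigned int nofs_flags = memalloc_nofs_save();
 *
 *	do_transaction_work();			// hypothetical; allocations here
 *						// behave as GFP_NOFS
 *	memalloc_nofs_restore(nofs_flags);
 */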

static inline unsigned int memalloc_noreclaim_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC;
	current->flags |= PF_MEMALLOC;
	return flags;
}

static inline void memalloc_noreclaim_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC) | flags;
}

static inline unsigned int memalloc_pin_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_PIN;

	current->flags |= PF_MEMALLOC_PIN;
	return flags;
}

static inline void memalloc_pin_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC_PIN) | flags;
}

#ifdef CONFIG_MEMCG
DECLARE_PER_CPU(struct mem_cgroup *, int_active_memcg);
/**
 * set_active_memcg - Starts the remote memcg charging scope.
 * @memcg: memcg to charge.
 *
 * This function marks the beginning of the remote memcg charging scope.
 * All the __GFP_ACCOUNT allocations until the end of the scope will be
 * charged to the given memcg.
 *
 * NOTE: This function can nest. Users must save the return value and
 * reset the previous value after their own charging scope is over.
 */
static inline struct mem_cgroup *
set_active_memcg(struct mem_cgroup *memcg)
{
	struct mem_cgroup *old;

	if (!in_task()) {
		old = this_cpu_read(int_active_memcg);
		this_cpu_write(int_active_memcg, memcg);
	} else {
		old = current->active_memcg;
		current->active_memcg = memcg;
	}

	return old;
}
#else
static inline struct mem_cgroup *
set_active_memcg(struct mem_cgroup *memcg)
{
	return NULL;
}
#endif
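
/*
 * Illustrative sketch (hypothetical caller, not part of this header):
 * because the scope nests, the previous memcg returned by set_active_memcg()
 * must be put back when the caller's own charging scope ends.
 *
 *	struct mem_cgroup *old_memcg = set_active_memcg(memcg);
 *
 *	obj = kmalloc(size, GFP_KERNEL | __GFP_ACCOUNT);	// charged to memcg
 *	set_active_memcg(old_memcg);				// end the scope
 */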

#ifdef CONFIG_MEMBARRIER
enum {
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY		= (1U << 0),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED			= (1U << 1),
	MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY			= (1U << 2),
	MEMBARRIER_STATE_GLOBAL_EXPEDITED			= (1U << 3),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY	= (1U << 4),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE		= (1U << 5),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ_READY		= (1U << 6),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ			= (1U << 7),
};

enum {
	MEMBARRIER_FLAG_SYNC_CORE	= (1U << 0),
	MEMBARRIER_FLAG_RSEQ		= (1U << 1),
};

#ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
#include <asm/membarrier.h>
#endif

static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
{
	if (current->mm != mm)
		return;
	if (likely(!(atomic_read(&mm->membarrier_state) &
		     MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE)))
		return;
	sync_core_before_usermode();
}

extern void membarrier_exec_mmap(struct mm_struct *mm);

extern void membarrier_update_current_mm(struct mm_struct *next_mm);

#else
#ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
static inline void membarrier_arch_switch_mm(struct mm_struct *prev,
					     struct mm_struct *next,
					     struct task_struct *tsk)
{
}
#endif
static inline void membarrier_exec_mmap(struct mm_struct *mm)
{
}
static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
{
}
static inline void membarrier_update_current_mm(struct mm_struct *next_mm)
{
}
#endif

#endif /* _LINUX_SCHED_MM_H */