/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_MM_H
#define _LINUX_SCHED_MM_H

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/gfp.h>
#include <linux/sync_core.h>
#include <linux/sched/coredump.h>

/*
 * Routines for handling mm_structs
 */
extern struct mm_struct *mm_alloc(void);

/**
 * mmgrab() - Pin a &struct mm_struct.
 * @mm: The &struct mm_struct to pin.
 *
 * Make sure that @mm will not get freed even after the owning task
 * exits. This doesn't guarantee that the associated address space
 * will still exist later on and mmget_not_zero() has to be used before
 * accessing it.
 *
 * This is a preferred way to pin @mm for a longer/unbounded amount
 * of time.
 *
 * Use mmdrop() to release the reference acquired by mmgrab().
 *
 * See also <Documentation/mm/active_mm.rst> for an in-depth explanation
 * of &mm_struct.mm_count vs &mm_struct.mm_users.
 */
static inline void mmgrab(struct mm_struct *mm)
{
        atomic_inc(&mm->mm_count);
}

static inline void smp_mb__after_mmgrab(void)
{
        smp_mb__after_atomic();
}

extern void __mmdrop(struct mm_struct *mm);

static inline void mmdrop(struct mm_struct *mm)
{
        /*
         * The implicit full barrier implied by atomic_dec_and_test() is
         * required by the membarrier system call before returning to
         * user-space, after storing to rq->curr.
         */
        if (unlikely(atomic_dec_and_test(&mm->mm_count)))
                __mmdrop(mm);
}

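/*
 * Illustrative usage sketch, not part of the original header: a subsystem
 * that caches an mm pointer for an unbounded time pins the mm_struct itself
 * with mmgrab() and releases it with mmdrop(). The struct and function names
 * below are hypothetical.
 *
 *      struct my_ctx { struct mm_struct *mm; };
 *
 *      static void my_ctx_init(struct my_ctx *ctx)
 *      {
 *              ctx->mm = current->mm;
 *              mmgrab(ctx->mm);        // keeps the mm_struct allocated
 *      }
 *
 *      static void my_ctx_destroy(struct my_ctx *ctx)
 *      {
 *              mmdrop(ctx->mm);        // may free the mm_struct
 *      }
 *
 * Note that mmgrab() only pins the mm_struct; use mmget_not_zero() before
 * actually operating on the address space.
 */
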
#ifdef CONFIG_PREEMPT_RT
/*
 * RCU callback for delayed mm drop. Not strictly RCU, but call_rcu() is
 * by far the least expensive way to do that.
 */
static inline void __mmdrop_delayed(struct rcu_head *rhp)
{
        struct mm_struct *mm = container_of(rhp, struct mm_struct, delayed_drop);

        __mmdrop(mm);
}

/*
 * Invoked from finish_task_switch(). Delegates the heavy lifting on RT
 * kernels via RCU.
 */
static inline void mmdrop_sched(struct mm_struct *mm)
{
        /* Provides a full memory barrier. See mmdrop() */
        if (atomic_dec_and_test(&mm->mm_count))
                call_rcu(&mm->delayed_drop, __mmdrop_delayed);
}
#else
static inline void mmdrop_sched(struct mm_struct *mm)
{
        mmdrop(mm);
}
#endif

/* Helpers for lazy TLB mm refcounting */
static inline void mmgrab_lazy_tlb(struct mm_struct *mm)
{
        if (IS_ENABLED(CONFIG_MMU_LAZY_TLB_REFCOUNT))
                mmgrab(mm);
}

static inline void mmdrop_lazy_tlb(struct mm_struct *mm)
{
        if (IS_ENABLED(CONFIG_MMU_LAZY_TLB_REFCOUNT)) {
                mmdrop(mm);
        } else {
                /*
                 * mmdrop_lazy_tlb must provide a full memory barrier, see the
                 * membarrier comment in finish_task_switch() which relies on
                 * this.
                 */
                smp_mb();
        }
}

static inline void mmdrop_lazy_tlb_sched(struct mm_struct *mm)
{
        if (IS_ENABLED(CONFIG_MMU_LAZY_TLB_REFCOUNT))
                mmdrop_sched(mm);
        else
                smp_mb(); /* see mmdrop_lazy_tlb() above */
}

/**
 * mmget() - Pin the address space associated with a &struct mm_struct.
 * @mm: The address space to pin.
 *
 * Make sure that the address space of the given &struct mm_struct doesn't
 * go away. This does not protect against parts of the address space being
 * modified or freed, however.
 *
 * Never use this function to pin this address space for an
 * unbounded/indefinite amount of time.
 *
 * Use mmput() to release the reference acquired by mmget().
 *
 * See also <Documentation/mm/active_mm.rst> for an in-depth explanation
 * of &mm_struct.mm_count vs &mm_struct.mm_users.
 */
static inline void mmget(struct mm_struct *mm)
{
        atomic_inc(&mm->mm_users);
}

static inline bool mmget_not_zero(struct mm_struct *mm)
{
        return atomic_inc_not_zero(&mm->mm_users);
}

/* mmput gets rid of the mappings and all user-space */
extern void mmput(struct mm_struct *);

#ifdef CONFIG_MMU
/* same as above but performs the slow path from the async context. Can
 * be called from the atomic context as well
 */
void mmput_async(struct mm_struct *);
#endif

/* Grab a reference to a task's mm, if it is not already going away */
extern struct mm_struct *get_task_mm(struct task_struct *task);

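/*
 * Illustrative usage sketch, not part of the original header: briefly
 * inspecting another task's address space. The helper name is hypothetical;
 * "task" is assumed to be a valid, referenced task_struct.
 *
 *      static int my_count_mappings(struct task_struct *task)
 *      {
 *              struct mm_struct *mm = get_task_mm(task);
 *              int nr;
 *
 *              if (!mm)
 *                      return 0;       // task is exiting or has no mm
 *              mmap_read_lock(mm);
 *              nr = mm->map_count;
 *              mmap_read_unlock(mm);
 *              mmput(mm);              // drops the mm_users reference
 *              return nr;
 *      }
 */
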
/*
 * Grab a reference to a task's mm, if it is not already going away
 * and ptrace_may_access with the mode parameter passed to it
 * succeeds.
 */
extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);

/* Remove the current task's stale references to the old mm_struct on exit() */
extern void exit_mm_release(struct task_struct *, struct mm_struct *);
/* Remove the current task's stale references to the old mm_struct on exec() */
extern void exec_mm_release(struct task_struct *, struct mm_struct *);

#ifdef CONFIG_MEMCG
extern void mm_update_next_owner(struct mm_struct *mm);
#else
static inline void mm_update_next_owner(struct mm_struct *mm)
{
}
#endif /* CONFIG_MEMCG */

#ifdef CONFIG_MMU
#ifndef arch_get_mmap_end
#define arch_get_mmap_end(addr, len, flags)     (TASK_SIZE)
#endif

#ifndef arch_get_mmap_base
#define arch_get_mmap_base(addr, base) (base)
#endif

extern void arch_pick_mmap_layout(struct mm_struct *mm,
                                  struct rlimit *rlim_stack);

unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
                       unsigned long len, unsigned long pgoff,
                       unsigned long flags, vm_flags_t vm_flags);
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
                               unsigned long len, unsigned long pgoff,
                               unsigned long flags, vm_flags_t vm_flags);

unsigned long mm_get_unmapped_area(struct mm_struct *mm, struct file *filp,
                                   unsigned long addr, unsigned long len,
                                   unsigned long pgoff, unsigned long flags);

unsigned long mm_get_unmapped_area_vmflags(struct mm_struct *mm,
                                           struct file *filp,
                                           unsigned long addr,
                                           unsigned long len,
                                           unsigned long pgoff,
                                           unsigned long flags,
                                           vm_flags_t vm_flags);

unsigned long
generic_get_unmapped_area(struct file *filp, unsigned long addr,
                          unsigned long len, unsigned long pgoff,
                          unsigned long flags, vm_flags_t vm_flags);
unsigned long
generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
                                  unsigned long len, unsigned long pgoff,
                                  unsigned long flags, vm_flags_t vm_flags);

#else /* !CONFIG_MMU */
static inline void arch_pick_mmap_layout(struct mm_struct *mm,
                                         struct rlimit *rlim_stack) {}
#endif

static inline bool in_vfork(struct task_struct *tsk)
{
        bool ret;

        /*
         * need RCU to access ->real_parent if CLONE_VM was used along with
         * CLONE_PARENT.
         *
         * We check real_parent->mm == tsk->mm because CLONE_VFORK does not
         * imply CLONE_VM.
         *
         * CLONE_VFORK can be used with CLONE_PARENT/CLONE_THREAD and thus
         * ->real_parent is not necessarily the task doing vfork(), so in
         * theory we can't rely on task_lock() if we want to dereference it.
         *
         * And in this case we can't trust the real_parent->mm == tsk->mm
         * check, it can be false negative. But we do not care, if init or
         * another oom-unkillable task does this it should blame itself.
         */
        rcu_read_lock();
        ret = tsk->vfork_done &&
                        rcu_dereference(tsk->real_parent)->mm == tsk->mm;
        rcu_read_unlock();

        return ret;
}

/*
 * Applies per-task gfp context to the given allocation flags.
 * PF_MEMALLOC_NOIO implies GFP_NOIO
 * PF_MEMALLOC_NOFS implies GFP_NOFS
 * PF_MEMALLOC_PIN  implies !GFP_MOVABLE
 */
static inline gfp_t current_gfp_context(gfp_t flags)
{
        unsigned int pflags = READ_ONCE(current->flags);

        if (unlikely(pflags & (PF_MEMALLOC_NOIO | PF_MEMALLOC_NOFS | PF_MEMALLOC_PIN))) {
                /*
                 * NOIO implies both NOIO and NOFS and it is a weaker context
                 * so always make sure it takes precedence
                 */
                if (pflags & PF_MEMALLOC_NOIO)
                        flags &= ~(__GFP_IO | __GFP_FS);
                else if (pflags & PF_MEMALLOC_NOFS)
                        flags &= ~__GFP_FS;

                if (pflags & PF_MEMALLOC_PIN)
                        flags &= ~__GFP_MOVABLE;
        }
        return flags;
}

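/*
 * Illustrative sketch, not part of the original header: under a
 * memalloc_noio_save() scope, current_gfp_context() strips __GFP_IO and
 * __GFP_FS, so a nominal GFP_KERNEL request behaves like GFP_NOIO:
 *
 *      unsigned int noio_flags = memalloc_noio_save();
 *      gfp_t eff = current_gfp_context(GFP_KERNEL);
 *
 *      // eff == (GFP_KERNEL & ~(__GFP_IO | __GFP_FS))
 *      memalloc_noio_restore(noio_flags);
 */
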
#ifdef CONFIG_LOCKDEP
extern void __fs_reclaim_acquire(unsigned long ip);
extern void __fs_reclaim_release(unsigned long ip);
extern void fs_reclaim_acquire(gfp_t gfp_mask);
extern void fs_reclaim_release(gfp_t gfp_mask);
#else
static inline void __fs_reclaim_acquire(unsigned long ip) { }
static inline void __fs_reclaim_release(unsigned long ip) { }
static inline void fs_reclaim_acquire(gfp_t gfp_mask) { }
static inline void fs_reclaim_release(gfp_t gfp_mask) { }
#endif

/* Any memory-allocation retry loop should use
 * memalloc_retry_wait(), and pass the flags for the most
 * constrained allocation attempt that might have failed.
 * This provides useful documentation of where loops are,
 * and a central place to fine tune the waiting as the MM
 * implementation changes.
 */
static inline void memalloc_retry_wait(gfp_t gfp_flags)
{
        /* We use io_schedule_timeout because waiting for memory
         * typically includes waiting for dirty pages to be
         * written out, which requires IO.
         */
        __set_current_state(TASK_UNINTERRUPTIBLE);
        gfp_flags = current_gfp_context(gfp_flags);
        if (gfpflags_allow_blocking(gfp_flags) &&
            !(gfp_flags & __GFP_NORETRY))
                /* Probably waited already, no need for much more */
                io_schedule_timeout(1);
        else
                /* Probably didn't wait, and has now released a lock,
                 * so now is a good time to wait
                 */
                io_schedule_timeout(HZ/50);
}

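/*
 * Illustrative retry-loop sketch, not part of the original header; the
 * buffer size variable is hypothetical:
 *
 *      void *buf;
 *
 *      while (!(buf = kmalloc(buf_size, GFP_KERNEL)))
 *              memalloc_retry_wait(GFP_KERNEL);
 */
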
/**
 * might_alloc - Mark possible allocation sites
 * @gfp_mask: gfp_t flags that would be used to allocate
 *
 * Similar to might_sleep() and other annotations, this can be used in functions
 * that might allocate, but often don't. Compiles to nothing without
 * CONFIG_LOCKDEP. Includes a conditional might_sleep() if @gfp_mask allows
 * blocking.
 */
static inline void might_alloc(gfp_t gfp_mask)
{
        fs_reclaim_acquire(gfp_mask);
        fs_reclaim_release(gfp_mask);

        might_sleep_if(gfpflags_allow_blocking(gfp_mask));
}

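/*
 * Illustrative sketch, not part of the original header: a helper that only
 * allocates on its slow path can still be annotated so every caller is
 * checked by lockdep. The pool type and helper names are hypothetical.
 *
 *      static int my_pool_get(struct my_pool *pool, void **out, gfp_t gfp)
 *      {
 *              might_alloc(gfp);       // checked even when the fast path wins
 *              if (my_pool_take_cached(pool, out))
 *                      return 0;
 *              return my_pool_grow(pool, out, gfp);
 *      }
 */
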
/*
 * memalloc_flags_save - Add a PF_* flag to current->flags, save old value
 *
 * This allows PF_* flags to be conveniently added, irrespective of current
 * value, and then the old version restored with memalloc_flags_restore().
 */
static inline unsigned memalloc_flags_save(unsigned flags)
{
        unsigned oldflags = ~current->flags & flags;

        current->flags |= flags;
        return oldflags;
}

static inline void memalloc_flags_restore(unsigned flags)
{
        current->flags &= ~flags;
}

/**
 * memalloc_noio_save - Marks implicit GFP_NOIO allocation scope.
 *
 * This function marks the beginning of the GFP_NOIO allocation scope.
 * All further allocations will implicitly drop the __GFP_IO flag and so
 * they are safe for the IO critical section from the allocation recursion
 * point of view. Use memalloc_noio_restore to end the scope with flags
 * returned by this function.
 *
 * Context: This function is safe to be used from any context.
 * Return: The saved flags to be passed to memalloc_noio_restore.
 */
static inline unsigned int memalloc_noio_save(void)
{
        return memalloc_flags_save(PF_MEMALLOC_NOIO);
}

/**
 * memalloc_noio_restore - Ends the implicit GFP_NOIO scope.
 * @flags: Flags to restore.
 *
 * Ends the implicit GFP_NOIO scope started by memalloc_noio_save(). Always
 * make sure that the given flags are the return value from the pairing
 * memalloc_noio_save call.
 */
static inline void memalloc_noio_restore(unsigned int flags)
{
        memalloc_flags_restore(flags);
}

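/*
 * Illustrative sketch, not part of the original header: a block-device error
 * handler that must not recurse into I/O while recovering; "cmd" and its size
 * are hypothetical.
 *
 *      unsigned int noio_flags = memalloc_noio_save();
 *
 *      cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);        // effectively GFP_NOIO here
 *      memalloc_noio_restore(noio_flags);
 */
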
/**
 * memalloc_nofs_save - Marks implicit GFP_NOFS allocation scope.
 *
 * This function marks the beginning of the GFP_NOFS allocation scope.
 * All further allocations will implicitly drop the __GFP_FS flag and so
 * they are safe for the FS critical section from the allocation recursion
 * point of view. Use memalloc_nofs_restore to end the scope with flags
 * returned by this function.
 *
 * Context: This function is safe to be used from any context.
 * Return: The saved flags to be passed to memalloc_nofs_restore.
 */
static inline unsigned int memalloc_nofs_save(void)
{
        return memalloc_flags_save(PF_MEMALLOC_NOFS);
}

/**
 * memalloc_nofs_restore - Ends the implicit GFP_NOFS scope.
 * @flags: Flags to restore.
 *
 * Ends the implicit GFP_NOFS scope started by memalloc_nofs_save(). Always
 * make sure that the given flags are the return value from the pairing
 * memalloc_nofs_save call.
 */
static inline void memalloc_nofs_restore(unsigned int flags)
{
        memalloc_flags_restore(flags);
}

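/*
 * Illustrative sketch, not part of the original header: a filesystem that
 * holds a transaction open wraps its allocations so direct reclaim cannot
 * re-enter the filesystem. The transaction helpers are hypothetical.
 *
 *      unsigned int nofs_flags = memalloc_nofs_save();
 *
 *      my_fs_trans_begin(sb);
 *      buf = kmalloc(len, GFP_KERNEL);         // effectively GFP_NOFS here
 *      my_fs_trans_end(sb);
 *      memalloc_nofs_restore(nofs_flags);
 */
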
/**
 * memalloc_noreclaim_save - Marks implicit __GFP_MEMALLOC scope.
 *
 * This function marks the beginning of the __GFP_MEMALLOC allocation scope.
 * All further allocations will implicitly add the __GFP_MEMALLOC flag, which
 * prevents entering reclaim and allows access to all memory reserves. This
 * should only be used when the caller guarantees the allocation will allow more
 * memory to be freed very shortly, i.e. it needs to allocate some memory in
 * the process of freeing memory, and cannot reclaim due to potential recursion.
 *
 * Users of this scope have to be extremely careful to not deplete the reserves
 * completely and implement a throttling mechanism which controls the
 * consumption of the reserve based on the amount of freed memory. Usage of a
 * pre-allocated pool (e.g. mempool) should always be considered before using
 * this scope.
 *
 * Individual allocations under the scope can opt out using the
 * __GFP_NOMEMALLOC flag.
 *
 * Context: This function should not be used in an interrupt context as that
 * one does not give PF_MEMALLOC access to reserves.
 * See __gfp_pfmemalloc_flags().
 * Return: The saved flags to be passed to memalloc_noreclaim_restore.
 */
static inline unsigned int memalloc_noreclaim_save(void)
{
        return memalloc_flags_save(PF_MEMALLOC);
}

/**
 * memalloc_noreclaim_restore - Ends the implicit __GFP_MEMALLOC scope.
 * @flags: Flags to restore.
 *
 * Ends the implicit __GFP_MEMALLOC scope started by memalloc_noreclaim_save().
 * Always make sure that the given flags are the return value from the pairing
 * memalloc_noreclaim_save call.
 */
static inline void memalloc_noreclaim_restore(unsigned int flags)
{
        memalloc_flags_restore(flags);
}

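/*
 * Illustrative sketch, not part of the original header: a path that is itself
 * freeing memory (e.g. pushing out a swap page over the network) and may
 * therefore dip into the reserves; the transmit helper is hypothetical.
 *
 *      unsigned int noreclaim_flags = memalloc_noreclaim_save();
 *      int ret;
 *
 *      ret = my_xmit_swap_page(page);  // its allocations get __GFP_MEMALLOC
 *      memalloc_noreclaim_restore(noreclaim_flags);
 */
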
/**
 * memalloc_pin_save - Marks implicit ~__GFP_MOVABLE scope.
 *
 * This function marks the beginning of the ~__GFP_MOVABLE allocation scope.
 * All further allocations will implicitly remove the __GFP_MOVABLE flag, which
 * will constrain the allocations to zones that allow long term pinning, i.e.
 * not ZONE_MOVABLE zones.
 *
 * Return: The saved flags to be passed to memalloc_pin_restore.
 */
static inline unsigned int memalloc_pin_save(void)
{
        return memalloc_flags_save(PF_MEMALLOC_PIN);
}

/**
 * memalloc_pin_restore - Ends the implicit ~__GFP_MOVABLE scope.
 * @flags: Flags to restore.
 *
 * Ends the implicit ~__GFP_MOVABLE scope started by memalloc_pin_save().
 * Always make sure that the given flags are the return value from the pairing
 * memalloc_pin_save call.
 */
static inline void memalloc_pin_restore(unsigned int flags)
{
        memalloc_flags_restore(flags);
}

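/*
 * Illustrative sketch, not part of the original header: allocating a page
 * that will back a long-term pin, so the allocator steers clear of
 * ZONE_MOVABLE while the scope is active:
 *
 *      unsigned int pin_flags = memalloc_pin_save();
 *      struct page *page;
 *
 *      page = alloc_page(GFP_HIGHUSER_MOVABLE);        // __GFP_MOVABLE is dropped
 *      memalloc_pin_restore(pin_flags);
 */
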
#ifdef CONFIG_MEMCG
DECLARE_PER_CPU(struct mem_cgroup *, int_active_memcg);

/**
 * set_active_memcg - Starts the remote memcg charging scope.
 * @memcg: memcg to charge.
 *
 * This function marks the beginning of the remote memcg charging scope. All the
 * __GFP_ACCOUNT allocations till the end of the scope will be charged to the
 * given memcg.
 *
 * Please make sure that the caller has a reference to the passed memcg
 * structure, so its lifetime is guaranteed to exceed the scope between two
 * set_active_memcg() calls.
 *
 * NOTE: This function can nest. Users must save the return value and
 * reset the previous value after their own charging scope is over.
 */
static inline struct mem_cgroup *
set_active_memcg(struct mem_cgroup *memcg)
{
        struct mem_cgroup *old;

        if (!in_task()) {
                old = this_cpu_read(int_active_memcg);
                this_cpu_write(int_active_memcg, memcg);
        } else {
                old = current->active_memcg;
                current->active_memcg = memcg;
        }

        return old;
}

#else
static inline struct mem_cgroup *
set_active_memcg(struct mem_cgroup *memcg)
{
        return NULL;
}
#endif

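/*
 * Illustrative sketch, not part of the original header: charging a deferred
 * allocation to a previously recorded memcg. "my_obj" is hypothetical and is
 * assumed to hold a reference on my_obj->memcg.
 *
 *      struct mem_cgroup *old_memcg;
 *
 *      old_memcg = set_active_memcg(my_obj->memcg);
 *      item = kmalloc(size, GFP_KERNEL | __GFP_ACCOUNT);
 *      set_active_memcg(old_memcg);    // restore the previous scope
 */
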
#ifdef CONFIG_MEMBARRIER
enum {
        MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY                = (1U << 0),
        MEMBARRIER_STATE_PRIVATE_EXPEDITED                      = (1U << 1),
        MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY                 = (1U << 2),
        MEMBARRIER_STATE_GLOBAL_EXPEDITED                       = (1U << 3),
        MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY      = (1U << 4),
        MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE            = (1U << 5),
        MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ_READY           = (1U << 6),
        MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ                 = (1U << 7),
};

enum {
        MEMBARRIER_FLAG_SYNC_CORE       = (1U << 0),
        MEMBARRIER_FLAG_RSEQ            = (1U << 1),
};

#ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
#include <asm/membarrier.h>
#endif

static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
{
        if (current->mm != mm)
                return;
        if (likely(!(atomic_read(&mm->membarrier_state) &
                     MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE)))
                return;
        sync_core_before_usermode();
}

extern void membarrier_exec_mmap(struct mm_struct *mm);

extern void membarrier_update_current_mm(struct mm_struct *next_mm);

#else /* !CONFIG_MEMBARRIER */
#ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
static inline void membarrier_arch_switch_mm(struct mm_struct *prev,
                                             struct mm_struct *next,
                                             struct task_struct *tsk)
{
}
#endif
static inline void membarrier_exec_mmap(struct mm_struct *mm)
{
}
static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
{
}
static inline void membarrier_update_current_mm(struct mm_struct *next_mm)
{
}
#endif /* CONFIG_MEMBARRIER */

#endif /* _LINUX_SCHED_MM_H */