/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MM_TYPES_H
#define _LINUX_MM_TYPES_H
#include <linux/mm_types_task.h>

#include <linux/auxvec.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/rbtree.h>
#include <linux/rwsem.h>
#include <linux/completion.h>
#include <linux/cpumask.h>
#include <linux/uprobes.h>
#include <linux/page-flags-layout.h>
#include <linux/workqueue.h>

#include <asm/mmu.h>
#ifndef AT_VECTOR_SIZE_ARCH
#define AT_VECTOR_SIZE_ARCH 0
#endif
#define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1))
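
/*
 * Worked example (editorial note, not a definition used elsewhere): each
 * auxiliary vector entry is an (id, value) pair of unsigned longs and the
 * vector ends with an AT_NULL entry, hence the factor of 2 and the "+ 1".
 * With AT_VECTOR_SIZE_ARCH == 0 and AT_VECTOR_SIZE_BASE == 20 this gives
 * 2 * (0 + 20 + 1) == 42 unsigned longs for saved_auxv[].
 */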
/*
 * Each physical page in the system has a struct page associated with
 * it to keep track of whatever it is we are using the page for at the
 * moment. Note that we have no way to track which tasks are using
 * a page, though if it is a pagecache page, rmap structures can tell us
 * who is mapping it. If you allocate the page using alloc_pages(), you
 * can use some of the space in struct page for your own purposes.
 *
 * Pages that were once in the page cache may be found under the RCU lock
 * even after they have been recycled to a different purpose. The page
 * cache reads and writes some of the fields in struct page to pin the
 * page before checking that it's still in the page cache. It is vital
 * that all users of struct page:
 * 1. Use the first word as PageFlags.
 * 2. Clear or preserve bit 0 of page->compound_head. It is used as
 *    PageTail for compound pages, and the page cache must not see false
 *    positives. Some users put a pointer here (guaranteed to be at least
 *    4-byte aligned), other users avoid using the field altogether.
 * 3. page->_refcount must either not be used, or must be used in such a
 *    way that other CPUs temporarily incrementing and then decrementing the
 *    refcount does not cause problems. On receiving the page from
 *    alloc_pages(), the refcount will be positive.
 * 4. Either preserve page->_mapcount or restore it to -1 before freeing it.
 *
 * If you allocate pages of order > 0, you can use the fields in the struct
 * page associated with each page, but bear in mind that the pages may have
 * been inserted individually into the page cache, so you must use the above
 * four fields in a compatible way for each struct page.
 *
 * SLUB uses cmpxchg_double() to atomically update its freelist and
 * counters. That requires that freelist & counters be adjacent and
 * double-word aligned. We align all struct pages to double-word
 * boundaries, and ensure that 'freelist' sits at a double-word aligned
 * offset within the struct.
 */
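
/*
 * Illustrative sketch (editorial example, not code from this header): a
 * driver that owns pages it got from alloc_pages() may keep a cookie in
 * page->private through the wrappers from <linux/mm.h>, as long as the
 * field is restored before the page is freed:
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 0);
 *
 *	if (page) {
 *		set_page_private(page, (unsigned long)my_cookie);
 *		...
 *		set_page_private(page, 0);
 *		__free_pages(page, 0);
 *	}
 *
 * "my_cookie" is a made-up driver-private value.
 */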
#ifdef CONFIG_HAVE_ALIGNED_STRUCT_PAGE
#define _struct_page_alignment	__aligned(2 * sizeof(unsigned long))
#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE)
#define _slub_counter_t		unsigned long
#else
#define _slub_counter_t		unsigned int
#endif
#else /* !CONFIG_HAVE_ALIGNED_STRUCT_PAGE */
#define _struct_page_alignment
#define _slub_counter_t		unsigned int
#endif /* !CONFIG_HAVE_ALIGNED_STRUCT_PAGE */
struct page {
	/* First double word block */
	unsigned long flags;		/* Atomic flags, some possibly
					 * updated asynchronously */
	/* See page-flags.h for the definition of PAGE_MAPPING_FLAGS */
	struct address_space *mapping;

	void *s_mem;			/* slab first object */
	atomic_t compound_mapcount;	/* first tail page */
	/* page_deferred_list().next -- second tail page */

	/* Second double word */
	pgoff_t index;			/* Our offset within mapping. */
	void *freelist;			/* sl[aou]b first free object */
	/* page_deferred_list().prev -- second tail page */

	_slub_counter_t counters;
	unsigned int active;		/* SLAB */
	int units;			/* SLOB */
	struct {			/* Page cache */
		/*
		 * Count of ptes mapped in mms, to show when
		 * page is mapped & limit reverse map searches.
		 *
		 * Extra information about page type may be
		 * stored here for pages that are never mapped,
		 * in which case the value MUST BE <= -2.
		 * See page-flags.h for more details.
		 */
		atomic_t _mapcount;

		/*
		 * Usage count, *USE WRAPPER FUNCTION* when manual
		 * accounting. See page_ref.h
		 */
		atomic_t _refcount;
	};
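
	/*
	 * Illustrative sketch (editorial example): _refcount is normally
	 * manipulated only through the wrappers from <linux/mm.h> and
	 * <linux/page_ref.h>, e.g.:
	 *
	 *	get_page(page);			pin the page
	 *	...
	 *	put_page(page);			unpin, may free the page
	 *
	 *	if (page_count(page) == 1)
	 *		...			we hold the only reference
	 */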
	/*
	 * WARNING: bit 0 of the first word encodes PageTail(). That means
	 * the other users of the storage space MUST NOT use bit 0, to
	 * avoid collisions and false-positive PageTail().
	 */
	struct list_head lru;		/* Pageout list, eg. active_list
					 * protected by zone_lru_lock !
					 * Can be used as a generic list
					 * by the page owner.
					 */
	struct dev_pagemap *pgmap;	/* ZONE_DEVICE pages are never on an
					 * lru or handled by a slab
					 * allocator, this points to the
					 * hosting device page map.
					 */
	struct {			/* slub per cpu partial pages */
		struct page *next;	/* Next partial slab */
		int pages;		/* Nr of partial slabs left */
		int pobjects;		/* Approximate # of objects */
	};
	struct rcu_head rcu_head;	/* Used by SLAB
					 * when destroying via RCU
					 */
	/* Tail pages of compound page */
	unsigned long compound_head;	/* If bit zero is set */
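
	/*
	 * Sketch of how bit 0 is consumed (this mirrors what the
	 * compound_head() helper in <linux/page-flags.h> does; shown here
	 * only as an editorial illustration):
	 *
	 *	unsigned long head = READ_ONCE(page->compound_head);
	 *
	 *	if (head & 1)
	 *		page = (struct page *)(head - 1);
	 */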
	/* First tail page only */
	unsigned char compound_dtor;
	unsigned char compound_order;
	/* two/six bytes available here */
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && USE_SPLIT_PMD_PTLOCKS
	unsigned long __pad;		/* do not overlay pmd_huge_pte
					 * with compound_head to avoid
					 * possible bit 0 collision.
					 */
	pgtable_t pmd_huge_pte;		/* protected by page->ptl */
#endif
	/*
	 * Mapping-private opaque data:
	 * Usually used for buffer_heads if PagePrivate
	 * Used for swp_entry_t if PageSwapCache
	 * Indicates order in the buddy system if PageBuddy
	 */
	unsigned long private;
#if USE_SPLIT_PTE_PTLOCKS
#if ALLOC_SPLIT_PTLOCKS
	spinlock_t *ptl;
#else
	spinlock_t ptl;
#endif
#endif
	struct kmem_cache *slab_cache;	/* SL[AU]B: Pointer to slab */
	struct mem_cgroup *mem_cgroup;
	/*
	 * On machines where all RAM is mapped into kernel address space,
	 * we can simply calculate the virtual address. On machines with
	 * highmem some memory is mapped into kernel virtual memory
	 * dynamically, so we need a place to store that address.
	 * Note that this field could be 16 bits on x86 ... ;)
	 *
	 * Architectures with slow multiplication can define
	 * WANT_PAGE_VIRTUAL in asm/page.h
	 */
#if defined(WANT_PAGE_VIRTUAL)
	void *virtual;			/* Kernel virtual address (NULL if
					   not kmapped, ie. highmem) */
#endif /* WANT_PAGE_VIRTUAL */
#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
	int _last_cpupid;
#endif
} _struct_page_alignment;
#define PAGE_FRAG_CACHE_MAX_SIZE	__ALIGN_MASK(32768, ~PAGE_MASK)
#define PAGE_FRAG_CACHE_MAX_ORDER	get_order(PAGE_FRAG_CACHE_MAX_SIZE)
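
/*
 * Worked example (editorial note): with 4KiB pages the size above is
 * already page aligned, so PAGE_FRAG_CACHE_MAX_SIZE == 32768 and
 * PAGE_FRAG_CACHE_MAX_ORDER == get_order(32768) == 3 (an 8-page block);
 * with 64KiB pages the size rounds up to 65536 and the order is 0.
 */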
struct page_frag_cache {
	void *va;
#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
	__u16 offset;
	__u16 size;
#else
	__u32 offset;
#endif
	/* we maintain a pagecount bias, so that we don't dirty the cache line
	 * containing page->_refcount every time we allocate a fragment.
	 */
	unsigned int pagecnt_bias;
	bool pfmemalloc;
};
typedef unsigned long vm_flags_t;
/*
 * A region containing a mapping of a non-memory backed file under NOMMU
 * conditions. These are held in a global tree and are pinned by the VMAs that
 * map parts of them.
 */
struct vm_region {
	struct rb_node vm_rb;		/* link in global region tree */
	vm_flags_t vm_flags;		/* VMA vm_flags */
	unsigned long vm_start;		/* start address of region */
	unsigned long vm_end;		/* region initialised to here */
	unsigned long vm_top;		/* region allocated to here */
	unsigned long vm_pgoff;		/* the offset in vm_file corresponding to vm_start */
	struct file *vm_file;		/* the backing file or NULL */

	int vm_usage;			/* region usage count (access under nommu_region_sem) */
	bool vm_icache_flushed : 1;	/* true if the icache has been flushed for
					 * this region */
};
#ifdef CONFIG_USERFAULTFD
#define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) { NULL, })
struct vm_userfaultfd_ctx {
	struct userfaultfd_ctx *ctx;
};
#else /* CONFIG_USERFAULTFD */
#define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) {})
struct vm_userfaultfd_ctx {};
#endif /* CONFIG_USERFAULTFD */
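
/*
 * Usage sketch (editorial example): code that sets up a VMA without
 * duplicating an existing userfaultfd context simply writes
 *
 *	vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
 *
 * which works the same way whether or not CONFIG_USERFAULTFD is enabled.
 */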
/*
 * This struct defines a virtual memory area. There is one of these
 * per VM-area/task. A VM area is any part of the process virtual memory
 * space that has a special rule for the page-fault handlers (ie a shared
 * library, the executable area etc).
 */
struct vm_area_struct {
	/* The first cache line has the info for VMA tree walking. */

	unsigned long vm_start;		/* Our start address within vm_mm. */
	unsigned long vm_end;		/* The first byte after our end address
					   within vm_mm. */
	/* linked list of VM areas per task, sorted by address */
	struct vm_area_struct *vm_next, *vm_prev;

	struct rb_node vm_rb;

	/*
	 * Largest free memory gap in bytes to the left of this VMA.
	 * Either between this VMA and vma->vm_prev, or between one of the
	 * VMAs below us in the VMA rbtree and its ->vm_prev. This helps
	 * get_unmapped_area find a free area of the right size.
	 */
	unsigned long rb_subtree_gap;

	/* Second cache line starts here. */

	struct mm_struct *vm_mm;	/* The address space we belong to. */
	pgprot_t vm_page_prot;		/* Access permissions of this VMA. */
	unsigned long vm_flags;		/* Flags, see mm.h. */
	/*
	 * For areas with an address space and backing store,
	 * linkage into the address_space->i_mmap interval tree.
	 */
	struct {
		struct rb_node rb;
		unsigned long rb_subtree_last;
	} shared;

	/*
	 * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
	 * list, after a COW of one of the file pages. A MAP_SHARED vma
	 * can only be in the i_mmap tree. An anonymous MAP_PRIVATE, stack
	 * or brk vma (with NULL file) can only be in an anon_vma list.
	 */
	struct list_head anon_vma_chain; /* Serialized by mmap_sem &
					  * page_table_lock */
	struct anon_vma *anon_vma;	/* Serialized by page_table_lock */
	/* Function pointers to deal with this struct. */
	const struct vm_operations_struct *vm_ops;
	/* Information about our backing store: */
	unsigned long vm_pgoff;		/* Offset (within vm_file) in PAGE_SIZE
					   units */
	struct file *vm_file;		/* File we map to (can be NULL). */
	void *vm_private_data;		/* was vm_pte (shared mem) */
	atomic_long_t swap_readahead_info;

	struct vm_region *vm_region;	/* NOMMU mapping region */

	struct mempolicy *vm_policy;	/* NUMA policy for the VMA */

	struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
} __randomize_layout;
struct core_thread {
	struct task_struct *task;
	struct core_thread *next;
};

struct core_state {
	atomic_t nr_threads;
	struct core_thread dumper;
	struct completion startup;
};
struct mm_struct {
	struct vm_area_struct *mmap;		/* list of VMAs */
	struct rb_root mm_rb;
	u32 vmacache_seqnum;			/* per-thread vmacache */
	unsigned long (*get_unmapped_area) (struct file *filp,
				unsigned long addr, unsigned long len,
				unsigned long pgoff, unsigned long flags);
	unsigned long mmap_base;		/* base of mmap area */
	unsigned long mmap_legacy_base;		/* base of mmap area in bottom-up allocations */
#ifdef CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES
	/* Base addresses for compatible mmap() */
	unsigned long mmap_compat_base;
	unsigned long mmap_compat_legacy_base;
#endif
	unsigned long task_size;		/* size of task vm space */
	unsigned long highest_vm_end;		/* highest vma end address */
	/**
	 * @mm_users: The number of users including userspace.
	 *
	 * Use mmget()/mmget_not_zero()/mmput() to modify. When this drops
	 * to 0 (i.e. when the task exits and there are no other temporary
	 * reference holders), we also release a reference on @mm_count
	 * (which may then free the &struct mm_struct if @mm_count also
	 * drops to 0).
	 */
	atomic_t mm_users;
	/**
	 * @mm_count: The number of references to &struct mm_struct
	 * (@mm_users count as 1).
	 *
	 * Use mmgrab()/mmdrop() to modify. When this drops to 0, the
	 * &struct mm_struct is freed.
	 */
	atomic_t mm_count;
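
	/*
	 * Usage sketch (editorial example; the helpers live in
	 * <linux/sched/mm.h>): a temporary user of the address space takes
	 * an mm_users reference, while code that only needs the mm_struct
	 * itself to stay allocated takes an mm_count reference:
	 *
	 *	if (mmget_not_zero(mm)) {
	 *		...			use the address space
	 *		mmput(mm);
	 *	}
	 *
	 *	mmgrab(mm);
	 *	...				mm_struct memory stays valid
	 *	mmdrop(mm);
	 */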
	atomic_long_t pgtables_bytes;		/* PTE page table pages */
	int map_count;				/* number of VMAs */

	spinlock_t page_table_lock;		/* Protects page tables and some counters */
	struct rw_semaphore mmap_sem;
	struct list_head mmlist;		/* List of maybe swapped mm's. These are
						 * globally strung together off init_mm.mmlist,
						 * and are protected by mmlist_lock.
						 */
	unsigned long hiwater_rss;	/* High-watermark of RSS usage */
	unsigned long hiwater_vm;	/* High-water virtual memory usage */

	unsigned long total_vm;		/* Total pages mapped */
	unsigned long locked_vm;	/* Pages that have PG_mlocked set */
	unsigned long pinned_vm;	/* Refcount permanently increased */
	unsigned long data_vm;		/* VM_WRITE & ~VM_SHARED & ~VM_STACK */
	unsigned long exec_vm;		/* VM_EXEC & ~VM_WRITE & ~VM_STACK */
	unsigned long stack_vm;		/* VM_STACK */
	unsigned long def_flags;
	unsigned long start_code, end_code, start_data, end_data;
	unsigned long start_brk, brk, start_stack;
	unsigned long arg_start, arg_end, env_start, env_end;

	unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */

	/*
	 * Special counters, in some configurations protected by the
	 * page_table_lock, in other configurations by being atomic.
	 */
	struct mm_rss_stat rss_stat;

	struct linux_binfmt *binfmt;

	cpumask_var_t cpu_vm_mask_var;

	/* Architecture-specific MM context */
	mm_context_t context;

	unsigned long flags;		/* Must use atomic bitops to access the bits */

	struct core_state *core_state;	/* coredumping support */
#ifdef CONFIG_MEMBARRIER
	atomic_t membarrier_state;
#endif
	spinlock_t ioctx_lock;
	struct kioctx_table __rcu *ioctx_table;
445 * "owner" points to a task that is regarded as the canonical
446 * user/owner of this mm. All of the following must be true in
447 * order for it to be changed:
449 * current == mm->owner
451 * new_owner->mm == mm
452 * new_owner->alloc_lock is held
454 struct task_struct __rcu
*owner
;
	struct user_namespace *user_ns;

	/* store ref to file /proc/<pid>/exe symlink points to */
	struct file __rcu *exe_file;
#ifdef CONFIG_MMU_NOTIFIER
	struct mmu_notifier_mm *mmu_notifier_mm;
#endif
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
	pgtable_t pmd_huge_pte;		/* protected by page_table_lock */
#endif
#ifdef CONFIG_CPUMASK_OFFSTACK
	struct cpumask cpumask_allocation;
#endif
#ifdef CONFIG_NUMA_BALANCING
	/*
	 * numa_next_scan is the next time that the PTEs will be marked
	 * pte_numa. NUMA hinting faults will gather statistics and migrate
	 * pages to new nodes if necessary.
	 */
	unsigned long numa_next_scan;

	/* Restart point for scanning and setting pte_numa */
	unsigned long numa_scan_offset;

	/* numa_scan_seq prevents two threads setting pte_numa */
	int numa_scan_seq;
#endif
	/*
	 * An operation with batched TLB flushing is going on. Anything that
	 * can move process memory needs to flush the TLB when moving a
	 * PROT_NONE or PROT_NUMA mapped page.
	 */
	atomic_t tlb_flush_pending;
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
	/* See flush_tlb_batched_pending() */
	bool tlb_flush_batched;
#endif
	struct uprobes_state uprobes_state;
#ifdef CONFIG_HUGETLB_PAGE
	atomic_long_t hugetlb_usage;
#endif
	struct work_struct async_put_work;
#if IS_ENABLED(CONFIG_HMM)
	/* HMM needs to track a few things per mm */
	struct hmm *hmm;
#endif
} __randomize_layout;
extern struct mm_struct init_mm;
static inline void mm_init_cpumask(struct mm_struct *mm)
{
#ifdef CONFIG_CPUMASK_OFFSTACK
	mm->cpu_vm_mask_var = &mm->cpumask_allocation;
#endif
	cpumask_clear(mm->cpu_vm_mask_var);
}
/* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
{
	return mm->cpu_vm_mask_var;
}
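
/*
 * Usage sketch (editorial example): the mask is normally consulted through
 * mm_cpumask(), e.g. to check whether a CPU may still hold TLB entries for
 * this mm:
 *
 *	if (cpumask_test_cpu(cpu, mm_cpumask(mm)))
 *		...
 */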
struct mmu_gather;
extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
			   unsigned long start, unsigned long end);
extern void tlb_finish_mmu(struct mmu_gather *tlb,
			   unsigned long start, unsigned long end);
static inline void init_tlb_flush_pending(struct mm_struct *mm)
{
	atomic_set(&mm->tlb_flush_pending, 0);
}

static inline void inc_tlb_flush_pending(struct mm_struct *mm)
{
	atomic_inc(&mm->tlb_flush_pending);

	/*
	 * The only time this value is relevant is when there are indeed pages
	 * to flush. And we'll only flush pages after changing them, which
	 * requires the PTL.
	 *
	 * So the ordering here is:
	 *
	 *	atomic_inc(&mm->tlb_flush_pending);
	 *	spin_lock(&ptl);
	 *	...
	 *	set_pte_at();
	 *	spin_unlock(&ptl);
	 *
	 *				spin_lock(&ptl)
	 *				mm_tlb_flush_pending();
	 *				...
	 *				spin_unlock(&ptl);
	 *
	 *	flush_tlb_range();
	 *	atomic_dec(&mm->tlb_flush_pending);
	 *
	 * Where the increment is constrained by the PTL unlock, it thus
	 * ensures that the increment is visible if the PTE modification is
	 * visible. After all, if there is no PTE modification, nobody cares
	 * about TLB flushes either.
	 *
	 * This very much relies on users (mm_tlb_flush_pending() and
	 * mm_tlb_flush_nested()) only caring about _specific_ PTEs (and
	 * therefore specific PTLs), because with SPLIT_PTE_PTLOCKS and RCpc
	 * locks (PPC) the unlock of one doesn't order against the lock of
	 * another.
	 *
	 * The decrement is ordered by the flush_tlb_range(), such that
	 * mm_tlb_flush_pending() will not return false unless all flushes
	 * have completed.
	 */
}
static inline void dec_tlb_flush_pending(struct mm_struct *mm)
{
	/*
	 * See inc_tlb_flush_pending().
	 *
	 * This cannot be smp_mb__before_atomic() because smp_mb() simply does
	 * not order against TLB invalidate completion, which is what we need.
	 *
	 * Therefore we must rely on tlb_flush_*() to guarantee order.
	 */
	atomic_dec(&mm->tlb_flush_pending);
}
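
/*
 * Usage sketch of the protocol described above (editorial example, not a
 * caller from this header):
 *
 *	inc_tlb_flush_pending(mm);
 *	...			change PTEs under their page table lock
 *	flush_tlb_range(vma, start, end);
 *	dec_tlb_flush_pending(mm);
 */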
static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
{
	/*
	 * Must be called after having acquired the PTL; orders against that
	 * PTL's release and therefore ensures that if we observe the modified
	 * PTE we must also observe the increment from inc_tlb_flush_pending().
	 *
	 * That is, it only guarantees to return true if there is a flush
	 * pending for _this_ PTL.
	 */
	return atomic_read(&mm->tlb_flush_pending);
}

static inline bool mm_tlb_flush_nested(struct mm_struct *mm)
{
	/*
	 * Similar to mm_tlb_flush_pending(), we must have acquired the PTL
	 * for which there is a TLB flush pending in order to guarantee
	 * we've seen both that PTE modification and the increment.
	 *
	 * (no requirement on actually still holding the PTL, that is irrelevant)
	 */
	return atomic_read(&mm->tlb_flush_pending) > 1;
}
struct vm_special_mapping {
	const char *name;	/* The name, e.g. "[vdso]". */

	/*
	 * If .fault is not provided, this points to a
	 * NULL-terminated array of pages that back the special mapping.
	 *
	 * This must not be NULL unless .fault is provided.
	 */
	struct page **pages;

	/*
	 * If non-NULL, then this is called to resolve page faults
	 * on the special mapping. If used, .pages is not checked.
	 */
	int (*fault)(const struct vm_special_mapping *sm,
		     struct vm_area_struct *vma,
		     struct vm_fault *vmf);

	int (*mremap)(const struct vm_special_mapping *sm,
		      struct vm_area_struct *new_vma);
};
enum tlb_flush_reason {
	TLB_FLUSH_ON_TASK_SWITCH,
	TLB_REMOTE_SHOOTDOWN,
	TLB_LOCAL_SHOOTDOWN,
	TLB_LOCAL_MM_SHOOTDOWN,
	TLB_REMOTE_SEND_IPI,
	NR_TLB_FLUSH_REASONS,
};
/*
 * A swap entry has to fit into an "unsigned long", as the entry is hidden
 * in the "index" field of the swapper address space.
 */
typedef struct {
	unsigned long val;
} swp_entry_t;

#endif /* _LINUX_MM_TYPES_H */