#ifndef _LINUX_MM_TYPES_H
#define _LINUX_MM_TYPES_H

#include <linux/auxvec.h>
#include <linux/types.h>
#include <linux/threads.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/prio_tree.h>
#include <linux/rbtree.h>
#include <linux/rwsem.h>
#include <linux/completion.h>
#include <linux/cpumask.h>
#include <linux/page-debug-flags.h>
#include <asm/page.h>
#include <asm/mmu.h>

#ifndef AT_VECTOR_SIZE_ARCH
#define AT_VECTOR_SIZE_ARCH 0
#endif
#define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1))
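
/*
 * Worked example of the sizing above: each auxv entry is an (id, value)
 * pair of unsigned longs, hence the factor of 2, and the "+ 1" leaves
 * room for the terminating AT_NULL entry. So if AT_VECTOR_SIZE_BASE
 * were 19 (its actual value lives in <linux/auxvec.h>) with no
 * arch-specific entries, saved_auxv below would hold
 * 2*(0 + 19 + 1) == 40 unsigned longs.
 */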

struct address_space;

#define USE_SPLIT_PTLOCKS	(NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)
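
/*
 * Example of what this buys: with, say, NR_CPUS == 64 against the usual
 * CONFIG_SPLIT_PTLOCK_CPUS == 4, USE_SPLIT_PTLOCKS evaluates to 1 and
 * each page-table page carries its own page->ptl spinlock (see struct
 * page below), so walkers of different page-table pages do not all
 * serialize on mm->page_table_lock.
 */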

/*
 * Each physical page in the system has a struct page associated with
 * it to keep track of whatever it is we are using the page for at the
 * moment. Note that we have no way to track which tasks are using
 * a page, though if it is a pagecache page, rmap structures can tell us
 * who is mapping it.
 */
struct page {
	unsigned long flags;		/* Atomic flags, some possibly
					 * updated asynchronously */
	atomic_t _count;		/* Usage count, see below. */
	union {
		atomic_t _mapcount;	/* Count of ptes mapped in mms,
					 * to show when page is mapped
					 * & limit reverse map searches.
					 *
					 * Used also for tail pages
					 * refcounting instead of _count.
					 * Tail pages cannot be mapped and
					 * keeping the tail page _count
					 * zero at all times guarantees
					 * get_page_unless_zero() will
					 * never succeed on tail pages.
					 */
		struct {		/* SLUB */
			u16 inuse;
			u16 objects;
		};
	};
	union {
		struct {
			unsigned long private;	/* Mapping-private opaque data:
						 * usually used for buffer_heads
						 * if PagePrivate set; used for
						 * swp_entry_t if PageSwapCache;
						 * indicates order in the buddy
						 * system if PG_buddy is set.
						 */
			struct address_space *mapping;	/* If low bit clear, points to
							 * inode address_space, or NULL.
							 * If page mapped as anonymous
							 * memory, low bit is set, and
							 * it points to anon_vma object:
							 * see PAGE_MAPPING_ANON below.
							 */
		};
#if USE_SPLIT_PTLOCKS
		spinlock_t ptl;
#endif
		struct kmem_cache *slab;	/* SLUB: Pointer to slab */
		struct page *first_page;	/* Compound tail pages */
	};
	union {
		pgoff_t index;		/* Our offset within mapping. */
		void *freelist;		/* SLUB: freelist req. slab lock */
	};
	struct list_head lru;		/* Pageout list, eg. active_list
					 * protected by zone->lru_lock !
					 */
	/*
	 * On machines where all RAM is mapped into kernel address space,
	 * we can simply calculate the virtual address. On machines with
	 * highmem some memory is mapped into kernel virtual memory
	 * dynamically, so we need a place to store that address.
	 * Note that this field could be 16 bits on x86 ... ;)
	 *
	 * Architectures with slow multiplication can define
	 * WANT_PAGE_VIRTUAL in asm/page.h
	 */
#if defined(WANT_PAGE_VIRTUAL)
	void *virtual;			/* Kernel virtual address (NULL if
					   not kmapped, ie. highmem) */
#endif /* WANT_PAGE_VIRTUAL */
#ifdef CONFIG_WANT_PAGE_DEBUG_FLAGS
	unsigned long debug_flags;	/* Use atomic bitops on this */
#endif

#ifdef CONFIG_KMEMCHECK
	/*
	 * kmemcheck wants to track the status of each byte in a page; this
	 * is a pointer to such a status block. NULL if not tracked.
	 */
	void *shadow;
#endif
};
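
/*
 * Illustrative sketch (not part of the original header): how the low bit
 * of page->mapping is decoded. PAGE_MAPPING_ANON and the real helpers,
 * page_mapping()/PageAnon(), live in <linux/mm.h>; the names below are
 * hypothetical, simplified restatements of that scheme.
 */
#if 0	/* example only */
#define EXAMPLE_PAGE_MAPPING_ANON	1
static inline int example_page_is_anon(struct page *page)
{
	return ((unsigned long)page->mapping & EXAMPLE_PAGE_MAPPING_ANON) != 0;
}

static inline struct address_space *example_page_mapping(struct page *page)
{
	if (example_page_is_anon(page))
		return NULL;	/* ->mapping really holds an anon_vma pointer */
	return page->mapping;
}
#endif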

typedef unsigned long __nocast vm_flags_t;

/*
 * A region containing a mapping of a non-memory backed file under NOMMU
 * conditions. These are held in a global tree and are pinned by the VMAs that
 * map parts of them.
 */
struct vm_region {
	struct rb_node	vm_rb;		/* link in global region tree */
	vm_flags_t	vm_flags;	/* VMA vm_flags */
	unsigned long	vm_start;	/* start address of region */
	unsigned long	vm_end;		/* region initialised to here */
	unsigned long	vm_top;		/* region allocated to here */
	unsigned long	vm_pgoff;	/* the offset in vm_file corresponding to vm_start */
	struct file	*vm_file;	/* the backing file or NULL */

	int		vm_usage;	/* region usage count (access under nommu_region_sem) */
	bool		vm_icache_flushed : 1; /* true if the icache has been flushed for
						* this region */
};
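
/*
 * Illustrative sketch (not part of the original header): the bounds above
 * obey vm_start <= vm_end <= vm_top; the gap between vm_end and vm_top is
 * space allocated to the region but not yet initialised. The helper name
 * is hypothetical.
 */
#if 0	/* example only */
static inline unsigned long example_region_slack(struct vm_region *region)
{
	return region->vm_top - region->vm_end;
}
#endif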

/*
 * This struct defines a virtual memory area. There is one of these
 * per VM-area/task. A VM area is any part of the process virtual memory
 * space that has a special rule for the page-fault handlers (ie a shared
 * library, the executable area etc).
 */
struct vm_area_struct {
	struct mm_struct * vm_mm;	/* The address space we belong to. */
	unsigned long vm_start;		/* Our start address within vm_mm. */
	unsigned long vm_end;		/* The first byte after our end address
					   within vm_mm. */

	/* linked list of VM areas per task, sorted by address */
	struct vm_area_struct *vm_next, *vm_prev;

	pgprot_t vm_page_prot;		/* Access permissions of this VMA. */
	unsigned long vm_flags;		/* Flags, see mm.h. */

	struct rb_node vm_rb;

	/*
	 * For areas with an address space and backing store,
	 * linkage into the address_space->i_mmap prio tree, or
	 * linkage to the list of like vmas hanging off its node, or
	 * linkage of vma in the address_space->i_mmap_nonlinear list.
	 */
	union {
		struct {
			struct list_head list;
			void *parent;	/* aligns with prio_tree_node parent */
			struct vm_area_struct *head;
		} vm_set;

		struct raw_prio_tree_node prio_tree_node;
	} shared;

	/*
	 * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
	 * list, after a COW of one of the file pages. A MAP_SHARED vma
	 * can only be in the i_mmap tree. An anonymous MAP_PRIVATE, stack
	 * or brk vma (with NULL file) can only be in an anon_vma list.
	 */
	struct list_head anon_vma_chain; /* Serialized by mmap_sem &
					  * page_table_lock */
	struct anon_vma *anon_vma;	/* Serialized by page_table_lock */

	/* Function pointers to deal with this struct. */
	const struct vm_operations_struct *vm_ops;

	/* Information about our backing store: */
	unsigned long vm_pgoff;		/* Offset (within vm_file) in PAGE_SIZE
					   units, *not* PAGE_CACHE_SIZE */
	struct file * vm_file;		/* File we map to (can be NULL). */
	void * vm_private_data;		/* was vm_pte (shared mem) */

#ifndef CONFIG_MMU
	struct vm_region *vm_region;	/* NOMMU mapping region */
#endif
#ifdef CONFIG_NUMA
	struct mempolicy *vm_policy;	/* NUMA policy for the VMA */
#endif
};
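
/*
 * Illustrative sketch (not part of the original header): because vm_end
 * is the first byte *after* the area and vm_pgoff is expressed in
 * PAGE_SIZE units, a VMA's page count and backing-file offset fall out
 * directly. The helper names are hypothetical; PAGE_SHIFT comes from
 * <asm/page.h>.
 */
#if 0	/* example only */
static inline unsigned long example_vma_pages(struct vm_area_struct *vma)
{
	return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
}

static inline loff_t example_vma_file_offset(struct vm_area_struct *vma)
{
	return (loff_t)vma->vm_pgoff << PAGE_SHIFT;
}
#endif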

struct core_thread {
	struct task_struct *task;
	struct core_thread *next;
};

struct core_state {
	atomic_t nr_threads;
	struct core_thread dumper;
	struct completion startup;
};

enum {
	MM_FILEPAGES,
	MM_ANONPAGES,
	MM_SWAPENTS,
	NR_MM_COUNTERS
};

#if USE_SPLIT_PTLOCKS && defined(CONFIG_MMU)
#define SPLIT_RSS_COUNTING
/* per-thread cached information */
struct task_rss_stat {
	int events;	/* for synchronization threshold */
	int count[NR_MM_COUNTERS];
};
#endif /* USE_SPLIT_PTLOCKS */

struct mm_rss_stat {
	atomic_long_t count[NR_MM_COUNTERS];
};
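
/*
 * Illustrative sketch (not part of the original header) of how split RSS
 * counting fits together: under SPLIT_RSS_COUNTING each thread batches
 * counter updates in its task_rss_stat and folds them into the shared,
 * atomic mm_rss_stat once enough events accumulate. The real logic lives
 * in mm/memory.c; the names and threshold below are hypothetical
 * simplifications, and task_rss_stat only exists when SPLIT_RSS_COUNTING
 * is defined.
 */
#if 0	/* example only */
#define EXAMPLE_RSS_EVENTS_THRESH	64
static void example_sync_rss_stat(struct task_rss_stat *tstat,
				  struct mm_rss_stat *mstat)
{
	int i;

	if (++tstat->events < EXAMPLE_RSS_EVENTS_THRESH)
		return;		/* still batching, nothing to publish yet */
	tstat->events = 0;
	for (i = 0; i < NR_MM_COUNTERS; i++) {
		if (tstat->count[i]) {
			atomic_long_add(tstat->count[i], &mstat->count[i]);
			tstat->count[i] = 0;
		}
	}
}
#endif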

struct mm_struct {
	struct vm_area_struct * mmap;		/* list of VMAs */
	struct rb_root mm_rb;
	struct vm_area_struct * mmap_cache;	/* last find_vma result */
#ifdef CONFIG_MMU
	unsigned long (*get_unmapped_area) (struct file *filp,
				unsigned long addr, unsigned long len,
				unsigned long pgoff, unsigned long flags);
	void (*unmap_area) (struct mm_struct *mm, unsigned long addr);
#endif
	unsigned long mmap_base;		/* base of mmap area */
	unsigned long task_size;		/* size of task vm space */
	unsigned long cached_hole_size;		/* if non-zero, the largest hole below free_area_cache */
	unsigned long free_area_cache;		/* first hole of size cached_hole_size or larger */
	pgd_t * pgd;
	atomic_t mm_users;			/* How many users with user space? */
	atomic_t mm_count;			/* How many references to "struct mm_struct" (users count as 1) */
	int map_count;				/* number of VMAs */

	spinlock_t page_table_lock;		/* Protects page tables and some counters */
	struct rw_semaphore mmap_sem;

	struct list_head mmlist;		/* List of maybe swapped mm's. These are globally strung
						 * together off init_mm.mmlist, and are protected
						 * by mmlist_lock
						 */

	unsigned long hiwater_rss;	/* High-watermark of RSS usage */
	unsigned long hiwater_vm;	/* High-water virtual memory usage */

	unsigned long total_vm, locked_vm, shared_vm, exec_vm;
	unsigned long stack_vm, reserved_vm, def_flags, nr_ptes;
	unsigned long start_code, end_code, start_data, end_data;
	unsigned long start_brk, brk, start_stack;
	unsigned long arg_start, arg_end, env_start, env_end;

	unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */

	/*
	 * Special counters, in some configurations protected by the
	 * page_table_lock, in other configurations by being atomic.
	 */
	struct mm_rss_stat rss_stat;

	struct linux_binfmt *binfmt;

	cpumask_var_t cpu_vm_mask_var;

	/* Architecture-specific MM context */
	mm_context_t context;

	/* Swap token stuff */
	/*
	 * Last value of global fault stamp as seen by this process.
	 * In other words, this value gives an indication of how long
	 * it has been since this task got the token.
	 * Look at mm/thrash.c
	 */
	unsigned int faultstamp;
	unsigned int token_priority;
	unsigned int last_interval;

	/* How many tasks sharing this mm are OOM_DISABLE */
	atomic_t oom_disable_count;

	unsigned long flags; /* Must use atomic bitops to access the bits */

	struct core_state *core_state; /* coredumping support */

	spinlock_t		ioctx_lock;
	struct hlist_head	ioctx_list;

#ifdef CONFIG_MM_OWNER
	/*
	 * "owner" points to a task that is regarded as the canonical
	 * user/owner of this mm. All of the following must be true in
	 * order for it to be changed:
	 *
	 * current == mm->owner
	 * current->mm != mm
	 * new_owner->mm == mm
	 * new_owner->alloc_lock is held
	 */
	struct task_struct __rcu *owner;
#endif

	/* store ref to file /proc/<pid>/exe symlink points to */
	struct file *exe_file;
	unsigned long num_exe_file_vmas;
#ifdef CONFIG_MMU_NOTIFIER
	struct mmu_notifier_mm *mmu_notifier_mm;
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	pgtable_t pmd_huge_pte; /* protected by page_table_lock */
#endif
#ifdef CONFIG_CPUMASK_OFFSTACK
	struct cpumask cpumask_allocation;
#endif
};
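
/*
 * Illustrative note (not part of the original header) on the two-level
 * refcount above: mm_users counts users of the address space itself (each
 * thread of a process, get_task_mm() callers), and all of them together
 * hold a single mm_count reference; lazy-TLB kernel threads and other
 * "struct only" users take mm_count alone. When mm_users drops to zero
 * the mappings are torn down, but the mm_struct is freed only when
 * mm_count drops to zero. The helper below is a hypothetical sketch of
 * taking a "struct only" reference.
 */
#if 0	/* example only */
static inline void example_grab_mm_struct(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_count);	/* paired with mmdrop() elsewhere */
}
#endif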

static inline void mm_init_cpumask(struct mm_struct *mm)
{
#ifdef CONFIG_CPUMASK_OFFSTACK
	mm->cpu_vm_mask_var = &mm->cpumask_allocation;
#endif
}

/* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
{
	return mm->cpu_vm_mask_var;
}
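
/*
 * Illustrative sketch (not part of the original header): a typical
 * consumer of mm_cpumask(), in the style of arch TLB-shootdown code.
 * smp_call_function_many() is the real kernel API (from <linux/smp.h>);
 * the example_* names are hypothetical.
 */
#if 0	/* example only */
static void example_flush_func(void *info)
{
	/* flush this CPU's TLB entries for the mm passed in via info */
}

static void example_flush_tlb_others(struct mm_struct *mm)
{
	/* run the flush on every other CPU that has used this mm */
	smp_call_function_many(mm_cpumask(mm), example_flush_func, mm, true);
}
#endif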

#endif /* _LINUX_MM_TYPES_H */