/*
 * NUMA memory policies for Linux.
 * Copyright 2003,2004 Andi Kleen SuSE Labs
 */
#ifndef _LINUX_MEMPOLICY_H
#define _LINUX_MEMPOLICY_H 1

#include <linux/mmzone.h>
#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <uapi/linux/mempolicy.h>

struct mm_struct;

#ifdef CONFIG_NUMA

/*
 * Describe a memory policy.
 *
 * A mempolicy can be either associated with a process or with a VMA.
 * For VMA related allocations the VMA policy is preferred, otherwise
 * the process policy is used. Interrupts ignore the memory policy
 * of the current process.
 *
 * Locking policy for interleave:
 * In process context there is no locking because only the process accesses
 * its own state. All vma manipulation is somewhat protected by a down_read on
 * mmap_sem.
 *
 * Freeing policy:
 * Mempolicy objects are reference counted.  A mempolicy will be freed when
 * mpol_put() decrements the reference count to zero.
 *
 * Duplicating policy objects:
 * mpol_dup() allocates a new mempolicy and copies the specified mempolicy
 * to the new storage.  The reference count of the new object is initialized
 * to 1, representing the caller of mpol_dup().
 */
struct mempolicy {
	atomic_t refcnt;
	unsigned short mode;	/* See MPOL_* above */
	unsigned short flags;	/* See set_mempolicy() MPOL_F_* above */
	union {
		short		 preferred_node; /* preferred */
		nodemask_t	 nodes;		/* interleave/bind */
		/* undefined for default */
	} v;
	union {
		nodemask_t cpuset_mems_allowed;	/* relative to these nodes */
		nodemask_t user_nodemask;	/* nodemask passed by user */
	} w;
};

/*
 * Support for managing mempolicy data objects (clone, copy, destroy)
 * The default fast path of a NULL MPOL_DEFAULT policy is always inlined.
 */

extern void __mpol_put(struct mempolicy *pol);
static inline void mpol_put(struct mempolicy *pol)
{
	if (pol)
		__mpol_put(pol);
}

/*
 * Does mempolicy pol need explicit unref after use?
 * Currently only needed for shared policies.
 */
static inline int mpol_needs_cond_ref(struct mempolicy *pol)
{
	return (pol && (pol->flags & MPOL_F_SHARED));
}

static inline void mpol_cond_put(struct mempolicy *pol)
{
	if (mpol_needs_cond_ref(pol))
		__mpol_put(pol);
}

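/*
 * Illustrative sketch of the conditional-unref pattern: a policy returned
 * by a shared policy lookup carries MPOL_F_SHARED and is dropped once the
 * allocation that used it is done ('sp' and 'pgoff' are hypothetical
 * caller state):
 *
 *	struct mempolicy *pol;
 *
 *	pol = mpol_shared_policy_lookup(sp, pgoff);
 *	... allocate according to pol ...
 *	mpol_cond_put(pol);
 */
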
extern struct mempolicy *__mpol_dup(struct mempolicy *pol);
static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
{
	if (pol)
		pol = __mpol_dup(pol);
	return pol;
}

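/*
 * Illustrative sketch: take a private copy of the current task's policy
 * and release it with mpol_put() when done. __mpol_dup() may return an
 * ERR_PTR() on allocation failure, so callers typically check for that:
 *
 *	struct mempolicy *pol = mpol_dup(get_task_policy(current));
 *
 *	if (IS_ERR(pol))
 *		return PTR_ERR(pol);
 *	... use pol ...
 *	mpol_put(pol);
 */
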
#define vma_policy(vma) ((vma)->vm_policy)

static inline void mpol_get(struct mempolicy *pol)
{
	if (pol)
		atomic_inc(&pol->refcnt);
}

extern bool __mpol_equal(struct mempolicy *a, struct mempolicy *b);
static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	if (a == b)
		return true;
	return __mpol_equal(a, b);
}

/*
 * Tree of shared policies for a shared memory region.
 * Maintain the policies in a pseudo mm that contains vmas. The vmas
 * carry the policy. As a special twist the pseudo mm is indexed in pages, not
 * bytes, so that we can work with shared memory segments bigger than
 * unsigned long.
 */

struct sp_node {
	struct rb_node nd;
	unsigned long start, end;
	struct mempolicy *policy;
};

struct shared_policy {
	struct rb_root root;
	rwlock_t lock;
};

int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst);
void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol);
int mpol_set_shared_policy(struct shared_policy *info,
				struct vm_area_struct *vma,
				struct mempolicy *new);
void mpol_free_shared_policy(struct shared_policy *p);
struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
					    unsigned long idx);

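/*
 * Illustrative sketch of a shared-policy tree lifecycle as a shared memory
 * backend might drive it ('vma', 'new_pol' and 'pgoff' are hypothetical;
 * note the lookup index is in pages, not bytes):
 *
 *	struct shared_policy sp;
 *	struct mempolicy *pol;
 *
 *	mpol_shared_policy_init(&sp, NULL);
 *	mpol_set_shared_policy(&sp, vma, new_pol);
 *	pol = mpol_shared_policy_lookup(&sp, pgoff);
 *	mpol_cond_put(pol);
 *	mpol_free_shared_policy(&sp);
 */
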
struct mempolicy *get_task_policy(struct task_struct *p);
struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
		unsigned long addr);
bool vma_policy_mof(struct vm_area_struct *vma);

extern void numa_default_policy(void);
extern void numa_policy_init(void);
extern void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
				enum mpol_rebind_step step);
extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);

extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
				unsigned long addr, gfp_t gfp_flags,
				struct mempolicy **mpol, nodemask_t **nodemask);
extern bool init_nodemask_of_mempolicy(nodemask_t *mask);
extern bool mempolicy_nodemask_intersects(struct task_struct *tsk,
				const nodemask_t *mask);
extern unsigned int mempolicy_slab_node(void);

extern enum zone_type policy_zone;

static inline void check_highest_zone(enum zone_type k)
{
	if (k > policy_zone && k != ZONE_MOVABLE)
		policy_zone = k;
}

int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
		     const nodemask_t *to, int flags);

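/*
 * Illustrative sketch: move a task's pages off node 0 and onto node 1,
 * much as sys_migrate_pages() does ('mm' is a hypothetical target mm;
 * the return value is the number of pages that could not be moved, or a
 * negative error):
 *
 *	nodemask_t from = nodemask_of_node(0);
 *	nodemask_t to = nodemask_of_node(1);
 *	int err = do_migrate_pages(mm, &from, &to, MPOL_MF_MOVE);
 */
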
#ifdef CONFIG_TMPFS
extern int mpol_parse_str(char *str, struct mempolicy **mpol);
#endif

extern void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol);

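/*
 * Illustrative sketch: tmpfs parses a mount option string such as
 * "interleave:0-3" into a mempolicy and can format one back for display
 * (mpol_parse_str() returns non-zero on a parse error; 'buf' is a
 * hypothetical caller buffer):
 *
 *	struct mempolicy *mpol;
 *	char buf[64];
 *
 *	if (mpol_parse_str(str, &mpol))
 *		return -EINVAL;
 *	mpol_to_str(buf, sizeof(buf), mpol);
 */
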
/* Check if a vma is migratable */
static inline bool vma_migratable(struct vm_area_struct *vma)
{
	if (vma->vm_flags & (VM_IO | VM_PFNMAP))
		return false;

	/*
	 * DAX device mappings require predictable access latency, so avoid
	 * incurring periodic faults.
	 */
	if (vma_is_dax(vma))
		return false;

#ifndef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
	if (vma->vm_flags & VM_HUGETLB)
		return false;
#endif

	/*
	 * Migration allocates pages in the highest zone. If we cannot
	 * do so then migration (at least from node to node) is not
	 * possible.
	 */
	if (vma->vm_file &&
		gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
								< policy_zone)
		return false;
	return true;
}

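/*
 * Illustrative sketch: skip mappings that cannot be migrated when walking
 * a task's VMAs to queue pages for migration ('mm' is a hypothetical
 * mm_struct already held stable by the caller):
 *
 *	for (vma = mm->mmap; vma; vma = vma->vm_next) {
 *		if (!vma_migratable(vma))
 *			continue;
 *		... queue vma's pages for migration ...
 *	}
 */
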
extern int mpol_misplaced(struct page *, struct vm_area_struct *, unsigned long);
extern void mpol_put_task_policy(struct task_struct *);

#else

struct mempolicy {};

static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	return true;
}

static inline void mpol_put(struct mempolicy *p)
{
}

static inline void mpol_cond_put(struct mempolicy *pol)
{
}

static inline void mpol_get(struct mempolicy *pol)
{
}

struct shared_policy {};

static inline void mpol_shared_policy_init(struct shared_policy *sp,
						struct mempolicy *mpol)
{
}

static inline void mpol_free_shared_policy(struct shared_policy *p)
{
}

static inline struct mempolicy *
mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
{
	return NULL;
}

#define vma_policy(vma) NULL

static inline int
vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
{
	return 0;
}

static inline void numa_policy_init(void)
{
}

static inline void numa_default_policy(void)
{
}

static inline void mpol_rebind_task(struct task_struct *tsk,
				const nodemask_t *new,
				enum mpol_rebind_step step)
{
}

static inline void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
}

static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma,
				unsigned long addr, gfp_t gfp_flags,
				struct mempolicy **mpol, nodemask_t **nodemask)
{
	*mpol = NULL;
	*nodemask = NULL;
	return node_zonelist(0, gfp_flags);
}

static inline bool init_nodemask_of_mempolicy(nodemask_t *m)
{
	return false;
}

static inline int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
				   const nodemask_t *to, int flags)
{
	return 0;
}

static inline void check_highest_zone(int k)
{
}

#ifdef CONFIG_TMPFS
static inline int mpol_parse_str(char *str, struct mempolicy **mpol)
{
	return 1;	/* error */
}
#endif

static inline int mpol_misplaced(struct page *page, struct vm_area_struct *vma,
				 unsigned long address)
{
	return -1; /* no node preference */
}

static inline void mpol_put_task_policy(struct task_struct *task)
{
}

#endif /* CONFIG_NUMA */
#endif