#ifndef _LINUX_MEMPOLICY_H
#define _LINUX_MEMPOLICY_H 1

#include <linux/errno.h>

/*
 * NUMA memory policies for Linux.
 * Copyright 2003,2004 Andi Kleen SuSE Labs
 */

/*
 * Both the MPOL_* mempolicy mode and the MPOL_F_* optional mode flags are
 * passed by the user to either set_mempolicy() or mbind() in an 'int' actual.
 * The MPOL_MODE_FLAGS macro determines the legal set of optional mode flags.
 */

/* Policies */
enum {
	MPOL_DEFAULT,
	MPOL_PREFERRED,
	MPOL_BIND,
	MPOL_INTERLEAVE,
	MPOL_MAX,	/* always last member of enum */
};

enum mpol_rebind_step {
	MPOL_REBIND_ONCE,	/* do the rebind work at once (not in two steps) */
	MPOL_REBIND_STEP1,	/* first step: set all the newly allowed nodes */
	MPOL_REBIND_STEP2,	/* second step: clear all the disallowed nodes */
	MPOL_REBIND_NSTEP,
};
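
/*
 * Illustrative sketch (an assumption about typical use, not a contract of
 * this header): a caller rebinding a task to a new nodemask without
 * excluding concurrent readers can go through both steps, so that readers
 * never observe an empty intermediate nodemask:
 *
 *	mpol_rebind_task(tsk, &new_nodes, MPOL_REBIND_STEP1);
 *	... switch tsk->mems_allowed over to new_nodes ...
 *	mpol_rebind_task(tsk, &new_nodes, MPOL_REBIND_STEP2);
 *
 * A caller that can exclude readers may use MPOL_REBIND_ONCE instead.
 */
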
/* Flags for set_mempolicy */
#define MPOL_F_STATIC_NODES	(1 << 15)
#define MPOL_F_RELATIVE_NODES	(1 << 14)

/*
 * MPOL_MODE_FLAGS is the union of all possible optional mode flags passed to
 * either set_mempolicy() or mbind().
 */
#define MPOL_MODE_FLAGS	(MPOL_F_STATIC_NODES | MPOL_F_RELATIVE_NODES)
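
/*
 * Illustrative sketch of the userspace calling convention described above,
 * combining a mode and an optional mode flag in the single 'int' argument:
 *
 *	set_mempolicy(MPOL_INTERLEAVE | MPOL_F_STATIC_NODES,
 *		      nodemask, maxnode);
 *
 * Here MPOL_F_STATIC_NODES asks that the user's nodemask not be remapped
 * when the task's set of allowed nodes changes.
 */
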
/* Flags for get_mempolicy */
#define MPOL_F_NODE	(1<<0)	/* return next IL mode instead of node mask */
#define MPOL_F_ADDR	(1<<1)	/* look up vma using address */
#define MPOL_F_MEMS_ALLOWED (1<<2)	/* return allowed memories */

/* Flags for mbind */
#define MPOL_MF_STRICT	(1<<0)	/* Verify existing pages in the mapping */
#define MPOL_MF_MOVE	(1<<1)	/* Move pages owned by this process to conform to mapping */
#define MPOL_MF_MOVE_ALL (1<<2)	/* Move every page to conform to mapping */
#define MPOL_MF_INTERNAL (1<<3)	/* Internal flags start here */

/*
 * Internal flags that share the struct mempolicy flags word with
 * "mode flags".  These flags are allocated from bit 0 up, as they
 * are never OR'ed into the mode in mempolicy API arguments.
 */
#define MPOL_F_SHARED  (1 << 0)	/* identify shared policies */
#define MPOL_F_LOCAL   (1 << 1)	/* preferred local allocation */
#define MPOL_F_REBINDING (1 << 2)	/* identify policies in rebinding */

#ifdef __KERNEL__

#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>

struct mm_struct;

#ifdef CONFIG_NUMA

/*
 * Describe a memory policy.
 *
 * A mempolicy can be either associated with a process or with a VMA.
 * For VMA related allocations the VMA policy is preferred, otherwise
 * the process policy is used. Interrupts ignore the memory policy
 * of the current process.
 *
 * Locking policy for interleave:
 * In process context there is no locking because only the process accesses
 * its own state. All vma manipulation is somewhat protected by a down_read on
 * mmap_sem.
 *
 * Freeing policy:
 * Mempolicy objects are reference counted.  A mempolicy will be freed when
 * mpol_put() decrements the reference count to zero.
 *
 * Duplicating policy objects:
 * mpol_dup() allocates a new mempolicy and copies the specified mempolicy
 * to the new storage.  The reference count of the new object is initialized
 * to 1, representing the caller of mpol_dup().
 */
struct mempolicy {
	atomic_t refcnt;
	unsigned short mode;	/* See MPOL_* above */
	unsigned short flags;	/* See set_mempolicy() MPOL_F_* above */
	union {
		short		 preferred_node; /* preferred */
		nodemask_t	 nodes;		/* interleave/bind */
		/* undefined for default */
	} v;
	union {
		nodemask_t cpuset_mems_allowed;	/* relative to these nodes */
		nodemask_t user_nodemask;	/* nodemask passed by user */
	} w;
};

/*
 * Support for managing mempolicy data objects (clone, copy, destroy)
 * The default fast path of a NULL MPOL_DEFAULT policy is always inlined.
 */

extern void __mpol_put(struct mempolicy *pol);
static inline void mpol_put(struct mempolicy *pol)
{
	if (pol)
		__mpol_put(pol);
}

/*
 * Does mempolicy pol need explicit unref after use?
 * Currently only needed for shared policies.
 */
static inline int mpol_needs_cond_ref(struct mempolicy *pol)
{
	return (pol && (pol->flags & MPOL_F_SHARED));
}

static inline void mpol_cond_put(struct mempolicy *pol)
{
	if (mpol_needs_cond_ref(pol))
		__mpol_put(pol);
}
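
/*
 * Illustrative sketch: a policy obtained from get_vma_policy() (declared
 * below) may be a shared policy (MPOL_F_SHARED) that was reference counted
 * for the caller, so it is dropped with the conditional put; an unshared
 * task or vma policy is left untouched:
 *
 *	struct mempolicy *pol = get_vma_policy(current, vma, addr);
 *	... use pol to pick a node or zonelist ...
 *	mpol_cond_put(pol);
 */
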
extern struct mempolicy *__mpol_cond_copy(struct mempolicy *tompol,
					  struct mempolicy *frompol);
static inline struct mempolicy *mpol_cond_copy(struct mempolicy *tompol,
					       struct mempolicy *frompol)
{
	if (!frompol)
		return frompol;
	return __mpol_cond_copy(tompol, frompol);
}

extern struct mempolicy *__mpol_dup(struct mempolicy *pol);
static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
{
	if (pol)
		pol = __mpol_dup(pol);
	return pol;
}
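
/*
 * Illustrative sketch of the lifetime rules described above: the copy
 * returned by mpol_dup() starts with its own reference, which the caller
 * must eventually drop with mpol_put().  It is assumed here that the
 * out-of-line __mpol_dup() reports allocation failure via ERR_PTR():
 *
 *	struct mempolicy *new = mpol_dup(old);
 *	if (IS_ERR(new))
 *		return PTR_ERR(new);
 *	... use new ...
 *	mpol_put(new);
 */
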
#define vma_policy(vma) ((vma)->vm_policy)
#define vma_set_policy(vma, pol) ((vma)->vm_policy = (pol))

static inline void mpol_get(struct mempolicy *pol)
{
	if (pol)
		atomic_inc(&pol->refcnt);
}

extern bool __mpol_equal(struct mempolicy *a, struct mempolicy *b);
static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	if (a == b)
		return true;
	return __mpol_equal(a, b);
}

/*
 * Tree of shared policies for a shared memory region.
 * Maintain the policies in a pseudo mm that contains vmas. The vmas
 * carry the policy. As a special twist the pseudo mm is indexed in pages, not
 * bytes, so that we can work with shared memory segments bigger than
 * unsigned long.
 */

struct sp_node {
	struct rb_node nd;
	unsigned long start, end;
	struct mempolicy *policy;
};

struct shared_policy {
	struct rb_root root;
	spinlock_t lock;
};

void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol);
int mpol_set_shared_policy(struct shared_policy *info,
				struct vm_area_struct *vma,
				struct mempolicy *new);
void mpol_free_shared_policy(struct shared_policy *p);
struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
					    unsigned long idx);
struct mempolicy *get_vma_policy(struct task_struct *tsk,
		struct vm_area_struct *vma, unsigned long addr);

extern void numa_default_policy(void);
extern void numa_policy_init(void);
extern void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
				enum mpol_rebind_step step);
extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);
extern void mpol_fix_fork_child_flag(struct task_struct *p);

extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
				unsigned long addr, gfp_t gfp_flags,
				struct mempolicy **mpol, nodemask_t **nodemask);
extern bool init_nodemask_of_mempolicy(nodemask_t *mask);
extern bool mempolicy_nodemask_intersects(struct task_struct *tsk,
				const nodemask_t *mask);
extern unsigned slab_node(struct mempolicy *policy);

extern enum zone_type policy_zone;

static inline void check_highest_zone(enum zone_type k)
{
	if (k > policy_zone && k != ZONE_MOVABLE)
		policy_zone = k;
}

int do_migrate_pages(struct mm_struct *mm,
	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags);

#ifdef CONFIG_TMPFS
extern int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context);
#endif

extern int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol,
			int no_context);

/* Check if a vma is migratable */
static inline int vma_migratable(struct vm_area_struct *vma)
{
	if (vma->vm_flags & (VM_IO|VM_HUGETLB|VM_PFNMAP|VM_RESERVED))
		return 0;
	/*
	 * Migration allocates pages in the highest zone. If we cannot
	 * do so then migration (at least from node to node) is not
	 * possible.
	 */
	if (vma->vm_file &&
		gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
								< policy_zone)
		return 0;
	return 1;
}

#else

struct mempolicy {};

static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	return true;
}

static inline void mpol_put(struct mempolicy *p)
{
}

static inline void mpol_cond_put(struct mempolicy *pol)
{
}

static inline struct mempolicy *mpol_cond_copy(struct mempolicy *to,
					       struct mempolicy *from)
{
	return from;
}

static inline void mpol_get(struct mempolicy *pol)
{
}

static inline struct mempolicy *mpol_dup(struct mempolicy *old)
{
	return NULL;
}

struct shared_policy {};

static inline int mpol_set_shared_policy(struct shared_policy *info,
					struct vm_area_struct *vma,
					struct mempolicy *new)
{
	return -EINVAL;
}

static inline void mpol_shared_policy_init(struct shared_policy *sp,
					struct mempolicy *mpol)
{
}

static inline void mpol_free_shared_policy(struct shared_policy *p)
{
}

static inline struct mempolicy *
mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
{
	return NULL;
}

#define vma_policy(vma) NULL
#define vma_set_policy(vma, pol) do {} while(0)

static inline void numa_policy_init(void)
{
}

static inline void numa_default_policy(void)
{
}

static inline void mpol_rebind_task(struct task_struct *tsk,
				const nodemask_t *new,
				enum mpol_rebind_step step)
{
}

static inline void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
}

static inline void mpol_fix_fork_child_flag(struct task_struct *p)
{
}

static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma,
				unsigned long addr, gfp_t gfp_flags,
				struct mempolicy **mpol, nodemask_t **nodemask)
{
	*mpol = NULL;
	*nodemask = NULL;
	return node_zonelist(0, gfp_flags);
}

static inline bool init_nodemask_of_mempolicy(nodemask_t *m)
{
	return false;
}

static inline bool mempolicy_nodemask_intersects(struct task_struct *tsk,
			const nodemask_t *mask)
{
	return false;
}

static inline int do_migrate_pages(struct mm_struct *mm,
			const nodemask_t *from_nodes,
			const nodemask_t *to_nodes, int flags)
{
	return 0;
}

static inline void check_highest_zone(int k)
{
}

#ifdef CONFIG_TMPFS
static inline int mpol_parse_str(char *str, struct mempolicy **mpol,
				int no_context)
{
	return 1;	/* error */
}
#endif

static inline int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol,
				int no_context)
{
	return 0;
}

#endif /* CONFIG_NUMA */
#endif /* __KERNEL__ */

#endif