mm/mempolicy.c
1 /*
2 * Simple NUMA memory policy for the Linux kernel.
4 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
5 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
6 * Subject to the GNU Public License, version 2.
8 * NUMA policy allows the user to give hints in which node(s) memory should
9 * be allocated.
11 * Support four policies per VMA and per process:
13 * The VMA policy has priority over the process policy for a page fault.
15 * interleave Allocate memory interleaved over a set of nodes,
16 * with normal fallback if it fails.
17 * For VMA based allocations this interleaves based on the
18 * offset into the backing object or offset into the mapping
19 * for anonymous memory. For process policy a process counter
20 * is used.
22 * bind Only allocate memory on a specific set of nodes,
23 * no fallback.
24 * FIXME: memory is allocated starting with the first node
25 * to the last. It would be better if bind would truly restrict
26 * the allocation to memory nodes instead
28 * preferred Try a specific node first before normal fallback.
29 * As a special case node -1 here means do the allocation
30 * on the local CPU. This is normally identical to default,
31 * but useful to set in a VMA when you have a non default
32 * process policy.
34 * default Allocate on the local node first, or when on a VMA
35 * use the process policy. This is what Linux always did
36 * in a NUMA aware kernel and still does by, ahem, default.
38 * The process policy is applied for most non interrupt memory allocations
39 * in that process' context. Interrupts ignore the policies and always
40 * try to allocate on the local CPU. The VMA policy is only applied for memory
41 * allocations for a VMA in the VM.
43 * Currently there are a few corner cases in swapping where the policy
44 * is not applied, but the majority should be handled. When process policy
45 * is used it is not remembered over swap outs/swap ins.
47 * Only the highest zone in the zone hierarchy gets policied. Allocations
48 * requesting a lower zone just use default policy. This implies that
49 * on systems with highmem kernel lowmem allocations don't get policied.
50 * Same with GFP_DMA allocations.
52 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
53 * all users and remembered even when nobody has memory mapped.
56 /* Notebook:
57 fix mmap readahead to honour policy and enable policy for any page cache
58 object
59 statistics for bigpages
60 global policy for page cache? currently it uses process policy. Requires
61 first item above.
62 handle mremap for shared memory (currently ignored for the policy)
63 grows down?
64 make bind policy root only? It can trigger oom much faster and the
65 kernel is not always grateful with that.
66 */
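/*
 * Illustrative userspace sketch (added for this listing, not part of
 * mempolicy.c): roughly how the policies described above are requested
 * from user space via the set_mempolicy() and mbind() wrappers declared
 * in libnuma's <numaif.h>. Node numbers and sizes are arbitrary example
 * values; error handling is minimal.
 */
#if 0	/* example only -- compile in a userspace program with -lnuma */
#include <numaif.h>		/* set_mempolicy(), mbind(), MPOL_* */
#include <sys/mman.h>
#include <stdio.h>

static int mempolicy_example(void)
{
	unsigned long nodes = 0x3;		/* nodemask: nodes 0 and 1 */
	unsigned long maxnode = 8 * sizeof(nodes) + 1;	/* bits + 1, see get_nodes() */
	size_t len = 16 * 4096;
	void *buf;

	/* Process policy: interleave new allocations across nodes 0 and 1. */
	if (set_mempolicy(MPOL_INTERLEAVE, &nodes, maxnode))
		perror("set_mempolicy");

	/* VMA policy: bind one anonymous mapping to node 0 only. */
	buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	nodes = 0x1;
	if (buf != MAP_FAILED && mbind(buf, len, MPOL_BIND, &nodes, maxnode, 0))
		perror("mbind");
	return 0;
}
#endif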
68 #include <linux/mempolicy.h>
69 #include <linux/mm.h>
70 #include <linux/highmem.h>
71 #include <linux/hugetlb.h>
72 #include <linux/kernel.h>
73 #include <linux/sched.h>
74 #include <linux/nodemask.h>
75 #include <linux/cpuset.h>
76 #include <linux/slab.h>
77 #include <linux/string.h>
78 #include <linux/export.h>
79 #include <linux/nsproxy.h>
80 #include <linux/interrupt.h>
81 #include <linux/init.h>
82 #include <linux/compat.h>
83 #include <linux/swap.h>
84 #include <linux/seq_file.h>
85 #include <linux/proc_fs.h>
86 #include <linux/migrate.h>
87 #include <linux/ksm.h>
88 #include <linux/rmap.h>
89 #include <linux/security.h>
90 #include <linux/syscalls.h>
91 #include <linux/ctype.h>
92 #include <linux/mm_inline.h>
94 #include <asm/tlbflush.h>
95 #include <asm/uaccess.h>
96 #include <linux/random.h>
98 #include "internal.h"
100 /* Internal flags */
101 #define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0) /* Skip checks for continuous vmas */
102 #define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1) /* Invert check for nodemask */
104 static struct kmem_cache *policy_cache;
105 static struct kmem_cache *sn_cache;
107 /* Highest zone. A specific allocation for a zone below that is not
108 policied. */
109 enum zone_type policy_zone = 0;
112 * run-time system-wide default policy => local allocation
114 static struct mempolicy default_policy = {
115 .refcnt = ATOMIC_INIT(1), /* never free it */
116 .mode = MPOL_PREFERRED,
117 .flags = MPOL_F_LOCAL,
120 static const struct mempolicy_operations {
121 int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
123 * If the read-side task has no lock to protect task->mempolicy, the
124 * write-side task rebinds task->mempolicy in two steps. The first step
125 * sets all the newly allowed nodes, and the second step clears all the
126 * disallowed nodes. This way we avoid ending up with no node to allocate
127 * a page from.
128 * If we have a lock to protect task->mempolicy on the read side, we
129 * rebind directly.
131 * step:
132 * MPOL_REBIND_ONCE - do rebind work at once
133 * MPOL_REBIND_STEP1 - set all the newly allowed nodes
134 * MPOL_REBIND_STEP2 - clear all the disallowed nodes
136 void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes,
137 enum mpol_rebind_step step);
138 } mpol_ops[MPOL_MAX];
140 /* Check that the nodemask contains at least one populated zone */
141 static int is_valid_nodemask(const nodemask_t *nodemask)
143 int nd, k;
145 for_each_node_mask(nd, *nodemask) {
146 struct zone *z;
148 for (k = 0; k <= policy_zone; k++) {
149 z = &NODE_DATA(nd)->node_zones[k];
150 if (z->present_pages > 0)
151 return 1;
155 return 0;
158 static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
160 return pol->flags & MPOL_MODE_FLAGS;
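/*
 * Worked example (added for illustration, not in the original source):
 * with MPOL_F_RELATIVE_NODES, mpol_relative_nodemask() below maps the
 * user's nodemask positionally onto the allowed set. E.g. a relative
 * mask of {0,2} applied against an allowed set of {4,6,8} yields {4,8}:
 * relative bit N selects the Nth node (modulo the weight) of *rel.
 */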
163 static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
164 const nodemask_t *rel)
166 nodemask_t tmp;
167 nodes_fold(tmp, *orig, nodes_weight(*rel));
168 nodes_onto(*ret, tmp, *rel);
171 static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
173 if (nodes_empty(*nodes))
174 return -EINVAL;
175 pol->v.nodes = *nodes;
176 return 0;
179 static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
181 if (!nodes)
182 pol->flags |= MPOL_F_LOCAL; /* local allocation */
183 else if (nodes_empty(*nodes))
184 return -EINVAL; /* no allowed nodes */
185 else
186 pol->v.preferred_node = first_node(*nodes);
187 return 0;
190 static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
192 if (!is_valid_nodemask(nodes))
193 return -EINVAL;
194 pol->v.nodes = *nodes;
195 return 0;
199 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
200 * any, for the new policy. mpol_new() has already validated the nodes
201 * parameter with respect to the policy mode and flags. But, we need to
202 * handle an empty nodemask with MPOL_PREFERRED here.
204 * Must be called holding task's alloc_lock to protect task's mems_allowed
205 * and mempolicy. May also be called holding the mmap_semaphore for write.
207 static int mpol_set_nodemask(struct mempolicy *pol,
208 const nodemask_t *nodes, struct nodemask_scratch *nsc)
210 int ret;
212 /* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
213 if (pol == NULL)
214 return 0;
215 /* Check N_HIGH_MEMORY */
216 nodes_and(nsc->mask1,
217 cpuset_current_mems_allowed, node_states[N_HIGH_MEMORY]);
219 VM_BUG_ON(!nodes);
220 if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
221 nodes = NULL; /* explicit local allocation */
222 else {
223 if (pol->flags & MPOL_F_RELATIVE_NODES)
224 mpol_relative_nodemask(&nsc->mask2, nodes,&nsc->mask1);
225 else
226 nodes_and(nsc->mask2, *nodes, nsc->mask1);
228 if (mpol_store_user_nodemask(pol))
229 pol->w.user_nodemask = *nodes;
230 else
231 pol->w.cpuset_mems_allowed =
232 cpuset_current_mems_allowed;
235 if (nodes)
236 ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
237 else
238 ret = mpol_ops[pol->mode].create(pol, NULL);
239 return ret;
243 * This function just creates a new policy, does some checks and simple
244 * initialization. You must invoke mpol_set_nodemask() to set nodes.
246 static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
247 nodemask_t *nodes)
249 struct mempolicy *policy;
251 pr_debug("setting mode %d flags %d nodes[0] %lx\n",
252 mode, flags, nodes ? nodes_addr(*nodes)[0] : -1);
254 if (mode == MPOL_DEFAULT) {
255 if (nodes && !nodes_empty(*nodes))
256 return ERR_PTR(-EINVAL);
257 return NULL; /* simply delete any existing policy */
259 VM_BUG_ON(!nodes);
262 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
263 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
264 * All other modes require a valid pointer to a non-empty nodemask.
266 if (mode == MPOL_PREFERRED) {
267 if (nodes_empty(*nodes)) {
268 if (((flags & MPOL_F_STATIC_NODES) ||
269 (flags & MPOL_F_RELATIVE_NODES)))
270 return ERR_PTR(-EINVAL);
272 } else if (nodes_empty(*nodes))
273 return ERR_PTR(-EINVAL);
274 policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
275 if (!policy)
276 return ERR_PTR(-ENOMEM);
277 atomic_set(&policy->refcnt, 1);
278 policy->mode = mode;
279 policy->flags = flags;
281 return policy;
284 /* Slow path of a mpol destructor. */
285 void __mpol_put(struct mempolicy *p)
287 if (!atomic_dec_and_test(&p->refcnt))
288 return;
289 kmem_cache_free(policy_cache, p);
292 static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes,
293 enum mpol_rebind_step step)
298 * step:
299 * MPOL_REBIND_ONCE - do rebind work at once
300 * MPOL_REBIND_STEP1 - set all the newly allowed nodes
301 * MPOL_REBIND_STEP2 - clean all the disallowed nodes
303 static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes,
304 enum mpol_rebind_step step)
306 nodemask_t tmp;
308 if (pol->flags & MPOL_F_STATIC_NODES)
309 nodes_and(tmp, pol->w.user_nodemask, *nodes);
310 else if (pol->flags & MPOL_F_RELATIVE_NODES)
311 mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
312 else {
314 * if step == 1, we use ->w.cpuset_mems_allowed to cache the
315 * result
317 if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP1) {
318 nodes_remap(tmp, pol->v.nodes,
319 pol->w.cpuset_mems_allowed, *nodes);
320 pol->w.cpuset_mems_allowed = step ? tmp : *nodes;
321 } else if (step == MPOL_REBIND_STEP2) {
322 tmp = pol->w.cpuset_mems_allowed;
323 pol->w.cpuset_mems_allowed = *nodes;
324 } else
325 BUG();
328 if (nodes_empty(tmp))
329 tmp = *nodes;
331 if (step == MPOL_REBIND_STEP1)
332 nodes_or(pol->v.nodes, pol->v.nodes, tmp);
333 else if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP2)
334 pol->v.nodes = tmp;
335 else
336 BUG();
338 if (!node_isset(current->il_next, tmp)) {
339 current->il_next = next_node(current->il_next, tmp);
340 if (current->il_next >= MAX_NUMNODES)
341 current->il_next = first_node(tmp);
342 if (current->il_next >= MAX_NUMNODES)
343 current->il_next = numa_node_id();
347 static void mpol_rebind_preferred(struct mempolicy *pol,
348 const nodemask_t *nodes,
349 enum mpol_rebind_step step)
351 nodemask_t tmp;
353 if (pol->flags & MPOL_F_STATIC_NODES) {
354 int node = first_node(pol->w.user_nodemask);
356 if (node_isset(node, *nodes)) {
357 pol->v.preferred_node = node;
358 pol->flags &= ~MPOL_F_LOCAL;
359 } else
360 pol->flags |= MPOL_F_LOCAL;
361 } else if (pol->flags & MPOL_F_RELATIVE_NODES) {
362 mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
363 pol->v.preferred_node = first_node(tmp);
364 } else if (!(pol->flags & MPOL_F_LOCAL)) {
365 pol->v.preferred_node = node_remap(pol->v.preferred_node,
366 pol->w.cpuset_mems_allowed,
367 *nodes);
368 pol->w.cpuset_mems_allowed = *nodes;
373 * mpol_rebind_policy - Migrate a policy to a different set of nodes
375 * If the read-side task has no lock to protect task->mempolicy, the
376 * write-side task rebinds task->mempolicy in two steps. The first step
377 * sets all the newly allowed nodes, and the second step clears all the
378 * disallowed nodes. This way we avoid ending up with no node to allocate
379 * a page from.
380 * If we have a lock to protect task->mempolicy on the read side, we
381 * rebind directly.
383 * step:
384 * MPOL_REBIND_ONCE - do rebind work at once
385 * MPOL_REBIND_STEP1 - set all the newly allowed nodes
386 * MPOL_REBIND_STEP2 - clear all the disallowed nodes
388 static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask,
389 enum mpol_rebind_step step)
391 if (!pol)
392 return;
393 if (!mpol_store_user_nodemask(pol) && step == 0 &&
394 nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
395 return;
397 if (step == MPOL_REBIND_STEP1 && (pol->flags & MPOL_F_REBINDING))
398 return;
400 if (step == MPOL_REBIND_STEP2 && !(pol->flags & MPOL_F_REBINDING))
401 BUG();
403 if (step == MPOL_REBIND_STEP1)
404 pol->flags |= MPOL_F_REBINDING;
405 else if (step == MPOL_REBIND_STEP2)
406 pol->flags &= ~MPOL_F_REBINDING;
407 else if (step >= MPOL_REBIND_NSTEP)
408 BUG();
410 mpol_ops[pol->mode].rebind(pol, newmask, step);
414 * Wrapper for mpol_rebind_policy() that just requires task
415 * pointer, and updates task mempolicy.
417 * Called with task's alloc_lock held.
420 void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
421 enum mpol_rebind_step step)
423 mpol_rebind_policy(tsk->mempolicy, new, step);
427 * Rebind each vma in mm to new nodemask.
429 * Call holding a reference to mm. Takes mm->mmap_sem during call.
432 void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
434 struct vm_area_struct *vma;
436 down_write(&mm->mmap_sem);
437 for (vma = mm->mmap; vma; vma = vma->vm_next)
438 mpol_rebind_policy(vma->vm_policy, new, MPOL_REBIND_ONCE);
439 up_write(&mm->mmap_sem);
442 static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
443 [MPOL_DEFAULT] = {
444 .rebind = mpol_rebind_default,
446 [MPOL_INTERLEAVE] = {
447 .create = mpol_new_interleave,
448 .rebind = mpol_rebind_nodemask,
450 [MPOL_PREFERRED] = {
451 .create = mpol_new_preferred,
452 .rebind = mpol_rebind_preferred,
454 [MPOL_BIND] = {
455 .create = mpol_new_bind,
456 .rebind = mpol_rebind_nodemask,
460 static void migrate_page_add(struct page *page, struct list_head *pagelist,
461 unsigned long flags);
463 /* Scan through pages checking if pages follow certain conditions. */
464 static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
465 unsigned long addr, unsigned long end,
466 const nodemask_t *nodes, unsigned long flags,
467 void *private)
469 pte_t *orig_pte;
470 pte_t *pte;
471 spinlock_t *ptl;
473 orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
474 do {
475 struct page *page;
476 int nid;
478 if (!pte_present(*pte))
479 continue;
480 page = vm_normal_page(vma, addr, *pte);
481 if (!page)
482 continue;
484 * vm_normal_page() filters out zero pages, but there might
485 * still be PageReserved pages to skip, perhaps in a VDSO.
486 * And we cannot move PageKsm pages sensibly or safely yet.
488 if (PageReserved(page) || PageKsm(page))
489 continue;
490 nid = page_to_nid(page);
491 if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
492 continue;
494 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
495 migrate_page_add(page, private, flags);
496 else
497 break;
498 } while (pte++, addr += PAGE_SIZE, addr != end);
499 pte_unmap_unlock(orig_pte, ptl);
500 return addr != end;
503 static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
504 unsigned long addr, unsigned long end,
505 const nodemask_t *nodes, unsigned long flags,
506 void *private)
508 pmd_t *pmd;
509 unsigned long next;
511 pmd = pmd_offset(pud, addr);
512 do {
513 next = pmd_addr_end(addr, end);
514 split_huge_page_pmd(vma->vm_mm, pmd);
515 if (pmd_none_or_trans_huge_or_clear_bad(pmd))
516 continue;
517 if (check_pte_range(vma, pmd, addr, next, nodes,
518 flags, private))
519 return -EIO;
520 } while (pmd++, addr = next, addr != end);
521 return 0;
524 static inline int check_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
525 unsigned long addr, unsigned long end,
526 const nodemask_t *nodes, unsigned long flags,
527 void *private)
529 pud_t *pud;
530 unsigned long next;
532 pud = pud_offset(pgd, addr);
533 do {
534 next = pud_addr_end(addr, end);
535 if (pud_none_or_clear_bad(pud))
536 continue;
537 if (check_pmd_range(vma, pud, addr, next, nodes,
538 flags, private))
539 return -EIO;
540 } while (pud++, addr = next, addr != end);
541 return 0;
544 static inline int check_pgd_range(struct vm_area_struct *vma,
545 unsigned long addr, unsigned long end,
546 const nodemask_t *nodes, unsigned long flags,
547 void *private)
549 pgd_t *pgd;
550 unsigned long next;
552 pgd = pgd_offset(vma->vm_mm, addr);
553 do {
554 next = pgd_addr_end(addr, end);
555 if (pgd_none_or_clear_bad(pgd))
556 continue;
557 if (check_pud_range(vma, pgd, addr, next, nodes,
558 flags, private))
559 return -EIO;
560 } while (pgd++, addr = next, addr != end);
561 return 0;
565 * Check if all pages in a range are on a set of nodes.
566 * If pagelist != NULL then isolate pages from the LRU and
567 * put them on the pagelist.
569 static struct vm_area_struct *
570 check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
571 const nodemask_t *nodes, unsigned long flags, void *private)
573 int err;
574 struct vm_area_struct *first, *vma, *prev;
577 first = find_vma(mm, start);
578 if (!first)
579 return ERR_PTR(-EFAULT);
580 prev = NULL;
581 for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
582 if (!(flags & MPOL_MF_DISCONTIG_OK)) {
583 if (!vma->vm_next && vma->vm_end < end)
584 return ERR_PTR(-EFAULT);
585 if (prev && prev->vm_end < vma->vm_start)
586 return ERR_PTR(-EFAULT);
588 if (!is_vm_hugetlb_page(vma) &&
589 ((flags & MPOL_MF_STRICT) ||
590 ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
591 vma_migratable(vma)))) {
592 unsigned long endvma = vma->vm_end;
594 if (endvma > end)
595 endvma = end;
596 if (vma->vm_start > start)
597 start = vma->vm_start;
598 err = check_pgd_range(vma, start, endvma, nodes,
599 flags, private);
600 if (err) {
601 first = ERR_PTR(err);
602 break;
605 prev = vma;
607 return first;
611 * Apply policy to a single VMA
612 * This must be called with the mmap_sem held for writing.
614 static int vma_replace_policy(struct vm_area_struct *vma,
615 struct mempolicy *pol)
617 int err;
618 struct mempolicy *old;
619 struct mempolicy *new;
621 pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
622 vma->vm_start, vma->vm_end, vma->vm_pgoff,
623 vma->vm_ops, vma->vm_file,
624 vma->vm_ops ? vma->vm_ops->set_policy : NULL);
626 new = mpol_dup(pol);
627 if (IS_ERR(new))
628 return PTR_ERR(new);
630 if (vma->vm_ops && vma->vm_ops->set_policy) {
631 err = vma->vm_ops->set_policy(vma, new);
632 if (err)
633 goto err_out;
636 old = vma->vm_policy;
637 vma->vm_policy = new; /* protected by mmap_sem */
638 mpol_put(old);
640 return 0;
641 err_out:
642 mpol_put(new);
643 return err;
646 /* Step 2: apply policy to a range and do splits. */
647 static int mbind_range(struct mm_struct *mm, unsigned long start,
648 unsigned long end, struct mempolicy *new_pol)
650 struct vm_area_struct *next;
651 struct vm_area_struct *prev;
652 struct vm_area_struct *vma;
653 int err = 0;
654 pgoff_t pgoff;
655 unsigned long vmstart;
656 unsigned long vmend;
658 vma = find_vma(mm, start);
659 if (!vma || vma->vm_start > start)
660 return -EFAULT;
662 prev = vma->vm_prev;
663 if (start > vma->vm_start)
664 prev = vma;
666 for (; vma && vma->vm_start < end; prev = vma, vma = next) {
667 next = vma->vm_next;
668 vmstart = max(start, vma->vm_start);
669 vmend = min(end, vma->vm_end);
671 if (mpol_equal(vma_policy(vma), new_pol))
672 continue;
674 pgoff = vma->vm_pgoff +
675 ((vmstart - vma->vm_start) >> PAGE_SHIFT);
676 prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
677 vma->anon_vma, vma->vm_file, pgoff,
678 new_pol);
679 if (prev) {
680 vma = prev;
681 next = vma->vm_next;
682 continue;
684 if (vma->vm_start != vmstart) {
685 err = split_vma(vma->vm_mm, vma, vmstart, 1);
686 if (err)
687 goto out;
689 if (vma->vm_end != vmend) {
690 err = split_vma(vma->vm_mm, vma, vmend, 0);
691 if (err)
692 goto out;
694 err = vma_replace_policy(vma, new_pol);
695 if (err)
696 goto out;
699 out:
700 return err;
704 * Update task->flags PF_MEMPOLICY bit: set iff non-default
705 * mempolicy. Allows more rapid checking of this (combined perhaps
706 * with other PF_* flag bits) on memory allocation hot code paths.
708 * If called from outside this file, the task 'p' should -only- be
709 * a newly forked child not yet visible on the task list, because
710 * manipulating the task flags of a visible task is not safe.
712 * The above limitation is why this routine has the funny name
713 * mpol_fix_fork_child_flag().
715 * It is also safe to call this with a task pointer of current,
716 * which the static wrapper mpol_set_task_struct_flag() does,
717 * for use within this file.
720 void mpol_fix_fork_child_flag(struct task_struct *p)
722 if (p->mempolicy)
723 p->flags |= PF_MEMPOLICY;
724 else
725 p->flags &= ~PF_MEMPOLICY;
728 static void mpol_set_task_struct_flag(void)
730 mpol_fix_fork_child_flag(current);
733 /* Set the process memory policy */
734 static long do_set_mempolicy(unsigned short mode, unsigned short flags,
735 nodemask_t *nodes)
737 struct mempolicy *new, *old;
738 struct mm_struct *mm = current->mm;
739 NODEMASK_SCRATCH(scratch);
740 int ret;
742 if (!scratch)
743 return -ENOMEM;
745 new = mpol_new(mode, flags, nodes);
746 if (IS_ERR(new)) {
747 ret = PTR_ERR(new);
748 goto out;
751 * prevent changing our mempolicy while show_numa_maps()
752 * is using it.
753 * Note: do_set_mempolicy() can be called at init time
754 * with no 'mm'.
756 if (mm)
757 down_write(&mm->mmap_sem);
758 task_lock(current);
759 ret = mpol_set_nodemask(new, nodes, scratch);
760 if (ret) {
761 task_unlock(current);
762 if (mm)
763 up_write(&mm->mmap_sem);
764 mpol_put(new);
765 goto out;
767 old = current->mempolicy;
768 current->mempolicy = new;
769 mpol_set_task_struct_flag();
770 if (new && new->mode == MPOL_INTERLEAVE &&
771 nodes_weight(new->v.nodes))
772 current->il_next = first_node(new->v.nodes);
773 task_unlock(current);
774 if (mm)
775 up_write(&mm->mmap_sem);
777 mpol_put(old);
778 ret = 0;
779 out:
780 NODEMASK_SCRATCH_FREE(scratch);
781 return ret;
785 * Return nodemask for policy for get_mempolicy() query
787 * Called with task's alloc_lock held
789 static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
791 nodes_clear(*nodes);
792 if (p == &default_policy)
793 return;
795 switch (p->mode) {
796 case MPOL_BIND:
797 /* Fall through */
798 case MPOL_INTERLEAVE:
799 *nodes = p->v.nodes;
800 break;
801 case MPOL_PREFERRED:
802 if (!(p->flags & MPOL_F_LOCAL))
803 node_set(p->v.preferred_node, *nodes);
804 /* else return empty node mask for local allocation */
805 break;
806 default:
807 BUG();
811 static int lookup_node(struct mm_struct *mm, unsigned long addr)
813 struct page *p;
814 int err;
816 err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
817 if (err >= 0) {
818 err = page_to_nid(p);
819 put_page(p);
821 return err;
824 /* Retrieve NUMA policy */
825 static long do_get_mempolicy(int *policy, nodemask_t *nmask,
826 unsigned long addr, unsigned long flags)
828 int err;
829 struct mm_struct *mm = current->mm;
830 struct vm_area_struct *vma = NULL;
831 struct mempolicy *pol = current->mempolicy;
833 if (flags &
834 ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
835 return -EINVAL;
837 if (flags & MPOL_F_MEMS_ALLOWED) {
838 if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
839 return -EINVAL;
840 *policy = 0; /* just so it's initialized */
841 task_lock(current);
842 *nmask = cpuset_current_mems_allowed;
843 task_unlock(current);
844 return 0;
847 if (flags & MPOL_F_ADDR) {
849 * Do NOT fall back to task policy if the
850 * vma/shared policy at addr is NULL. We
851 * want to return MPOL_DEFAULT in this case.
853 down_read(&mm->mmap_sem);
854 vma = find_vma_intersection(mm, addr, addr+1);
855 if (!vma) {
856 up_read(&mm->mmap_sem);
857 return -EFAULT;
859 if (vma->vm_ops && vma->vm_ops->get_policy)
860 pol = vma->vm_ops->get_policy(vma, addr);
861 else
862 pol = vma->vm_policy;
863 } else if (addr)
864 return -EINVAL;
866 if (!pol)
867 pol = &default_policy; /* indicates default behavior */
869 if (flags & MPOL_F_NODE) {
870 if (flags & MPOL_F_ADDR) {
871 err = lookup_node(mm, addr);
872 if (err < 0)
873 goto out;
874 *policy = err;
875 } else if (pol == current->mempolicy &&
876 pol->mode == MPOL_INTERLEAVE) {
877 *policy = current->il_next;
878 } else {
879 err = -EINVAL;
880 goto out;
882 } else {
883 *policy = pol == &default_policy ? MPOL_DEFAULT :
884 pol->mode;
886 * Internal mempolicy flags must be masked off before exposing
887 * the policy to userspace.
889 *policy |= (pol->flags & MPOL_MODE_FLAGS);
892 if (vma) {
893 up_read(&current->mm->mmap_sem);
894 vma = NULL;
897 err = 0;
898 if (nmask) {
899 if (mpol_store_user_nodemask(pol)) {
900 *nmask = pol->w.user_nodemask;
901 } else {
902 task_lock(current);
903 get_policy_nodemask(pol, nmask);
904 task_unlock(current);
908 out:
909 mpol_cond_put(pol);
910 if (vma)
911 up_read(&current->mm->mmap_sem);
912 return err;
915 #ifdef CONFIG_MIGRATION
917 * page migration
919 static void migrate_page_add(struct page *page, struct list_head *pagelist,
920 unsigned long flags)
923 * Avoid migrating a page that is shared with others.
925 if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
926 if (!isolate_lru_page(page)) {
927 list_add_tail(&page->lru, pagelist);
928 inc_zone_page_state(page, NR_ISOLATED_ANON +
929 page_is_file_cache(page));
934 static struct page *new_node_page(struct page *page, unsigned long node, int **x)
936 return alloc_pages_exact_node(node, GFP_HIGHUSER_MOVABLE, 0);
940 * Migrate pages from one node to a target node.
941 * Returns error or the number of pages not migrated.
943 static int migrate_to_node(struct mm_struct *mm, int source, int dest,
944 int flags)
946 nodemask_t nmask;
947 LIST_HEAD(pagelist);
948 int err = 0;
949 struct vm_area_struct *vma;
951 nodes_clear(nmask);
952 node_set(source, nmask);
954 vma = check_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
955 flags | MPOL_MF_DISCONTIG_OK, &pagelist);
956 if (IS_ERR(vma))
957 return PTR_ERR(vma);
959 if (!list_empty(&pagelist)) {
960 err = migrate_pages(&pagelist, new_node_page, dest,
961 false, MIGRATE_SYNC);
962 if (err)
963 putback_lru_pages(&pagelist);
966 return err;
970 * Move pages between the two nodesets so as to preserve the physical
971 * layout as much as possible.
973 * Returns the number of pages that could not be moved.
975 int do_migrate_pages(struct mm_struct *mm,
976 const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
978 int busy = 0;
979 int err;
980 nodemask_t tmp;
982 err = migrate_prep();
983 if (err)
984 return err;
986 down_read(&mm->mmap_sem);
988 err = migrate_vmas(mm, from_nodes, to_nodes, flags);
989 if (err)
990 goto out;
993 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
994 * bit in 'to' is not also set in 'tmp'. Clear the found 'source'
995 * bit in 'tmp', and return that <source, dest> pair for migration.
996 * The pair of nodemasks 'to' and 'from' define the map.
998 * If no pair of bits is found that way, fallback to picking some
999 * pair of 'source' and 'dest' bits that are not the same. If the
1000 * 'source' and 'dest' bits are the same, this represents a node
1001 * that will be migrating to itself, so no pages need move.
1003 * If no bits are left in 'tmp', or if all remaining bits left
1004 * in 'tmp' correspond to the same bit in 'to', return false
1005 * (nothing left to migrate).
1007 * This lets us pick a pair of nodes to migrate between, such that
1008 * if possible the dest node is not already occupied by some other
1009 * source node, minimizing the risk of overloading the memory on a
1010 * node, which would happen if we migrated incoming memory to a node
1011 * before migrating the outgoing memory off that same node.
1013 * A single scan of tmp is sufficient. As we go, we remember the
1014 * most recent <s, d> pair that moved (s != d). If we find a pair
1015 * that not only moved, but what's better, moved to an empty slot
1016 * (d is not set in tmp), then we break out then, with that pair.
1017 * Otherwise when we finish scanning from_tmp, we at least have the
1018 * most recent <s, d> pair that moved. If we get all the way through
1019 * the scan of tmp without finding any node that moved, much less
1020 * moved to an empty node, then there is nothing left worth migrating.
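/*
 * Concrete example (illustration only, not in the original source):
 * from_nodes = {0,1}, to_nodes = {2,3}. node_remap() gives 0->2 and
 * 1->3; neither dest remains in tmp, so the scan below breaks out with
 * <0,2> on the first pass, then <1,3>: two migrate_to_node() calls,
 * each to a destination that is not itself a pending source.
 */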
1023 tmp = *from_nodes;
1024 while (!nodes_empty(tmp)) {
1025 int s,d;
1026 int source = -1;
1027 int dest = 0;
1029 for_each_node_mask(s, tmp) {
1030 d = node_remap(s, *from_nodes, *to_nodes);
1031 if (s == d)
1032 continue;
1034 source = s; /* Node moved. Memorize */
1035 dest = d;
1037 /* dest not in remaining from nodes? */
1038 if (!node_isset(dest, tmp))
1039 break;
1041 if (source == -1)
1042 break;
1044 node_clear(source, tmp);
1045 err = migrate_to_node(mm, source, dest, flags);
1046 if (err > 0)
1047 busy += err;
1048 if (err < 0)
1049 break;
1051 out:
1052 up_read(&mm->mmap_sem);
1053 if (err < 0)
1054 return err;
1055 return busy;
1060 * Allocate a new page for page migration based on vma policy.
1061 * Start assuming that page is mapped by vma pointed to by @private.
1062 * Search forward from there, if not. N.B., this assumes that the
1063 * list of pages handed to migrate_pages()--which is how we get here--
1064 * is in virtual address order.
1066 static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
1068 struct vm_area_struct *vma = (struct vm_area_struct *)private;
1069 unsigned long uninitialized_var(address);
1071 while (vma) {
1072 address = page_address_in_vma(page, vma);
1073 if (address != -EFAULT)
1074 break;
1075 vma = vma->vm_next;
1079 * if !vma, alloc_page_vma() will use task or system default policy
1081 return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
1083 #else
1085 static void migrate_page_add(struct page *page, struct list_head *pagelist,
1086 unsigned long flags)
1090 int do_migrate_pages(struct mm_struct *mm,
1091 const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
1093 return -ENOSYS;
1096 static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
1098 return NULL;
1100 #endif
1102 static long do_mbind(unsigned long start, unsigned long len,
1103 unsigned short mode, unsigned short mode_flags,
1104 nodemask_t *nmask, unsigned long flags)
1106 struct vm_area_struct *vma;
1107 struct mm_struct *mm = current->mm;
1108 struct mempolicy *new;
1109 unsigned long end;
1110 int err;
1111 LIST_HEAD(pagelist);
1113 if (flags & ~(unsigned long)(MPOL_MF_STRICT |
1114 MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
1115 return -EINVAL;
1116 if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
1117 return -EPERM;
1119 if (start & ~PAGE_MASK)
1120 return -EINVAL;
1122 if (mode == MPOL_DEFAULT)
1123 flags &= ~MPOL_MF_STRICT;
1125 len = (len + PAGE_SIZE - 1) & PAGE_MASK;
1126 end = start + len;
1128 if (end < start)
1129 return -EINVAL;
1130 if (end == start)
1131 return 0;
1133 new = mpol_new(mode, mode_flags, nmask);
1134 if (IS_ERR(new))
1135 return PTR_ERR(new);
1138 * If we are using the default policy then operation
1139 * on discontinuous address spaces is okay after all
1141 if (!new)
1142 flags |= MPOL_MF_DISCONTIG_OK;
1144 pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
1145 start, start + len, mode, mode_flags,
1146 nmask ? nodes_addr(*nmask)[0] : -1);
1148 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
1150 err = migrate_prep();
1151 if (err)
1152 goto mpol_out;
1155 NODEMASK_SCRATCH(scratch);
1156 if (scratch) {
1157 down_write(&mm->mmap_sem);
1158 task_lock(current);
1159 err = mpol_set_nodemask(new, nmask, scratch);
1160 task_unlock(current);
1161 if (err)
1162 up_write(&mm->mmap_sem);
1163 } else
1164 err = -ENOMEM;
1165 NODEMASK_SCRATCH_FREE(scratch);
1167 if (err)
1168 goto mpol_out;
1170 vma = check_range(mm, start, end, nmask,
1171 flags | MPOL_MF_INVERT, &pagelist);
1173 err = PTR_ERR(vma);
1174 if (!IS_ERR(vma)) {
1175 int nr_failed = 0;
1177 err = mbind_range(mm, start, end, new);
1179 if (!list_empty(&pagelist)) {
1180 nr_failed = migrate_pages(&pagelist, new_vma_page,
1181 (unsigned long)vma,
1182 false, true);
1183 if (nr_failed)
1184 putback_lru_pages(&pagelist);
1187 if (!err && nr_failed && (flags & MPOL_MF_STRICT))
1188 err = -EIO;
1189 } else
1190 putback_lru_pages(&pagelist);
1192 up_write(&mm->mmap_sem);
1193 mpol_out:
1194 mpol_put(new);
1195 return err;
1199 * User space interface with variable sized bitmaps for nodelists.
1202 /* Copy a node mask from user space. */
1203 static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
1204 unsigned long maxnode)
1206 unsigned long k;
1207 unsigned long nlongs;
1208 unsigned long endmask;
1210 --maxnode;
1211 nodes_clear(*nodes);
1212 if (maxnode == 0 || !nmask)
1213 return 0;
1214 if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
1215 return -EINVAL;
1217 nlongs = BITS_TO_LONGS(maxnode);
1218 if ((maxnode % BITS_PER_LONG) == 0)
1219 endmask = ~0UL;
1220 else
1221 endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
1223 /* When the user specified more nodes than supported just check
1224 if the non supported part is all zero. */
1225 if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
1226 if (nlongs > PAGE_SIZE/sizeof(long))
1227 return -EINVAL;
1228 for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
1229 unsigned long t;
1230 if (get_user(t, nmask + k))
1231 return -EFAULT;
1232 if (k == nlongs - 1) {
1233 if (t & endmask)
1234 return -EINVAL;
1235 } else if (t)
1236 return -EINVAL;
1238 nlongs = BITS_TO_LONGS(MAX_NUMNODES);
1239 endmask = ~0UL;
1242 if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
1243 return -EFAULT;
1244 nodes_addr(*nodes)[nlongs-1] &= endmask;
1245 return 0;
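/*
 * Worked example (added for illustration, not in the original source):
 * a user-supplied maxnode of 9 becomes 8 after the decrement above, so
 * nlongs = 1 and endmask = 0xff; only node bits 0..7 of the copied word
 * survive the final mask. This is why callers typically pass the number
 * of interesting bits plus one.
 */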
1248 /* Copy a kernel node mask to user space */
1249 static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
1250 nodemask_t *nodes)
1252 unsigned long copy = ALIGN(maxnode-1, 64) / 8;
1253 const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
1255 if (copy > nbytes) {
1256 if (copy > PAGE_SIZE)
1257 return -EINVAL;
1258 if (clear_user((char __user *)mask + nbytes, copy - nbytes))
1259 return -EFAULT;
1260 copy = nbytes;
1262 return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
1265 SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1266 unsigned long, mode, unsigned long __user *, nmask,
1267 unsigned long, maxnode, unsigned, flags)
1269 nodemask_t nodes;
1270 int err;
1271 unsigned short mode_flags;
1273 mode_flags = mode & MPOL_MODE_FLAGS;
1274 mode &= ~MPOL_MODE_FLAGS;
1275 if (mode >= MPOL_MAX)
1276 return -EINVAL;
1277 if ((mode_flags & MPOL_F_STATIC_NODES) &&
1278 (mode_flags & MPOL_F_RELATIVE_NODES))
1279 return -EINVAL;
1280 err = get_nodes(&nodes, nmask, maxnode);
1281 if (err)
1282 return err;
1283 return do_mbind(start, len, mode, mode_flags, &nodes, flags);
1286 /* Set the process memory policy */
1287 SYSCALL_DEFINE3(set_mempolicy, int, mode, unsigned long __user *, nmask,
1288 unsigned long, maxnode)
1290 int err;
1291 nodemask_t nodes;
1292 unsigned short flags;
1294 flags = mode & MPOL_MODE_FLAGS;
1295 mode &= ~MPOL_MODE_FLAGS;
1296 if ((unsigned int)mode >= MPOL_MAX)
1297 return -EINVAL;
1298 if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
1299 return -EINVAL;
1300 err = get_nodes(&nodes, nmask, maxnode);
1301 if (err)
1302 return err;
1303 return do_set_mempolicy(mode, flags, &nodes);
1306 SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1307 const unsigned long __user *, old_nodes,
1308 const unsigned long __user *, new_nodes)
1310 const struct cred *cred = current_cred(), *tcred;
1311 struct mm_struct *mm = NULL;
1312 struct task_struct *task;
1313 nodemask_t task_nodes;
1314 int err;
1315 nodemask_t *old;
1316 nodemask_t *new;
1317 NODEMASK_SCRATCH(scratch);
1319 if (!scratch)
1320 return -ENOMEM;
1322 old = &scratch->mask1;
1323 new = &scratch->mask2;
1325 err = get_nodes(old, old_nodes, maxnode);
1326 if (err)
1327 goto out;
1329 err = get_nodes(new, new_nodes, maxnode);
1330 if (err)
1331 goto out;
1333 /* Find the mm_struct */
1334 rcu_read_lock();
1335 task = pid ? find_task_by_vpid(pid) : current;
1336 if (!task) {
1337 rcu_read_unlock();
1338 err = -ESRCH;
1339 goto out;
1341 get_task_struct(task);
1343 err = -EINVAL;
1346 * Check if this process has the right to modify the specified
1347 * process. The right exists if the process has administrative
1348 * capabilities, superuser privileges or the same
1349 * userid as the target process.
1351 tcred = __task_cred(task);
1352 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
1353 cred->uid != tcred->suid && cred->uid != tcred->uid &&
1354 !capable(CAP_SYS_NICE)) {
1355 rcu_read_unlock();
1356 err = -EPERM;
1357 goto out_put;
1359 rcu_read_unlock();
1361 task_nodes = cpuset_mems_allowed(task);
1362 /* Is the user allowed to access the target nodes? */
1363 if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
1364 err = -EPERM;
1365 goto out_put;
1368 if (!nodes_subset(*new, node_states[N_HIGH_MEMORY])) {
1369 err = -EINVAL;
1370 goto out_put;
1373 err = security_task_movememory(task);
1374 if (err)
1375 goto out_put;
1377 mm = get_task_mm(task);
1378 put_task_struct(task);
1380 if (!mm) {
1381 err = -EINVAL;
1382 goto out;
1385 err = do_migrate_pages(mm, old, new,
1386 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
1388 mmput(mm);
1389 out:
1390 NODEMASK_SCRATCH_FREE(scratch);
1392 return err;
1394 out_put:
1395 put_task_struct(task);
1396 goto out;
1401 /* Retrieve NUMA policy */
1402 SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1403 unsigned long __user *, nmask, unsigned long, maxnode,
1404 unsigned long, addr, unsigned long, flags)
1406 int err;
1407 int uninitialized_var(pval);
1408 nodemask_t nodes;
1410 if (nmask != NULL && maxnode < MAX_NUMNODES)
1411 return -EINVAL;
1413 err = do_get_mempolicy(&pval, &nodes, addr, flags);
1415 if (err)
1416 return err;
1418 if (policy && put_user(pval, policy))
1419 return -EFAULT;
1421 if (nmask)
1422 err = copy_nodes_to_user(nmask, maxnode, &nodes);
1424 return err;
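/*
 * Illustrative userspace sketch (added for this listing, not part of
 * mempolicy.c): querying the policy in effect at an address through the
 * get_mempolicy() wrapper from libnuma's <numaif.h>. The nodemask is
 * left NULL here because, as checked above, retrieving it requires a
 * buffer of at least MAX_NUMNODES bits.
 */
#if 0	/* example only -- compile in a userspace program with -lnuma */
#include <numaif.h>
#include <stdio.h>

static void show_policy_at(void *addr)
{
	int mode;

	/* MPOL_F_ADDR: report the VMA/shared policy covering 'addr'. */
	if (get_mempolicy(&mode, NULL, 0, addr, MPOL_F_ADDR) == 0)
		printf("mode at %p: %d\n", addr, mode);
	else
		perror("get_mempolicy");
}
#endif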
1427 #ifdef CONFIG_COMPAT
1429 asmlinkage long compat_sys_get_mempolicy(int __user *policy,
1430 compat_ulong_t __user *nmask,
1431 compat_ulong_t maxnode,
1432 compat_ulong_t addr, compat_ulong_t flags)
1434 long err;
1435 unsigned long __user *nm = NULL;
1436 unsigned long nr_bits, alloc_size;
1437 DECLARE_BITMAP(bm, MAX_NUMNODES);
1439 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1440 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1442 if (nmask)
1443 nm = compat_alloc_user_space(alloc_size);
1445 err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
1447 if (!err && nmask) {
1448 unsigned long copy_size;
1449 copy_size = min_t(unsigned long, sizeof(bm), alloc_size);
1450 err = copy_from_user(bm, nm, copy_size);
1451 /* ensure entire bitmap is zeroed */
1452 err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
1453 err |= compat_put_bitmap(nmask, bm, nr_bits);
1456 return err;
1459 asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
1460 compat_ulong_t maxnode)
1462 long err = 0;
1463 unsigned long __user *nm = NULL;
1464 unsigned long nr_bits, alloc_size;
1465 DECLARE_BITMAP(bm, MAX_NUMNODES);
1467 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1468 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1470 if (nmask) {
1471 err = compat_get_bitmap(bm, nmask, nr_bits);
1472 nm = compat_alloc_user_space(alloc_size);
1473 err |= copy_to_user(nm, bm, alloc_size);
1476 if (err)
1477 return -EFAULT;
1479 return sys_set_mempolicy(mode, nm, nr_bits+1);
1482 asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
1483 compat_ulong_t mode, compat_ulong_t __user *nmask,
1484 compat_ulong_t maxnode, compat_ulong_t flags)
1486 long err = 0;
1487 unsigned long __user *nm = NULL;
1488 unsigned long nr_bits, alloc_size;
1489 nodemask_t bm;
1491 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1492 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1494 if (nmask) {
1495 err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
1496 nm = compat_alloc_user_space(alloc_size);
1497 err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
1500 if (err)
1501 return -EFAULT;
1503 return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
1506 #endif
1509 * get_vma_policy(@task, @vma, @addr)
1510 * @task - task for fallback if vma policy == default
1511 * @vma - virtual memory area whose policy is sought
1512 * @addr - address in @vma for shared policy lookup
1514 * Returns effective policy for a VMA at specified address.
1515 * Falls back to @task or system default policy, as necessary.
1516 * Current or other task's task mempolicy and non-shared vma policies
1517 * are protected by the task's mmap_sem, which must be held for read by
1518 * the caller.
1519 * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
1520 * count--added by the get_policy() vm_op, as appropriate--to protect against
1521 * freeing by another task. It is the caller's responsibility to free the
1522 * extra reference for shared policies.
1524 struct mempolicy *get_vma_policy(struct task_struct *task,
1525 struct vm_area_struct *vma, unsigned long addr)
1527 struct mempolicy *pol = task->mempolicy;
1529 if (vma) {
1530 if (vma->vm_ops && vma->vm_ops->get_policy) {
1531 struct mempolicy *vpol = vma->vm_ops->get_policy(vma,
1532 addr);
1533 if (vpol)
1534 pol = vpol;
1535 } else if (vma->vm_policy) {
1536 pol = vma->vm_policy;
1539 * shmem_alloc_page() passes MPOL_F_SHARED policy with
1540 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
1541 * count on these policies which will be dropped by
1542 * mpol_cond_put() later
1544 if (mpol_needs_cond_ref(pol))
1545 mpol_get(pol);
1548 if (!pol)
1549 pol = &default_policy;
1550 return pol;
1554 * Return a nodemask representing a mempolicy for filtering nodes for
1555 * page allocation
1557 static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
1559 /* Lower zones don't get a nodemask applied for MPOL_BIND */
1560 if (unlikely(policy->mode == MPOL_BIND) &&
1561 gfp_zone(gfp) >= policy_zone &&
1562 cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
1563 return &policy->v.nodes;
1565 return NULL;
1568 /* Return a zonelist indicated by gfp for node representing a mempolicy */
1569 static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy,
1570 int nd)
1572 switch (policy->mode) {
1573 case MPOL_PREFERRED:
1574 if (!(policy->flags & MPOL_F_LOCAL))
1575 nd = policy->v.preferred_node;
1576 break;
1577 case MPOL_BIND:
1579 * Normally, MPOL_BIND allocations are node-local within the
1580 * allowed nodemask. However, if __GFP_THISNODE is set and the
1581 * current node isn't part of the mask, we use the zonelist for
1582 * the first node in the mask instead.
1584 if (unlikely(gfp & __GFP_THISNODE) &&
1585 unlikely(!node_isset(nd, policy->v.nodes)))
1586 nd = first_node(policy->v.nodes);
1587 break;
1588 default:
1589 BUG();
1591 return node_zonelist(nd, gfp);
1594 /* Do dynamic interleaving for a process */
1595 static unsigned interleave_nodes(struct mempolicy *policy)
1597 unsigned nid, next;
1598 struct task_struct *me = current;
1600 nid = me->il_next;
1601 next = next_node(nid, policy->v.nodes);
1602 if (next >= MAX_NUMNODES)
1603 next = first_node(policy->v.nodes);
1604 if (next < MAX_NUMNODES)
1605 me->il_next = next;
1606 return nid;
1610 * Depending on the memory policy provide a node from which to allocate the
1611 * next slab entry.
1612 * @policy must be protected from freeing by the caller. If @policy is
1613 * the current task's mempolicy, this protection is implicit, as only the
1614 * task can change its policy. The system default policy requires no
1615 * such protection.
1617 unsigned slab_node(struct mempolicy *policy)
1619 if (!policy || policy->flags & MPOL_F_LOCAL)
1620 return numa_node_id();
1622 switch (policy->mode) {
1623 case MPOL_PREFERRED:
1625 * handled MPOL_F_LOCAL above
1627 return policy->v.preferred_node;
1629 case MPOL_INTERLEAVE:
1630 return interleave_nodes(policy);
1632 case MPOL_BIND: {
1634 * Follow bind policy behavior and start allocation at the
1635 * first node.
1637 struct zonelist *zonelist;
1638 struct zone *zone;
1639 enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
1640 zonelist = &NODE_DATA(numa_node_id())->node_zonelists[0];
1641 (void)first_zones_zonelist(zonelist, highest_zoneidx,
1642 &policy->v.nodes,
1643 &zone);
1644 return zone ? zone->node : numa_node_id();
1647 default:
1648 BUG();
1652 /* Do static interleaving for a VMA with known offset. */
1653 static unsigned offset_il_node(struct mempolicy *pol,
1654 struct vm_area_struct *vma, unsigned long off)
1656 unsigned nnodes = nodes_weight(pol->v.nodes);
1657 unsigned target;
1658 int c;
1659 int nid = -1;
1661 if (!nnodes)
1662 return numa_node_id();
1663 target = (unsigned int)off % nnodes;
1664 c = 0;
1665 do {
1666 nid = next_node(nid, pol->v.nodes);
1667 c++;
1668 } while (c <= target);
1669 return nid;
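/*
 * Worked example (added for illustration, not in the original source):
 * with pol->v.nodes = {0,2,5} and off = 7, nnodes = 3 and target = 1,
 * so the loop above advances twice from -1 and returns node 2, the
 * second node of the mask. A given offset therefore always interleaves
 * to the same node.
 */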
1672 /* Determine a node number for interleave */
1673 static inline unsigned interleave_nid(struct mempolicy *pol,
1674 struct vm_area_struct *vma, unsigned long addr, int shift)
1676 if (vma) {
1677 unsigned long off;
1680 * for small pages, there is no difference between
1681 * shift and PAGE_SHIFT, so the bit-shift is safe.
1682 * for huge pages, since vm_pgoff is in units of small
1683 * pages, we need to shift off the always 0 bits to get
1684 * a useful offset.
1686 BUG_ON(shift < PAGE_SHIFT);
1687 off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
1688 off += (addr - vma->vm_start) >> shift;
1689 return offset_il_node(pol, vma, off);
1690 } else
1691 return interleave_nodes(pol);
1695 * Return the bit number of a random bit set in the nodemask.
1696 * (returns -1 if nodemask is empty)
1698 int node_random(const nodemask_t *maskp)
1700 int w, bit = -1;
1702 w = nodes_weight(*maskp);
1703 if (w)
1704 bit = bitmap_ord_to_pos(maskp->bits,
1705 get_random_int() % w, MAX_NUMNODES);
1706 return bit;
1709 #ifdef CONFIG_HUGETLBFS
1711 * huge_zonelist(@vma, @addr, @gfp_flags, @mpol)
1712 * @vma = virtual memory area whose policy is sought
1713 * @addr = address in @vma for shared policy lookup and interleave policy
1714 * @gfp_flags = for requested zone
1715 * @mpol = pointer to mempolicy pointer for reference counted mempolicy
1716 * @nodemask = pointer to nodemask pointer for MPOL_BIND nodemask
1718 * Returns a zonelist suitable for a huge page allocation and a pointer
1719 * to the struct mempolicy for conditional unref after allocation.
1720 * If the effective policy is 'BIND', returns a pointer to the mempolicy's
1721 * @nodemask for filtering the zonelist.
1723 * Must be protected by get_mems_allowed()
1725 struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
1726 gfp_t gfp_flags, struct mempolicy **mpol,
1727 nodemask_t **nodemask)
1729 struct zonelist *zl;
1731 *mpol = get_vma_policy(current, vma, addr);
1732 *nodemask = NULL; /* assume !MPOL_BIND */
1734 if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
1735 zl = node_zonelist(interleave_nid(*mpol, vma, addr,
1736 huge_page_shift(hstate_vma(vma))), gfp_flags);
1737 } else {
1738 zl = policy_zonelist(gfp_flags, *mpol, numa_node_id());
1739 if ((*mpol)->mode == MPOL_BIND)
1740 *nodemask = &(*mpol)->v.nodes;
1742 return zl;
1746 * init_nodemask_of_mempolicy
1748 * If the current task's mempolicy is "default" [NULL], return 'false'
1749 * to indicate default policy. Otherwise, extract the policy nodemask
1750 * for 'bind' or 'interleave' policy into the argument nodemask, or
1751 * initialize the argument nodemask to contain the single node for
1752 * 'preferred' or 'local' policy and return 'true' to indicate presence
1753 * of non-default mempolicy.
1755 * We don't bother with reference counting the mempolicy [mpol_get/put]
1756 * because the current task is examining its own mempolicy and a task's
1757 * mempolicy is only ever changed by the task itself.
1759 * N.B., it is the caller's responsibility to free a returned nodemask.
1761 bool init_nodemask_of_mempolicy(nodemask_t *mask)
1763 struct mempolicy *mempolicy;
1764 int nid;
1766 if (!(mask && current->mempolicy))
1767 return false;
1769 task_lock(current);
1770 mempolicy = current->mempolicy;
1771 switch (mempolicy->mode) {
1772 case MPOL_PREFERRED:
1773 if (mempolicy->flags & MPOL_F_LOCAL)
1774 nid = numa_node_id();
1775 else
1776 nid = mempolicy->v.preferred_node;
1777 init_nodemask_of_node(mask, nid);
1778 break;
1780 case MPOL_BIND:
1781 /* Fall through */
1782 case MPOL_INTERLEAVE:
1783 *mask = mempolicy->v.nodes;
1784 break;
1786 default:
1787 BUG();
1789 task_unlock(current);
1791 return true;
1793 #endif
1796 * mempolicy_nodemask_intersects
1798 * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
1799 * policy. Otherwise, check for intersection between mask and the policy
1800 * nodemask for 'bind' or 'interleave' policy. For 'preferred' or 'local'
1801 * policy, always return true since it may allocate elsewhere on fallback.
1803 * Takes task_lock(tsk) to prevent freeing of its mempolicy.
1805 bool mempolicy_nodemask_intersects(struct task_struct *tsk,
1806 const nodemask_t *mask)
1808 struct mempolicy *mempolicy;
1809 bool ret = true;
1811 if (!mask)
1812 return ret;
1813 task_lock(tsk);
1814 mempolicy = tsk->mempolicy;
1815 if (!mempolicy)
1816 goto out;
1818 switch (mempolicy->mode) {
1819 case MPOL_PREFERRED:
1821 * MPOL_PREFERRED and MPOL_F_LOCAL only express preferred nodes to
1822 * allocate from; the task may fall back to other nodes when oom.
1823 * Thus, it's possible for tsk to have allocated memory from
1824 * nodes in mask.
1826 break;
1827 case MPOL_BIND:
1828 case MPOL_INTERLEAVE:
1829 ret = nodes_intersects(mempolicy->v.nodes, *mask);
1830 break;
1831 default:
1832 BUG();
1834 out:
1835 task_unlock(tsk);
1836 return ret;
1839 /* Allocate a page in interleaved policy.
1840 Own path because it needs to do special accounting. */
1841 static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
1842 unsigned nid)
1844 struct zonelist *zl;
1845 struct page *page;
1847 zl = node_zonelist(nid, gfp);
1848 page = __alloc_pages(gfp, order, zl);
1849 if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0]))
1850 inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
1851 return page;
1855 * alloc_pages_vma - Allocate a page for a VMA.
1857 * @gfp:
1858 * %GFP_USER user allocation.
1859 * %GFP_KERNEL kernel allocations,
1860 * %GFP_HIGHMEM highmem/user allocations,
1861 * %GFP_FS allocation should not call back into a file system.
1862 * %GFP_ATOMIC don't sleep.
1864 * @order:Order of the GFP allocation.
1865 * @vma: Pointer to VMA or NULL if not available.
1866 * @addr: Virtual Address of the allocation. Must be inside the VMA.
1868 * This function allocates a page from the kernel page pool and applies
1869 * a NUMA policy associated with the VMA or the current process.
1870 * When VMA is not NULL caller must hold down_read on the mmap_sem of the
1871 * mm_struct of the VMA to prevent it from going away. Should be used for
1872 * all allocations for pages that will be mapped into
1873 * user space. Returns NULL when no page can be allocated.
1875 * Should be called with the mmap_sem of the vma held.
1877 struct page *
1878 alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
1879 unsigned long addr, int node)
1881 struct mempolicy *pol;
1882 struct zonelist *zl;
1883 struct page *page;
1884 unsigned int cpuset_mems_cookie;
1886 retry_cpuset:
1887 pol = get_vma_policy(current, vma, addr);
1888 cpuset_mems_cookie = get_mems_allowed();
1890 if (unlikely(pol->mode == MPOL_INTERLEAVE)) {
1891 unsigned nid;
1893 nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
1894 mpol_cond_put(pol);
1895 page = alloc_page_interleave(gfp, order, nid);
1896 if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
1897 goto retry_cpuset;
1899 return page;
1901 zl = policy_zonelist(gfp, pol, node);
1902 if (unlikely(mpol_needs_cond_ref(pol))) {
1904 * slow path: ref counted shared policy
1906 struct page *page = __alloc_pages_nodemask(gfp, order,
1907 zl, policy_nodemask(gfp, pol));
1908 __mpol_put(pol);
1909 if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
1910 goto retry_cpuset;
1911 return page;
1914 * fast path: default or task policy
1916 page = __alloc_pages_nodemask(gfp, order, zl,
1917 policy_nodemask(gfp, pol));
1918 if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
1919 goto retry_cpuset;
1920 return page;
1924 * alloc_pages_current - Allocate pages.
1926 * @gfp:
1927 * %GFP_USER user allocation,
1928 * %GFP_KERNEL kernel allocation,
1929 * %GFP_HIGHMEM highmem allocation,
1930 * %GFP_FS don't call back into a file system.
1931 * %GFP_ATOMIC don't sleep.
1932 * @order: Power of two of allocation size in pages. 0 is a single page.
1934 * Allocate a page from the kernel page pool. When not in
1935 * interrupt context, apply the current process' NUMA policy.
1936 * Returns NULL when no page can be allocated.
1938 * Don't call cpuset_update_task_memory_state() unless
1939 * 1) it's ok to take cpuset_sem (can WAIT), and
1940 * 2) allocating for current task (not interrupt).
1942 struct page *alloc_pages_current(gfp_t gfp, unsigned order)
1944 struct mempolicy *pol = current->mempolicy;
1945 struct page *page;
1946 unsigned int cpuset_mems_cookie;
1948 if (!pol || in_interrupt() || (gfp & __GFP_THISNODE))
1949 pol = &default_policy;
1951 retry_cpuset:
1952 cpuset_mems_cookie = get_mems_allowed();
1955 * No reference counting needed for current->mempolicy
1956 * nor system default_policy
1958 if (pol->mode == MPOL_INTERLEAVE)
1959 page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
1960 else
1961 page = __alloc_pages_nodemask(gfp, order,
1962 policy_zonelist(gfp, pol, numa_node_id()),
1963 policy_nodemask(gfp, pol));
1965 if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
1966 goto retry_cpuset;
1968 return page;
1970 EXPORT_SYMBOL(alloc_pages_current);
1973 * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
1974 * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
1975 * with the mems_allowed returned by cpuset_mems_allowed(). This
1976 * keeps mempolicies cpuset relative after its cpuset moves. See
1977 * further kernel/cpuset.c update_nodemask().
1979 * current's mempolicy may be rebound by another task (the task that changes
1980 * the cpuset's mems), so we needn't do rebind work for the current task.
1983 /* Slow path of a mempolicy duplicate */
1984 struct mempolicy *__mpol_dup(struct mempolicy *old)
1986 struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
1988 if (!new)
1989 return ERR_PTR(-ENOMEM);
1991 /* task's mempolicy is protected by alloc_lock */
1992 if (old == current->mempolicy) {
1993 task_lock(current);
1994 *new = *old;
1995 task_unlock(current);
1996 } else
1997 *new = *old;
1999 rcu_read_lock();
2000 if (current_cpuset_is_being_rebound()) {
2001 nodemask_t mems = cpuset_mems_allowed(current);
2002 if (new->flags & MPOL_F_REBINDING)
2003 mpol_rebind_policy(new, &mems, MPOL_REBIND_STEP2);
2004 else
2005 mpol_rebind_policy(new, &mems, MPOL_REBIND_ONCE);
2007 rcu_read_unlock();
2008 atomic_set(&new->refcnt, 1);
2009 return new;
2013 * If *frompol needs [has] an extra ref, copy *frompol to *tompol,
2014 * eliminate the MPOL_F_* flags that require conditional ref and
2015 * [NOTE!!!] drop the extra ref. Not safe to reference *frompol directly
2016 * after return. Use the returned value.
2018 * Allows use of a mempolicy for, e.g., multiple allocations with a single
2019 * policy lookup, even if the policy needs/has extra ref on lookup.
2020 * shmem_readahead needs this.
2022 struct mempolicy *__mpol_cond_copy(struct mempolicy *tompol,
2023 struct mempolicy *frompol)
2025 if (!mpol_needs_cond_ref(frompol))
2026 return frompol;
2028 *tompol = *frompol;
2029 tompol->flags &= ~MPOL_F_SHARED; /* copy doesn't need unref */
2030 __mpol_put(frompol);
2031 return tompol;
2034 /* Slow path of a mempolicy comparison */
2035 bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
2037 if (!a || !b)
2038 return false;
2039 if (a->mode != b->mode)
2040 return false;
2041 if (a->flags != b->flags)
2042 return false;
2043 if (mpol_store_user_nodemask(a))
2044 if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
2045 return false;
2047 switch (a->mode) {
2048 case MPOL_BIND:
2049 /* Fall through */
2050 case MPOL_INTERLEAVE:
2051 return !!nodes_equal(a->v.nodes, b->v.nodes);
2052 case MPOL_PREFERRED:
2053 return a->v.preferred_node == b->v.preferred_node;
2054 default:
2055 BUG();
2056 return false;
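/*
 * Illustrative sketch (hypothetical helper): a typical user is the VMA
 * merging code, which only merges two areas whose policies compare equal.
 * The mpol_equal() inline short-circuits identical pointers and falls
 * through to __mpol_equal() above when they differ.
 */
static __maybe_unused bool vma_policies_match(struct vm_area_struct *a,
					      struct vm_area_struct *b)
{
	return mpol_equal(vma_policy(a), vma_policy(b));
}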
2061 * Shared memory backing store policy support.
2063 * Remember policies even when nobody has shared memory mapped.
2064 * The policies are kept in Red-Black tree linked from the inode.
2065 * They are protected by the sp->mutex, which should be held
2066 * for any accesses to the tree.
2069 /* lookup first element intersecting start-end */
2070 /* Caller holds sp->mutex */
2071 static struct sp_node *
2072 sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
2074 struct rb_node *n = sp->root.rb_node;
2076 while (n) {
2077 struct sp_node *p = rb_entry(n, struct sp_node, nd);
2079 if (start >= p->end)
2080 n = n->rb_right;
2081 else if (end <= p->start)
2082 n = n->rb_left;
2083 else
2084 break;
2086 if (!n)
2087 return NULL;
2088 for (;;) {
2089 struct sp_node *w = NULL;
2090 struct rb_node *prev = rb_prev(n);
2091 if (!prev)
2092 break;
2093 w = rb_entry(prev, struct sp_node, nd);
2094 if (w->end <= start)
2095 break;
2096 n = prev;
2098 return rb_entry(n, struct sp_node, nd);
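/*
 * Worked example (illustrative): with nodes covering [0,4) and [8,12) in
 * the tree, sp_lookup(sp, 2, 10) descends to some intersecting node and
 * then walks rb_prev() back while the previous node still ends after
 * 'start', so it returns the lowest intersecting node, here [0,4).
 */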
2101 /* Insert a new shared policy into the list. */
2102 /* Caller holds sp->mutex */
2103 static void sp_insert(struct shared_policy *sp, struct sp_node *new)
2105 struct rb_node **p = &sp->root.rb_node;
2106 struct rb_node *parent = NULL;
2107 struct sp_node *nd;
2109 while (*p) {
2110 parent = *p;
2111 nd = rb_entry(parent, struct sp_node, nd);
2112 if (new->start < nd->start)
2113 p = &(*p)->rb_left;
2114 else if (new->end > nd->end)
2115 p = &(*p)->rb_right;
2116 else
2117 BUG();
2119 rb_link_node(&new->nd, parent, p);
2120 rb_insert_color(&new->nd, &sp->root);
2121 pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
2122 new->policy ? new->policy->mode : 0);
2125 /* Find shared policy intersecting idx */
2126 struct mempolicy *
2127 mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
2129 struct mempolicy *pol = NULL;
2130 struct sp_node *sn;
2132 if (!sp->root.rb_node)
2133 return NULL;
2134 mutex_lock(&sp->mutex);
2135 sn = sp_lookup(sp, idx, idx+1);
2136 if (sn) {
2137 mpol_get(sn->policy);
2138 pol = sn->policy;
2140 mutex_unlock(&sp->mutex);
2141 return pol;
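/*
 * Illustrative sketch (hypothetical helper): how a shared-memory
 * filesystem's get_policy() operation might pick an allocation node for a
 * faulting page index.  The lookup returns a referenced policy (or NULL for
 * the default), so the reference must be dropped when done.
 */
static __maybe_unused int shared_policy_node_example(struct shared_policy *sp,
						     pgoff_t index)
{
	struct mempolicy *pol = mpol_shared_policy_lookup(sp, index);
	int nid = numa_node_id();

	if (pol) {
		if (pol->mode == MPOL_PREFERRED && !(pol->flags & MPOL_F_LOCAL))
			nid = pol->v.preferred_node;
		mpol_put(pol);		/* drop the lookup's reference */
	}
	return nid;
}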
2144 static void sp_free(struct sp_node *n)
2146 mpol_put(n->policy);
2147 kmem_cache_free(sn_cache, n);
2150 static void sp_delete(struct shared_policy *sp, struct sp_node *n)
2152 pr_debug("deleting %lx-%lx\n", n->start, n->end);
2153 rb_erase(&n->nd, &sp->root);
2154 sp_free(n);
2157 static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2158 struct mempolicy *pol)
2160 struct sp_node *n;
2161 struct mempolicy *newpol;
2163 n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2164 if (!n)
2165 return NULL;
2167 newpol = mpol_dup(pol);
2168 if (IS_ERR(newpol)) {
2169 kmem_cache_free(sn_cache, n);
2170 return NULL;
2172 newpol->flags |= MPOL_F_SHARED;
2174 n->start = start;
2175 n->end = end;
2176 n->policy = newpol;
2178 return n;
2181 /* Replace a policy range. */
2182 static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
2183 unsigned long end, struct sp_node *new)
2185 struct sp_node *n;
2186 int ret = 0;
2188 mutex_lock(&sp->mutex);
2189 n = sp_lookup(sp, start, end);
2190 /* Take care of old policies in the same range. */
2191 while (n && n->start < end) {
2192 struct rb_node *next = rb_next(&n->nd);
2193 if (n->start >= start) {
2194 if (n->end <= end)
2195 sp_delete(sp, n);
2196 else
2197 n->start = end;
2198 } else {
2199 /* Old policy spanning whole new range. */
2200 if (n->end > end) {
2201 struct sp_node *new2;
2202 new2 = sp_alloc(end, n->end, n->policy);
2203 if (!new2) {
2204 ret = -ENOMEM;
2205 goto out;
2207 n->end = start;
2208 sp_insert(sp, new2);
2209 break;
2210 } else
2211 n->end = start;
2213 if (!next)
2214 break;
2215 n = rb_entry(next, struct sp_node, nd);
2217 if (new)
2218 sp_insert(sp, new);
2219 out:
2220 mutex_unlock(&sp->mutex);
2221 return ret;
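/*
 * Worked example (illustrative): installing a new node for [2,6) while the
 * tree holds a single node [0,8) takes the "old policy spanning whole new
 * range" branch above: the old node is trimmed to [0,2), a clone of its
 * policy is inserted for [6,8), and finally the new [2,6) node is inserted.
 */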
2225 * mpol_shared_policy_init - initialize shared policy for inode
2226 * @sp: pointer to inode shared policy
2227 * @mpol: struct mempolicy to install
2229 * Install non-NULL @mpol in inode's shared policy rb-tree.
2230 * On entry, the current task has a reference on a non-NULL @mpol.
2231 * This must be released on exit.
2232 * This is called from get_inode() paths, so GFP_KERNEL allocations can be used.
2234 void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
2236 int ret;
2238 sp->root = RB_ROOT; /* empty tree == default mempolicy */
2239 mutex_init(&sp->mutex);
2241 if (mpol) {
2242 struct vm_area_struct pvma;
2243 struct mempolicy *new;
2244 NODEMASK_SCRATCH(scratch);
2246 if (!scratch)
2247 goto put_mpol;
2248 /* contextualize the tmpfs mount point mempolicy */
2249 new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
2250 if (IS_ERR(new))
2251 goto free_scratch; /* no valid nodemask intersection */
2253 task_lock(current);
2254 ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
2255 task_unlock(current);
2256 if (ret)
2257 goto put_new;
2259 /* Create pseudo-vma that contains just the policy */
2260 memset(&pvma, 0, sizeof(struct vm_area_struct));
2261 pvma.vm_end = TASK_SIZE; /* policy covers entire file */
2262 mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
2264 put_new:
2265 mpol_put(new); /* drop initial ref */
2266 free_scratch:
2267 NODEMASK_SCRATCH_FREE(scratch);
2268 put_mpol:
2269 mpol_put(mpol); /* drop our incoming ref on sb mpol */
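/*
 * Illustrative sketch (hypothetical helper): how a tmpfs-like filesystem
 * might seed a new inode's shared policy from its mount-time policy.  The
 * init above consumes one reference on the mempolicy it is handed, so the
 * caller takes one first; a NULL policy simply leaves the tree empty.
 */
static __maybe_unused void shared_policy_init_example(struct shared_policy *sp,
						      struct mempolicy *sb_mpol)
{
	if (sb_mpol)
		mpol_get(sb_mpol);	/* reference consumed by init */
	mpol_shared_policy_init(sp, sb_mpol);
}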
2273 int mpol_set_shared_policy(struct shared_policy *info,
2274 struct vm_area_struct *vma, struct mempolicy *npol)
2276 int err;
2277 struct sp_node *new = NULL;
2278 unsigned long sz = vma_pages(vma);
2280 pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
2281 vma->vm_pgoff,
2282 sz, npol ? npol->mode : -1,
2283 npol ? npol->flags : -1,
2284 npol ? nodes_addr(npol->v.nodes)[0] : -1);
2286 if (npol) {
2287 new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
2288 if (!new)
2289 return -ENOMEM;
2291 err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
2292 if (err && new)
2293 sp_free(new);
2294 return err;
2297 /* Free a backing policy store on inode delete. */
2298 void mpol_free_shared_policy(struct shared_policy *p)
2300 struct sp_node *n;
2301 struct rb_node *next;
2303 if (!p->root.rb_node)
2304 return;
2305 mutex_lock(&p->mutex);
2306 next = rb_first(&p->root);
2307 while (next) {
2308 n = rb_entry(next, struct sp_node, nd);
2309 next = rb_next(&n->nd);
2310 sp_delete(p, n);
2312 mutex_unlock(&p->mutex);
2315 /* assumes fs == KERNEL_DS */
2316 void __init numa_policy_init(void)
2318 nodemask_t interleave_nodes;
2319 unsigned long largest = 0;
2320 int nid, prefer = 0;
2322 policy_cache = kmem_cache_create("numa_policy",
2323 sizeof(struct mempolicy),
2324 0, SLAB_PANIC, NULL);
2326 sn_cache = kmem_cache_create("shared_policy_node",
2327 sizeof(struct sp_node),
2328 0, SLAB_PANIC, NULL);
2331 * Set interleaving policy for system init. Interleaving is only
2332 * enabled across suitably sized nodes (default is >= 16MB), or
2333 * fall back to the largest node if they're all smaller.
2335 nodes_clear(interleave_nodes);
2336 for_each_node_state(nid, N_HIGH_MEMORY) {
2337 unsigned long total_pages = node_present_pages(nid);
2339 /* Preserve the largest node */
2340 if (largest < total_pages) {
2341 largest = total_pages;
2342 prefer = nid;
2345 /* Interleave this node? */
2346 if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2347 node_set(nid, interleave_nodes);
2350 /* All too small, use the largest */
2351 if (unlikely(nodes_empty(interleave_nodes)))
2352 node_set(prefer, interleave_nodes);
2354 if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
2355 printk(KERN_WARNING "numa_policy_init: interleaving failed\n");
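/*
 * Worked example of the threshold above (assuming 4 KiB pages): a node is
 * interleaved only when node_present_pages(nid) << PAGE_SHIFT reaches
 * 16 MiB, i.e. at least 4096 present pages; if every node is smaller, only
 * the single largest node ends up in the boot-time interleave mask.
 */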
2358 /* Reset policy of current process to default */
2359 void numa_default_policy(void)
2361 do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
2365 * Parse and format mempolicy from/to strings
2369 * "local" is pseudo-policy: MPOL_PREFERRED with MPOL_F_LOCAL flag
2370 * Used only for mpol_parse_str() and mpol_to_str()
2372 #define MPOL_LOCAL MPOL_MAX
2373 static const char * const policy_modes[] =
2375 [MPOL_DEFAULT] = "default",
2376 [MPOL_PREFERRED] = "prefer",
2377 [MPOL_BIND] = "bind",
2378 [MPOL_INTERLEAVE] = "interleave",
2379 [MPOL_LOCAL] = "local"
2383 #ifdef CONFIG_TMPFS
2385 * mpol_parse_str - parse string to mempolicy
2386 * @str: string containing mempolicy to parse
2387 * @mpol: pointer to struct mempolicy pointer, returned on success.
2388 * @no_context: flag indicating whether to "contextualize" the mempolicy
2390 * Format of input:
2391 * <mode>[=<flags>][:<nodelist>]
2393 * if @no_context is true, save the input nodemask in w.user_nodemask in
2394 * the returned mempolicy. This will be used to "clone" the mempolicy in
2395 * a specific context [cpuset] at a later time. Used to parse tmpfs mpol
2396 * mount option. Note that if 'static' or 'relative' mode flags were
2397 * specified, the input nodemask will already have been saved. Saving
2398 * it again is redundant, but safe.
2400 * On success, returns 0, else 1
2402 int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context)
2404 struct mempolicy *new = NULL;
2405 unsigned short mode;
2406 unsigned short uninitialized_var(mode_flags);
2407 nodemask_t nodes;
2408 char *nodelist = strchr(str, ':');
2409 char *flags = strchr(str, '=');
2410 int err = 1;
2412 if (nodelist) {
2413 /* NUL-terminate mode or flags string */
2414 *nodelist++ = '\0';
2415 if (nodelist_parse(nodelist, nodes))
2416 goto out;
2417 if (!nodes_subset(nodes, node_states[N_HIGH_MEMORY]))
2418 goto out;
2419 } else
2420 nodes_clear(nodes);
2422 if (flags)
2423 *flags++ = '\0'; /* terminate mode string */
2425 for (mode = 0; mode <= MPOL_LOCAL; mode++) {
2426 if (!strcmp(str, policy_modes[mode])) {
2427 break;
2430 if (mode > MPOL_LOCAL)
2431 goto out;
2433 switch (mode) {
2434 case MPOL_PREFERRED:
2436 * Insist on a nodelist of one node only
2438 if (nodelist) {
2439 char *rest = nodelist;
2440 while (isdigit(*rest))
2441 rest++;
2442 if (*rest)
2443 goto out;
2445 break;
2446 case MPOL_INTERLEAVE:
2448 * Default to online nodes with memory if no nodelist
2450 if (!nodelist)
2451 nodes = node_states[N_HIGH_MEMORY];
2452 break;
2453 case MPOL_LOCAL:
2455 * Don't allow a nodelist; mpol_new() checks flags
2457 if (nodelist)
2458 goto out;
2459 mode = MPOL_PREFERRED;
2460 break;
2461 case MPOL_DEFAULT:
2463 * Insist on an empty nodelist
2465 if (!nodelist)
2466 err = 0;
2467 goto out;
2468 case MPOL_BIND:
2470 * Insist on a nodelist
2472 if (!nodelist)
2473 goto out;
2476 mode_flags = 0;
2477 if (flags) {
2479 * Currently, we only support two mutually exclusive
2480 * mode flags.
2482 if (!strcmp(flags, "static"))
2483 mode_flags |= MPOL_F_STATIC_NODES;
2484 else if (!strcmp(flags, "relative"))
2485 mode_flags |= MPOL_F_RELATIVE_NODES;
2486 else
2487 goto out;
2490 new = mpol_new(mode, mode_flags, &nodes);
2491 if (IS_ERR(new))
2492 goto out;
2494 if (no_context) {
2495 /* save for contextualization */
2496 new->w.user_nodemask = nodes;
2497 } else {
2498 int ret;
2499 NODEMASK_SCRATCH(scratch);
2500 if (scratch) {
2501 task_lock(current);
2502 ret = mpol_set_nodemask(new, &nodes, scratch);
2503 task_unlock(current);
2504 } else
2505 ret = -ENOMEM;
2506 NODEMASK_SCRATCH_FREE(scratch);
2507 if (ret) {
2508 mpol_put(new);
2509 goto out;
2512 err = 0;
2514 out:
2515 /* Restore string for error message */
2516 if (nodelist)
2517 *--nodelist = ':';
2518 if (flags)
2519 *--flags = '=';
2520 if (!err)
2521 *mpol = new;
2522 return err;
2524 #endif /* CONFIG_TMPFS */
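/*
 * Example inputs accepted by mpol_parse_str() above (illustrative), in the
 * <mode>[=<flags>][:<nodelist>] form used by the tmpfs "mpol=" mount option:
 *
 *	"default"		MPOL_DEFAULT, nodelist must be empty
 *	"prefer:2"		MPOL_PREFERRED, exactly one node allowed
 *	"bind:0-3"		MPOL_BIND, nodelist required
 *	"interleave"		MPOL_INTERLEAVE over all nodes with memory
 *	"interleave=static:0,2"	MPOL_INTERLEAVE | MPOL_F_STATIC_NODES
 *	"local"			MPOL_PREFERRED with MPOL_F_LOCAL
 */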
2527 * mpol_to_str - format a mempolicy structure for printing
2528 * @buffer: to contain formatted mempolicy string
2529 * @maxlen: length of @buffer
2530 * @pol: pointer to mempolicy to be formatted
2531 * @no_context: "context free" mempolicy - use nodemask in w.user_nodemask
2533 * Convert a mempolicy into a string.
2534 * Returns the number of characters in buffer (if positive)
2535 * or an error (negative)
2537 int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, int no_context)
2539 char *p = buffer;
2540 int l;
2541 nodemask_t nodes;
2542 unsigned short mode;
2543 unsigned short flags = pol ? pol->flags : 0;
2546 * Sanity check: room for longest mode, flag and some nodes
2548 VM_BUG_ON(maxlen < strlen("interleave") + strlen("relative") + 16);
2550 if (!pol || pol == &default_policy)
2551 mode = MPOL_DEFAULT;
2552 else
2553 mode = pol->mode;
2555 switch (mode) {
2556 case MPOL_DEFAULT:
2557 nodes_clear(nodes);
2558 break;
2560 case MPOL_PREFERRED:
2561 nodes_clear(nodes);
2562 if (flags & MPOL_F_LOCAL)
2563 mode = MPOL_LOCAL; /* pseudo-policy */
2564 else
2565 node_set(pol->v.preferred_node, nodes);
2566 break;
2568 case MPOL_BIND:
2569 /* Fall through */
2570 case MPOL_INTERLEAVE:
2571 if (no_context)
2572 nodes = pol->w.user_nodemask;
2573 else
2574 nodes = pol->v.nodes;
2575 break;
2577 default:
2578 return -EINVAL;
2581 l = strlen(policy_modes[mode]);
2582 if (buffer + maxlen < p + l + 1)
2583 return -ENOSPC;
2585 strcpy(p, policy_modes[mode]);
2586 p += l;
2588 if (flags & MPOL_MODE_FLAGS) {
2589 if (buffer + maxlen < p + 2)
2590 return -ENOSPC;
2591 *p++ = '=';
2594 * Currently, the only defined flags are mutually exclusive
2596 if (flags & MPOL_F_STATIC_NODES)
2597 p += snprintf(p, buffer + maxlen - p, "static");
2598 else if (flags & MPOL_F_RELATIVE_NODES)
2599 p += snprintf(p, buffer + maxlen - p, "relative");
2602 if (!nodes_empty(nodes)) {
2603 if (buffer + maxlen < p + 2)
2604 return -ENOSPC;
2605 *p++ = ':';
2606 p += nodelist_scnprintf(p, buffer + maxlen - p, nodes);
2608 return p - buffer;
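/*
 * Example outputs (illustrative), mirroring the parse format above:
 * "default", "local", "prefer:1", "bind:1-3" and "interleave=relative:0-2".
 */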