mm/mempolicy.c
1 /*
2 * Simple NUMA memory policy for the Linux kernel.
4 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
5 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
6 * Subject to the GNU Public License, version 2.
8 * NUMA policy allows the user to give hints in which node(s) memory should
9 * be allocated.
11 * Support four policies per VMA and per process:
13 * The VMA policy has priority over the process policy for a page fault.
15 * interleave Allocate memory interleaved over a set of nodes,
16 * with normal fallback if it fails.
17 * For VMA based allocations this interleaves based on the
18 * offset into the backing object or offset into the mapping
19 * for anonymous memory. For process policy a process counter
20 * is used.
22 * bind Only allocate memory on a specific set of nodes,
23 * no fallback.
24 * FIXME: memory is allocated starting with the first node
25 * to the last. It would be better if bind would truly restrict
26 * the allocation to memory nodes instead
28 * preferred Try a specific node first before normal fallback.
29 * As a special case NUMA_NO_NODE here means do the allocation
30 * on the local CPU. This is normally identical to default,
31 * but useful to set in a VMA when you have a non default
32 * process policy.
34 * default Allocate on the local node first, or when on a VMA
35 * use the process policy. This is what Linux always did
36 * in a NUMA aware kernel and still does by, ahem, default.
38 * The process policy is applied for most non interrupt memory allocations
39 * in that process' context. Interrupts ignore the policies and always
40 * try to allocate on the local CPU. The VMA policy is only applied for memory
41 * allocations for a VMA in the VM.
43 * Currently there are a few corner cases in swapping where the policy
44 * is not applied, but the majority should be handled. When process policy
45 * is used it is not remembered over swap outs/swap ins.
47 * Only the highest zone in the zone hierarchy gets policied. Allocations
48 * requesting a lower zone just use default policy. This implies that
49 * on systems with highmem, kernel lowmem allocations don't get policied.
50 * Same with GFP_DMA allocations.
52 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
53 * all users and remembered even when nobody has memory mapped.
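 *
 * Example (userspace sketch): these policies are selected with the
 * set_mempolicy(2) and mbind(2) system calls implemented later in this
 * file, e.g. via the <numaif.h> wrappers shipped with libnuma. Assuming a
 * machine with memory nodes 0 and 1 (mask 0x3 covers both nodes, 0x2 is
 * node 1 only) and an existing mapping buf of length len, the following
 * interleaves the task's future allocations and then binds the mapping to
 * node 1; maxnode is the number of bits in the mask and error handling is
 * omitted:
 *
 *	#include <numaif.h>
 *
 *	unsigned long both_nodes = 0x3;
 *	unsigned long node1_only = 0x2;
 *
 *	set_mempolicy(MPOL_INTERLEAVE, &both_nodes, sizeof(both_nodes) * 8);
 *	mbind(buf, len, MPOL_BIND, &node1_only, sizeof(node1_only) * 8,
 *	      MPOL_MF_MOVE);
 */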
56 /* Notebook:
57 fix mmap readahead to honour policy and enable policy for any page cache
58 object
59 statistics for bigpages
60 global policy for page cache? currently it uses process policy. Requires
61 first item above.
62 handle mremap for shared memory (currently ignored for the policy)
63 grows down?
64 make bind policy root only? It can trigger oom much faster and the
65 kernel is not always grateful with that.
68 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
70 #include <linux/mempolicy.h>
71 #include <linux/mm.h>
72 #include <linux/highmem.h>
73 #include <linux/hugetlb.h>
74 #include <linux/kernel.h>
75 #include <linux/sched.h>
76 #include <linux/nodemask.h>
77 #include <linux/cpuset.h>
78 #include <linux/slab.h>
79 #include <linux/string.h>
80 #include <linux/export.h>
81 #include <linux/nsproxy.h>
82 #include <linux/interrupt.h>
83 #include <linux/init.h>
84 #include <linux/compat.h>
85 #include <linux/swap.h>
86 #include <linux/seq_file.h>
87 #include <linux/proc_fs.h>
88 #include <linux/migrate.h>
89 #include <linux/ksm.h>
90 #include <linux/rmap.h>
91 #include <linux/security.h>
92 #include <linux/syscalls.h>
93 #include <linux/ctype.h>
94 #include <linux/mm_inline.h>
95 #include <linux/mmu_notifier.h>
96 #include <linux/printk.h>
98 #include <asm/tlbflush.h>
99 #include <asm/uaccess.h>
100 #include <linux/random.h>
102 #include "internal.h"
104 /* Internal flags */
105 #define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0) /* Skip checks for continuous vmas */
106 #define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1) /* Invert check for nodemask */
108 static struct kmem_cache *policy_cache;
109 static struct kmem_cache *sn_cache;
111 /* Highest zone. A specific allocation for a zone below that is not
112 policied. */
113 enum zone_type policy_zone = 0;
116 * run-time system-wide default policy => local allocation
118 static struct mempolicy default_policy = {
119 .refcnt = ATOMIC_INIT(1), /* never free it */
120 .mode = MPOL_PREFERRED,
121 .flags = MPOL_F_LOCAL,
124 static struct mempolicy preferred_node_policy[MAX_NUMNODES];
126 struct mempolicy *get_task_policy(struct task_struct *p)
128 struct mempolicy *pol = p->mempolicy;
129 int node;
131 if (pol)
132 return pol;
134 node = numa_node_id();
135 if (node != NUMA_NO_NODE) {
136 pol = &preferred_node_policy[node];
137 /* preferred_node_policy is not initialised early in boot */
138 if (pol->mode)
139 return pol;
142 return &default_policy;
145 static const struct mempolicy_operations {
146 int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
148 * If read-side task has no lock to protect task->mempolicy, write-side
149 * task will rebind the task->mempolicy in two steps. The first step is
150 * setting all the newly allowed nodes, and the second step is clearing all
151 * the disallowed nodes. In this way, we can avoid finding no node to
152 * allocate a page from.
153 * If we have a lock to protect task->mempolicy in read-side, we do
154 * rebind directly.
156 * step:
157 * MPOL_REBIND_ONCE - do rebind work at once
158 * MPOL_REBIND_STEP1 - set all the newly allowed nodes
159 * MPOL_REBIND_STEP2 - clean all the disallowed nodes
161 void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes,
162 enum mpol_rebind_step step);
163 } mpol_ops[MPOL_MAX];
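/*
 * Worked example of the two-step rebind described above (illustrative
 * only): when a cpuset is rebound from nodes {0,1} to nodes {2,3},
 * MPOL_REBIND_STEP1 first grows the policy's nodemask to {0,1,2,3}, and
 * only then does MPOL_REBIND_STEP2 shrink it to {2,3}. A lockless reader
 * of task->mempolicy therefore always sees a non-empty mask ({0,1},
 * {0,1,2,3} or {2,3}), never an empty intermediate state.
 */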
165 static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
167 return pol->flags & MPOL_MODE_FLAGS;
170 static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
171 const nodemask_t *rel)
173 nodemask_t tmp;
174 nodes_fold(tmp, *orig, nodes_weight(*rel));
175 nodes_onto(*ret, tmp, *rel);
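/*
 * Worked example (illustrative, assuming the usual nodes_fold()/nodes_onto()
 * semantics): for a user-supplied relative mask *orig = {0,2} and an allowed
 * set *rel = {4,5,6} (weight 3), nodes_fold() keeps {0,2} since both bits
 * are below 3, and nodes_onto() maps relative bit 0 to node 4 and relative
 * bit 2 to node 6, so *ret = {4,6}.
 */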
178 static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
180 if (nodes_empty(*nodes))
181 return -EINVAL;
182 pol->v.nodes = *nodes;
183 return 0;
186 static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
188 if (!nodes)
189 pol->flags |= MPOL_F_LOCAL; /* local allocation */
190 else if (nodes_empty(*nodes))
191 return -EINVAL; /* no allowed nodes */
192 else
193 pol->v.preferred_node = first_node(*nodes);
194 return 0;
197 static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
199 if (nodes_empty(*nodes))
200 return -EINVAL;
201 pol->v.nodes = *nodes;
202 return 0;
206 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
207 * any, for the new policy. mpol_new() has already validated the nodes
208 * parameter with respect to the policy mode and flags. But, we need to
209 * handle an empty nodemask with MPOL_PREFERRED here.
211 * Must be called holding task's alloc_lock to protect task's mems_allowed
212 * and mempolicy. May also be called holding the mmap_semaphore for write.
214 static int mpol_set_nodemask(struct mempolicy *pol,
215 const nodemask_t *nodes, struct nodemask_scratch *nsc)
217 int ret;
219 /* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
220 if (pol == NULL)
221 return 0;
222 /* Check N_MEMORY */
223 nodes_and(nsc->mask1,
224 cpuset_current_mems_allowed, node_states[N_MEMORY]);
226 VM_BUG_ON(!nodes);
227 if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
228 nodes = NULL; /* explicit local allocation */
229 else {
230 if (pol->flags & MPOL_F_RELATIVE_NODES)
231 mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
232 else
233 nodes_and(nsc->mask2, *nodes, nsc->mask1);
235 if (mpol_store_user_nodemask(pol))
236 pol->w.user_nodemask = *nodes;
237 else
238 pol->w.cpuset_mems_allowed =
239 cpuset_current_mems_allowed;
242 if (nodes)
243 ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
244 else
245 ret = mpol_ops[pol->mode].create(pol, NULL);
246 return ret;
250 * This function just creates a new policy, does some checks and simple
251 * initialization. You must invoke mpol_set_nodemask() to set nodes.
253 static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
254 nodemask_t *nodes)
256 struct mempolicy *policy;
258 pr_debug("setting mode %d flags %d nodes[0] %lx\n",
259 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);
261 if (mode == MPOL_DEFAULT) {
262 if (nodes && !nodes_empty(*nodes))
263 return ERR_PTR(-EINVAL);
264 return NULL;
266 VM_BUG_ON(!nodes);
269 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
270 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
271 * All other modes require a valid pointer to a non-empty nodemask.
273 if (mode == MPOL_PREFERRED) {
274 if (nodes_empty(*nodes)) {
275 if (((flags & MPOL_F_STATIC_NODES) ||
276 (flags & MPOL_F_RELATIVE_NODES)))
277 return ERR_PTR(-EINVAL);
279 } else if (mode == MPOL_LOCAL) {
280 if (!nodes_empty(*nodes))
281 return ERR_PTR(-EINVAL);
282 mode = MPOL_PREFERRED;
283 } else if (nodes_empty(*nodes))
284 return ERR_PTR(-EINVAL);
285 policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
286 if (!policy)
287 return ERR_PTR(-ENOMEM);
288 atomic_set(&policy->refcnt, 1);
289 policy->mode = mode;
290 policy->flags = flags;
292 return policy;
295 /* Slow path of a mpol destructor. */
296 void __mpol_put(struct mempolicy *p)
298 if (!atomic_dec_and_test(&p->refcnt))
299 return;
300 kmem_cache_free(policy_cache, p);
303 static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes,
304 enum mpol_rebind_step step)
309 * step:
310 * MPOL_REBIND_ONCE - do rebind work at once
311 * MPOL_REBIND_STEP1 - set all the newly allowed nodes
312 * MPOL_REBIND_STEP2 - clean all the disallowed nodes
314 static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes,
315 enum mpol_rebind_step step)
317 nodemask_t tmp;
319 if (pol->flags & MPOL_F_STATIC_NODES)
320 nodes_and(tmp, pol->w.user_nodemask, *nodes);
321 else if (pol->flags & MPOL_F_RELATIVE_NODES)
322 mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
323 else {
325 * if step == 1, we use ->w.cpuset_mems_allowed to cache the
326 * result
328 if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP1) {
329 nodes_remap(tmp, pol->v.nodes,
330 pol->w.cpuset_mems_allowed, *nodes);
331 pol->w.cpuset_mems_allowed = step ? tmp : *nodes;
332 } else if (step == MPOL_REBIND_STEP2) {
333 tmp = pol->w.cpuset_mems_allowed;
334 pol->w.cpuset_mems_allowed = *nodes;
335 } else
336 BUG();
339 if (nodes_empty(tmp))
340 tmp = *nodes;
342 if (step == MPOL_REBIND_STEP1)
343 nodes_or(pol->v.nodes, pol->v.nodes, tmp);
344 else if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP2)
345 pol->v.nodes = tmp;
346 else
347 BUG();
349 if (!node_isset(current->il_next, tmp)) {
350 current->il_next = next_node(current->il_next, tmp);
351 if (current->il_next >= MAX_NUMNODES)
352 current->il_next = first_node(tmp);
353 if (current->il_next >= MAX_NUMNODES)
354 current->il_next = numa_node_id();
358 static void mpol_rebind_preferred(struct mempolicy *pol,
359 const nodemask_t *nodes,
360 enum mpol_rebind_step step)
362 nodemask_t tmp;
364 if (pol->flags & MPOL_F_STATIC_NODES) {
365 int node = first_node(pol->w.user_nodemask);
367 if (node_isset(node, *nodes)) {
368 pol->v.preferred_node = node;
369 pol->flags &= ~MPOL_F_LOCAL;
370 } else
371 pol->flags |= MPOL_F_LOCAL;
372 } else if (pol->flags & MPOL_F_RELATIVE_NODES) {
373 mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
374 pol->v.preferred_node = first_node(tmp);
375 } else if (!(pol->flags & MPOL_F_LOCAL)) {
376 pol->v.preferred_node = node_remap(pol->v.preferred_node,
377 pol->w.cpuset_mems_allowed,
378 *nodes);
379 pol->w.cpuset_mems_allowed = *nodes;
384 * mpol_rebind_policy - Migrate a policy to a different set of nodes
386 * If read-side task has no lock to protect task->mempolicy, write-side
387 * task will rebind the task->mempolicy in two steps. The first step is
388 * setting all the newly allowed nodes, and the second step is clearing all
389 * the disallowed nodes. In this way, we can avoid finding no node to
390 * allocate a page from.
391 * If we have a lock to protect task->mempolicy in read-side, we do
392 * rebind directly.
394 * step:
395 * MPOL_REBIND_ONCE - do rebind work at once
396 * MPOL_REBIND_STEP1 - set all the newly allowed nodes
397 * MPOL_REBIND_STEP2 - clean all the disallowed nodes
399 static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask,
400 enum mpol_rebind_step step)
402 if (!pol)
403 return;
404 if (!mpol_store_user_nodemask(pol) && step == MPOL_REBIND_ONCE &&
405 nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
406 return;
408 if (step == MPOL_REBIND_STEP1 && (pol->flags & MPOL_F_REBINDING))
409 return;
411 if (step == MPOL_REBIND_STEP2 && !(pol->flags & MPOL_F_REBINDING))
412 BUG();
414 if (step == MPOL_REBIND_STEP1)
415 pol->flags |= MPOL_F_REBINDING;
416 else if (step == MPOL_REBIND_STEP2)
417 pol->flags &= ~MPOL_F_REBINDING;
418 else if (step >= MPOL_REBIND_NSTEP)
419 BUG();
421 mpol_ops[pol->mode].rebind(pol, newmask, step);
425 * Wrapper for mpol_rebind_policy() that just requires task
426 * pointer, and updates task mempolicy.
428 * Called with task's alloc_lock held.
431 void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
432 enum mpol_rebind_step step)
434 mpol_rebind_policy(tsk->mempolicy, new, step);
438 * Rebind each vma in mm to new nodemask.
440 * Call holding a reference to mm. Takes mm->mmap_sem during call.
443 void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
445 struct vm_area_struct *vma;
447 down_write(&mm->mmap_sem);
448 for (vma = mm->mmap; vma; vma = vma->vm_next)
449 mpol_rebind_policy(vma->vm_policy, new, MPOL_REBIND_ONCE);
450 up_write(&mm->mmap_sem);
453 static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
454 [MPOL_DEFAULT] = {
455 .rebind = mpol_rebind_default,
457 [MPOL_INTERLEAVE] = {
458 .create = mpol_new_interleave,
459 .rebind = mpol_rebind_nodemask,
461 [MPOL_PREFERRED] = {
462 .create = mpol_new_preferred,
463 .rebind = mpol_rebind_preferred,
465 [MPOL_BIND] = {
466 .create = mpol_new_bind,
467 .rebind = mpol_rebind_nodemask,
471 static void migrate_page_add(struct page *page, struct list_head *pagelist,
472 unsigned long flags);
474 struct queue_pages {
475 struct list_head *pagelist;
476 unsigned long flags;
477 nodemask_t *nmask;
478 struct vm_area_struct *prev;
482 * Scan through pages checking if pages follow certain conditions,
483 * and move them to the pagelist if they do.
485 static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
486 unsigned long end, struct mm_walk *walk)
488 struct vm_area_struct *vma = walk->vma;
489 struct page *page;
490 struct queue_pages *qp = walk->private;
491 unsigned long flags = qp->flags;
492 int nid, ret;
493 pte_t *pte;
494 spinlock_t *ptl;
496 if (pmd_trans_huge(*pmd)) {
497 ptl = pmd_lock(walk->mm, pmd);
498 if (pmd_trans_huge(*pmd)) {
499 page = pmd_page(*pmd);
500 if (is_huge_zero_page(page)) {
501 spin_unlock(ptl);
502 split_huge_pmd(vma, pmd, addr);
503 } else {
504 get_page(page);
505 spin_unlock(ptl);
506 lock_page(page);
507 ret = split_huge_page(page);
508 unlock_page(page);
509 put_page(page);
510 if (ret)
511 return 0;
513 } else {
514 spin_unlock(ptl);
518 retry:
519 pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
520 for (; addr != end; pte++, addr += PAGE_SIZE) {
521 if (!pte_present(*pte))
522 continue;
523 page = vm_normal_page(vma, addr, *pte);
524 if (!page)
525 continue;
527 * vm_normal_page() filters out zero pages, but there might
528 * still be PageReserved pages to skip, perhaps in a VDSO.
530 if (PageReserved(page))
531 continue;
532 nid = page_to_nid(page);
533 if (node_isset(nid, *qp->nmask) == !!(flags & MPOL_MF_INVERT))
534 continue;
535 if (PageTail(page) && PageAnon(page)) {
536 get_page(page);
537 pte_unmap_unlock(pte, ptl);
538 lock_page(page);
539 ret = split_huge_page(page);
540 unlock_page(page);
541 put_page(page);
542 /* Failed to split -- skip. */
543 if (ret) {
544 pte = pte_offset_map_lock(walk->mm, pmd,
545 addr, &ptl);
546 continue;
548 goto retry;
551 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
552 migrate_page_add(page, qp->pagelist, flags);
554 pte_unmap_unlock(pte - 1, ptl);
555 cond_resched();
556 return 0;
559 static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
560 unsigned long addr, unsigned long end,
561 struct mm_walk *walk)
563 #ifdef CONFIG_HUGETLB_PAGE
564 struct queue_pages *qp = walk->private;
565 unsigned long flags = qp->flags;
566 int nid;
567 struct page *page;
568 spinlock_t *ptl;
569 pte_t entry;
571 ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
572 entry = huge_ptep_get(pte);
573 if (!pte_present(entry))
574 goto unlock;
575 page = pte_page(entry);
576 nid = page_to_nid(page);
577 if (node_isset(nid, *qp->nmask) == !!(flags & MPOL_MF_INVERT))
578 goto unlock;
579 /* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
580 if (flags & (MPOL_MF_MOVE_ALL) ||
581 (flags & MPOL_MF_MOVE && page_mapcount(page) == 1))
582 isolate_huge_page(page, qp->pagelist);
583 unlock:
584 spin_unlock(ptl);
585 #else
586 BUG();
587 #endif
588 return 0;
591 #ifdef CONFIG_NUMA_BALANCING
593 * This is used to mark a range of virtual addresses to be inaccessible.
594 * These are later cleared by a NUMA hinting fault. Depending on these
595 * faults, pages may be migrated for better NUMA placement.
597 * This is assuming that NUMA faults are handled using PROT_NONE. If
598 * an architecture makes a different choice, it will need further
599 * changes to the core.
601 unsigned long change_prot_numa(struct vm_area_struct *vma,
602 unsigned long addr, unsigned long end)
604 int nr_updated;
606 nr_updated = change_protection(vma, addr, end, PAGE_NONE, 0, 1);
607 if (nr_updated)
608 count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);
610 return nr_updated;
612 #else
613 static unsigned long change_prot_numa(struct vm_area_struct *vma,
614 unsigned long addr, unsigned long end)
616 return 0;
618 #endif /* CONFIG_NUMA_BALANCING */
620 static int queue_pages_test_walk(unsigned long start, unsigned long end,
621 struct mm_walk *walk)
623 struct vm_area_struct *vma = walk->vma;
624 struct queue_pages *qp = walk->private;
625 unsigned long endvma = vma->vm_end;
626 unsigned long flags = qp->flags;
628 if (vma->vm_flags & VM_PFNMAP)
629 return 1;
631 if (endvma > end)
632 endvma = end;
633 if (vma->vm_start > start)
634 start = vma->vm_start;
636 if (!(flags & MPOL_MF_DISCONTIG_OK)) {
637 if (!vma->vm_next && vma->vm_end < end)
638 return -EFAULT;
639 if (qp->prev && qp->prev->vm_end < vma->vm_start)
640 return -EFAULT;
643 qp->prev = vma;
645 if (flags & MPOL_MF_LAZY) {
646 /* Similar to task_numa_work, skip inaccessible VMAs */
647 if (vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))
648 change_prot_numa(vma, start, endvma);
649 return 1;
652 if ((flags & MPOL_MF_STRICT) ||
653 ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
654 vma_migratable(vma)))
655 /* queue pages from current vma */
656 return 0;
657 return 1;
661 * Walk through page tables and collect pages to be migrated.
663 * If pages found in a given range are on a set of nodes (determined by
664 * @nodes and @flags), they are isolated and queued to the pagelist,
665 * which is passed via @private.
667 static int
668 queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
669 nodemask_t *nodes, unsigned long flags,
670 struct list_head *pagelist)
672 struct queue_pages qp = {
673 .pagelist = pagelist,
674 .flags = flags,
675 .nmask = nodes,
676 .prev = NULL,
678 struct mm_walk queue_pages_walk = {
679 .hugetlb_entry = queue_pages_hugetlb,
680 .pmd_entry = queue_pages_pte_range,
681 .test_walk = queue_pages_test_walk,
682 .mm = mm,
683 .private = &qp,
686 return walk_page_range(start, end, &queue_pages_walk);
690 * Apply policy to a single VMA
691 * This must be called with the mmap_sem held for writing.
693 static int vma_replace_policy(struct vm_area_struct *vma,
694 struct mempolicy *pol)
696 int err;
697 struct mempolicy *old;
698 struct mempolicy *new;
700 pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
701 vma->vm_start, vma->vm_end, vma->vm_pgoff,
702 vma->vm_ops, vma->vm_file,
703 vma->vm_ops ? vma->vm_ops->set_policy : NULL);
705 new = mpol_dup(pol);
706 if (IS_ERR(new))
707 return PTR_ERR(new);
709 if (vma->vm_ops && vma->vm_ops->set_policy) {
710 err = vma->vm_ops->set_policy(vma, new);
711 if (err)
712 goto err_out;
715 old = vma->vm_policy;
716 vma->vm_policy = new; /* protected by mmap_sem */
717 mpol_put(old);
719 return 0;
720 err_out:
721 mpol_put(new);
722 return err;
725 /* Step 2: apply policy to a range and do splits. */
726 static int mbind_range(struct mm_struct *mm, unsigned long start,
727 unsigned long end, struct mempolicy *new_pol)
729 struct vm_area_struct *next;
730 struct vm_area_struct *prev;
731 struct vm_area_struct *vma;
732 int err = 0;
733 pgoff_t pgoff;
734 unsigned long vmstart;
735 unsigned long vmend;
737 vma = find_vma(mm, start);
738 if (!vma || vma->vm_start > start)
739 return -EFAULT;
741 prev = vma->vm_prev;
742 if (start > vma->vm_start)
743 prev = vma;
745 for (; vma && vma->vm_start < end; prev = vma, vma = next) {
746 next = vma->vm_next;
747 vmstart = max(start, vma->vm_start);
748 vmend = min(end, vma->vm_end);
750 if (mpol_equal(vma_policy(vma), new_pol))
751 continue;
753 pgoff = vma->vm_pgoff +
754 ((vmstart - vma->vm_start) >> PAGE_SHIFT);
755 prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
756 vma->anon_vma, vma->vm_file, pgoff,
757 new_pol, vma->vm_userfaultfd_ctx);
758 if (prev) {
759 vma = prev;
760 next = vma->vm_next;
761 if (mpol_equal(vma_policy(vma), new_pol))
762 continue;
763 /* vma_merge() joined vma && vma->next, case 8 */
764 goto replace;
766 if (vma->vm_start != vmstart) {
767 err = split_vma(vma->vm_mm, vma, vmstart, 1);
768 if (err)
769 goto out;
771 if (vma->vm_end != vmend) {
772 err = split_vma(vma->vm_mm, vma, vmend, 0);
773 if (err)
774 goto out;
776 replace:
777 err = vma_replace_policy(vma, new_pol);
778 if (err)
779 goto out;
782 out:
783 return err;
786 /* Set the process memory policy */
787 static long do_set_mempolicy(unsigned short mode, unsigned short flags,
788 nodemask_t *nodes)
790 struct mempolicy *new, *old;
791 NODEMASK_SCRATCH(scratch);
792 int ret;
794 if (!scratch)
795 return -ENOMEM;
797 new = mpol_new(mode, flags, nodes);
798 if (IS_ERR(new)) {
799 ret = PTR_ERR(new);
800 goto out;
803 task_lock(current);
804 ret = mpol_set_nodemask(new, nodes, scratch);
805 if (ret) {
806 task_unlock(current);
807 mpol_put(new);
808 goto out;
810 old = current->mempolicy;
811 current->mempolicy = new;
812 if (new && new->mode == MPOL_INTERLEAVE &&
813 nodes_weight(new->v.nodes))
814 current->il_next = first_node(new->v.nodes);
815 task_unlock(current);
816 mpol_put(old);
817 ret = 0;
818 out:
819 NODEMASK_SCRATCH_FREE(scratch);
820 return ret;
824 * Return nodemask for policy for get_mempolicy() query
826 * Called with task's alloc_lock held
828 static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
830 nodes_clear(*nodes);
831 if (p == &default_policy)
832 return;
834 switch (p->mode) {
835 case MPOL_BIND:
836 /* Fall through */
837 case MPOL_INTERLEAVE:
838 *nodes = p->v.nodes;
839 break;
840 case MPOL_PREFERRED:
841 if (!(p->flags & MPOL_F_LOCAL))
842 node_set(p->v.preferred_node, *nodes);
843 /* else return empty node mask for local allocation */
844 break;
845 default:
846 BUG();
850 static int lookup_node(struct mm_struct *mm, unsigned long addr)
852 struct page *p;
853 int err;
855 err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
856 if (err >= 0) {
857 err = page_to_nid(p);
858 put_page(p);
860 return err;
863 /* Retrieve NUMA policy */
864 static long do_get_mempolicy(int *policy, nodemask_t *nmask,
865 unsigned long addr, unsigned long flags)
867 int err;
868 struct mm_struct *mm = current->mm;
869 struct vm_area_struct *vma = NULL;
870 struct mempolicy *pol = current->mempolicy;
872 if (flags &
873 ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
874 return -EINVAL;
876 if (flags & MPOL_F_MEMS_ALLOWED) {
877 if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
878 return -EINVAL;
879 *policy = 0; /* just so it's initialized */
880 task_lock(current);
881 *nmask = cpuset_current_mems_allowed;
882 task_unlock(current);
883 return 0;
886 if (flags & MPOL_F_ADDR) {
888 * Do NOT fall back to task policy if the
889 * vma/shared policy at addr is NULL. We
890 * want to return MPOL_DEFAULT in this case.
892 down_read(&mm->mmap_sem);
893 vma = find_vma_intersection(mm, addr, addr+1);
894 if (!vma) {
895 up_read(&mm->mmap_sem);
896 return -EFAULT;
898 if (vma->vm_ops && vma->vm_ops->get_policy)
899 pol = vma->vm_ops->get_policy(vma, addr);
900 else
901 pol = vma->vm_policy;
902 } else if (addr)
903 return -EINVAL;
905 if (!pol)
906 pol = &default_policy; /* indicates default behavior */
908 if (flags & MPOL_F_NODE) {
909 if (flags & MPOL_F_ADDR) {
910 err = lookup_node(mm, addr);
911 if (err < 0)
912 goto out;
913 *policy = err;
914 } else if (pol == current->mempolicy &&
915 pol->mode == MPOL_INTERLEAVE) {
916 *policy = current->il_next;
917 } else {
918 err = -EINVAL;
919 goto out;
921 } else {
922 *policy = pol == &default_policy ? MPOL_DEFAULT :
923 pol->mode;
925 * Internal mempolicy flags must be masked off before exposing
926 * the policy to userspace.
928 *policy |= (pol->flags & MPOL_MODE_FLAGS);
931 if (vma) {
932 up_read(&current->mm->mmap_sem);
933 vma = NULL;
936 err = 0;
937 if (nmask) {
938 if (mpol_store_user_nodemask(pol)) {
939 *nmask = pol->w.user_nodemask;
940 } else {
941 task_lock(current);
942 get_policy_nodemask(pol, nmask);
943 task_unlock(current);
947 out:
948 mpol_cond_put(pol);
949 if (vma)
950 up_read(&current->mm->mmap_sem);
951 return err;
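/*
 * Example (userspace sketch using the <numaif.h> wrapper from libnuma):
 * combining MPOL_F_NODE and MPOL_F_ADDR turns get_mempolicy(2) into a
 * "which node backs this address" query, exercising the lookup_node()
 * path above. addr is assumed to point into a mapped page:
 *
 *	#include <stdio.h>
 *	#include <numaif.h>
 *
 *	int node = -1;
 *	if (get_mempolicy(&node, NULL, 0, addr, MPOL_F_NODE | MPOL_F_ADDR) == 0)
 *		printf("page at %p is on node %d\n", addr, node);
 */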
954 #ifdef CONFIG_MIGRATION
956 * page migration
958 static void migrate_page_add(struct page *page, struct list_head *pagelist,
959 unsigned long flags)
962 * Avoid migrating a page that is shared with others.
964 if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
965 if (!isolate_lru_page(page)) {
966 list_add_tail(&page->lru, pagelist);
967 inc_zone_page_state(page, NR_ISOLATED_ANON +
968 page_is_file_cache(page));
973 static struct page *new_node_page(struct page *page, unsigned long node, int **x)
975 if (PageHuge(page))
976 return alloc_huge_page_node(page_hstate(compound_head(page)),
977 node);
978 else
979 return __alloc_pages_node(node, GFP_HIGHUSER_MOVABLE |
980 __GFP_THISNODE, 0);
984 * Migrate pages from one node to a target node.
985 * Returns error or the number of pages not migrated.
987 static int migrate_to_node(struct mm_struct *mm, int source, int dest,
988 int flags)
990 nodemask_t nmask;
991 LIST_HEAD(pagelist);
992 int err = 0;
994 nodes_clear(nmask);
995 node_set(source, nmask);
998 * This does not "check" the range but isolates all pages that
999 * need migration. Between passing in the full user address
1000 * space range and MPOL_MF_DISCONTIG_OK, this call can not fail.
1002 VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
1003 queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
1004 flags | MPOL_MF_DISCONTIG_OK, &pagelist);
1006 if (!list_empty(&pagelist)) {
1007 err = migrate_pages(&pagelist, new_node_page, NULL, dest,
1008 MIGRATE_SYNC, MR_SYSCALL);
1009 if (err)
1010 putback_movable_pages(&pagelist);
1013 return err;
1017 * Move pages between the two nodesets so as to preserve the physical
1018 * layout as much as possible.
1020 * Returns the number of pages that could not be moved.
1022 int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1023 const nodemask_t *to, int flags)
1025 int busy = 0;
1026 int err;
1027 nodemask_t tmp;
1029 err = migrate_prep();
1030 if (err)
1031 return err;
1033 down_read(&mm->mmap_sem);
1036 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
1037 * bit in 'to' is not also set in 'tmp'. Clear the found 'source'
1038 * bit in 'tmp', and return that <source, dest> pair for migration.
1039 * The pair of nodemasks 'to' and 'from' define the map.
1041 * If no pair of bits is found that way, fallback to picking some
1042 * pair of 'source' and 'dest' bits that are not the same. If the
1043 * 'source' and 'dest' bits are the same, this represents a node
1044 * that will be migrating to itself, so no pages need move.
1046 * If no bits are left in 'tmp', or if all remaining bits left
1047 * in 'tmp' correspond to the same bit in 'to', return false
1048 * (nothing left to migrate).
1050 * This lets us pick a pair of nodes to migrate between, such that
1051 * if possible the dest node is not already occupied by some other
1052 * source node, minimizing the risk of overloading the memory on a
1053 * node that would happen if we migrated incoming memory to a node
1054 * before migrating outgoing memory sourced from that same node.
1056 * A single scan of tmp is sufficient. As we go, we remember the
1057 * most recent <s, d> pair that moved (s != d). If we find a pair
1058 * that not only moved, but what's better, moved to an empty slot
1059 * (d is not set in tmp), then we break out then, with that pair.
1060 * Otherwise when we finish scanning tmp, we at least have the
1061 * most recent <s, d> pair that moved. If we get all the way through
1062 * the scan of tmp without finding any node that moved, much less
1063 * moved to an empty node, then there is nothing left worth migrating.
1066 tmp = *from;
1067 while (!nodes_empty(tmp)) {
1068 int s,d;
1069 int source = NUMA_NO_NODE;
1070 int dest = 0;
1072 for_each_node_mask(s, tmp) {
1075 * do_migrate_pages() tries to maintain the relative
1076 * node relationship of the pages established between
1077 * threads and memory areas.
1079 * However if the number of source nodes is not equal to
1080 * the number of destination nodes we can not preserve
1081 * this node relative relationship. In that case, skip
1082 * copying memory from a node that is in the destination
1083 * mask.
1085 * Example: [2,3,4] -> [3,4,5] moves everything.
1086 * [0-7] - > [3,4,5] moves only 0,1,2,6,7.
1089 if ((nodes_weight(*from) != nodes_weight(*to)) &&
1090 (node_isset(s, *to)))
1091 continue;
1093 d = node_remap(s, *from, *to);
1094 if (s == d)
1095 continue;
1097 source = s; /* Node moved. Memorize */
1098 dest = d;
1100 /* dest not in remaining from nodes? */
1101 if (!node_isset(dest, tmp))
1102 break;
1104 if (source == NUMA_NO_NODE)
1105 break;
1107 node_clear(source, tmp);
1108 err = migrate_to_node(mm, source, dest, flags);
1109 if (err > 0)
1110 busy += err;
1111 if (err < 0)
1112 break;
1114 up_read(&mm->mmap_sem);
1115 if (err < 0)
1116 return err;
1117 return busy;
1122 * Allocate a new page for page migration based on vma policy.
1123 * Start by assuming the page is mapped by the same vma as contains @start.
1124 * Search forward from there, if not. N.B., this assumes that the
1125 * list of pages handed to migrate_pages()--which is how we get here--
1126 * is in virtual address order.
1128 static struct page *new_page(struct page *page, unsigned long start, int **x)
1130 struct vm_area_struct *vma;
1131 unsigned long uninitialized_var(address);
1133 vma = find_vma(current->mm, start);
1134 while (vma) {
1135 address = page_address_in_vma(page, vma);
1136 if (address != -EFAULT)
1137 break;
1138 vma = vma->vm_next;
1141 if (PageHuge(page)) {
1142 BUG_ON(!vma);
1143 return alloc_huge_page_noerr(vma, address, 1);
1146 * if !vma, alloc_page_vma() will use task or system default policy
1148 return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
1150 #else
1152 static void migrate_page_add(struct page *page, struct list_head *pagelist,
1153 unsigned long flags)
1157 int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1158 const nodemask_t *to, int flags)
1160 return -ENOSYS;
1163 static struct page *new_page(struct page *page, unsigned long start, int **x)
1165 return NULL;
1167 #endif
1169 static long do_mbind(unsigned long start, unsigned long len,
1170 unsigned short mode, unsigned short mode_flags,
1171 nodemask_t *nmask, unsigned long flags)
1173 struct mm_struct *mm = current->mm;
1174 struct mempolicy *new;
1175 unsigned long end;
1176 int err;
1177 LIST_HEAD(pagelist);
1179 if (flags & ~(unsigned long)MPOL_MF_VALID)
1180 return -EINVAL;
1181 if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
1182 return -EPERM;
1184 if (start & ~PAGE_MASK)
1185 return -EINVAL;
1187 if (mode == MPOL_DEFAULT)
1188 flags &= ~MPOL_MF_STRICT;
1190 len = (len + PAGE_SIZE - 1) & PAGE_MASK;
1191 end = start + len;
1193 if (end < start)
1194 return -EINVAL;
1195 if (end == start)
1196 return 0;
1198 new = mpol_new(mode, mode_flags, nmask);
1199 if (IS_ERR(new))
1200 return PTR_ERR(new);
1202 if (flags & MPOL_MF_LAZY)
1203 new->flags |= MPOL_F_MOF;
1206 * If we are using the default policy then operation
1207 * on discontinuous address spaces is okay after all
1209 if (!new)
1210 flags |= MPOL_MF_DISCONTIG_OK;
1212 pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
1213 start, start + len, mode, mode_flags,
1214 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);
1216 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
1218 err = migrate_prep();
1219 if (err)
1220 goto mpol_out;
1223 NODEMASK_SCRATCH(scratch);
1224 if (scratch) {
1225 down_write(&mm->mmap_sem);
1226 task_lock(current);
1227 err = mpol_set_nodemask(new, nmask, scratch);
1228 task_unlock(current);
1229 if (err)
1230 up_write(&mm->mmap_sem);
1231 } else
1232 err = -ENOMEM;
1233 NODEMASK_SCRATCH_FREE(scratch);
1235 if (err)
1236 goto mpol_out;
1238 err = queue_pages_range(mm, start, end, nmask,
1239 flags | MPOL_MF_INVERT, &pagelist);
1240 if (!err)
1241 err = mbind_range(mm, start, end, new);
1243 if (!err) {
1244 int nr_failed = 0;
1246 if (!list_empty(&pagelist)) {
1247 WARN_ON_ONCE(flags & MPOL_MF_LAZY);
1248 nr_failed = migrate_pages(&pagelist, new_page, NULL,
1249 start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
1250 if (nr_failed)
1251 putback_movable_pages(&pagelist);
1254 if (nr_failed && (flags & MPOL_MF_STRICT))
1255 err = -EIO;
1256 } else
1257 putback_movable_pages(&pagelist);
1259 up_write(&mm->mmap_sem);
1260 mpol_out:
1261 mpol_put(new);
1262 return err;
1266 * User space interface with variable sized bitmaps for nodelists.
1269 /* Copy a node mask from user space. */
1270 static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
1271 unsigned long maxnode)
1273 unsigned long k;
1274 unsigned long nlongs;
1275 unsigned long endmask;
1277 --maxnode;
1278 nodes_clear(*nodes);
1279 if (maxnode == 0 || !nmask)
1280 return 0;
1281 if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
1282 return -EINVAL;
1284 nlongs = BITS_TO_LONGS(maxnode);
1285 if ((maxnode % BITS_PER_LONG) == 0)
1286 endmask = ~0UL;
1287 else
1288 endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
1290 /* When the user specified more nodes than supported just check
1291 if the unsupported part is all zero. */
1292 if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
1293 if (nlongs > PAGE_SIZE/sizeof(long))
1294 return -EINVAL;
1295 for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
1296 unsigned long t;
1297 if (get_user(t, nmask + k))
1298 return -EFAULT;
1299 if (k == nlongs - 1) {
1300 if (t & endmask)
1301 return -EINVAL;
1302 } else if (t)
1303 return -EINVAL;
1305 nlongs = BITS_TO_LONGS(MAX_NUMNODES);
1306 endmask = ~0UL;
1309 if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
1310 return -EFAULT;
1311 nodes_addr(*nodes)[nlongs-1] &= endmask;
1312 return 0;
1315 /* Copy a kernel node mask to user space */
1316 static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
1317 nodemask_t *nodes)
1319 unsigned long copy = ALIGN(maxnode-1, 64) / 8;
1320 const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
1322 if (copy > nbytes) {
1323 if (copy > PAGE_SIZE)
1324 return -EINVAL;
1325 if (clear_user((char __user *)mask + nbytes, copy - nbytes))
1326 return -EFAULT;
1327 copy = nbytes;
1329 return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
1332 SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1333 unsigned long, mode, const unsigned long __user *, nmask,
1334 unsigned long, maxnode, unsigned, flags)
1336 nodemask_t nodes;
1337 int err;
1338 unsigned short mode_flags;
1340 mode_flags = mode & MPOL_MODE_FLAGS;
1341 mode &= ~MPOL_MODE_FLAGS;
1342 if (mode >= MPOL_MAX)
1343 return -EINVAL;
1344 if ((mode_flags & MPOL_F_STATIC_NODES) &&
1345 (mode_flags & MPOL_F_RELATIVE_NODES))
1346 return -EINVAL;
1347 err = get_nodes(&nodes, nmask, maxnode);
1348 if (err)
1349 return err;
1350 return do_mbind(start, len, mode, mode_flags, &nodes, flags);
1353 /* Set the process memory policy */
1354 SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
1355 unsigned long, maxnode)
1357 int err;
1358 nodemask_t nodes;
1359 unsigned short flags;
1361 flags = mode & MPOL_MODE_FLAGS;
1362 mode &= ~MPOL_MODE_FLAGS;
1363 if ((unsigned int)mode >= MPOL_MAX)
1364 return -EINVAL;
1365 if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
1366 return -EINVAL;
1367 err = get_nodes(&nodes, nmask, maxnode);
1368 if (err)
1369 return err;
1370 return do_set_mempolicy(mode, flags, &nodes);
1373 SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1374 const unsigned long __user *, old_nodes,
1375 const unsigned long __user *, new_nodes)
1377 const struct cred *cred = current_cred(), *tcred;
1378 struct mm_struct *mm = NULL;
1379 struct task_struct *task;
1380 nodemask_t task_nodes;
1381 int err;
1382 nodemask_t *old;
1383 nodemask_t *new;
1384 NODEMASK_SCRATCH(scratch);
1386 if (!scratch)
1387 return -ENOMEM;
1389 old = &scratch->mask1;
1390 new = &scratch->mask2;
1392 err = get_nodes(old, old_nodes, maxnode);
1393 if (err)
1394 goto out;
1396 err = get_nodes(new, new_nodes, maxnode);
1397 if (err)
1398 goto out;
1400 /* Find the mm_struct */
1401 rcu_read_lock();
1402 task = pid ? find_task_by_vpid(pid) : current;
1403 if (!task) {
1404 rcu_read_unlock();
1405 err = -ESRCH;
1406 goto out;
1408 get_task_struct(task);
1410 err = -EINVAL;
1413 * Check if this process has the right to modify the specified
1414 * process. The right exists if the process has administrative
1415 * capabilities, superuser privileges or the same
1416 * userid as the target process.
1418 tcred = __task_cred(task);
1419 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
1420 !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
1421 !capable(CAP_SYS_NICE)) {
1422 rcu_read_unlock();
1423 err = -EPERM;
1424 goto out_put;
1426 rcu_read_unlock();
1428 task_nodes = cpuset_mems_allowed(task);
1429 /* Is the user allowed to access the target nodes? */
1430 if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
1431 err = -EPERM;
1432 goto out_put;
1435 if (!nodes_subset(*new, node_states[N_MEMORY])) {
1436 err = -EINVAL;
1437 goto out_put;
1440 err = security_task_movememory(task);
1441 if (err)
1442 goto out_put;
1444 mm = get_task_mm(task);
1445 put_task_struct(task);
1447 if (!mm) {
1448 err = -EINVAL;
1449 goto out;
1452 err = do_migrate_pages(mm, old, new,
1453 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
1455 mmput(mm);
1456 out:
1457 NODEMASK_SCRATCH_FREE(scratch);
1459 return err;
1461 out_put:
1462 put_task_struct(task);
1463 goto out;
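/*
 * Example (userspace sketch using the <numaif.h> wrapper from libnuma):
 * moving every page of process pid that currently resides on node 0 over
 * to node 1, subject to the permission and cpuset checks above; from = 0x1
 * selects node 0 and to = 0x2 selects node 1:
 *
 *	#include <numaif.h>
 *
 *	unsigned long from = 0x1;
 *	unsigned long to = 0x2;
 *	long left = migrate_pages(pid, sizeof(from) * 8, &from, &to);
 *
 * A return value of 0 means all pages were moved; a positive value is the
 * number of pages that could not be moved, matching do_migrate_pages().
 */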
1468 /* Retrieve NUMA policy */
1469 SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1470 unsigned long __user *, nmask, unsigned long, maxnode,
1471 unsigned long, addr, unsigned long, flags)
1473 int err;
1474 int uninitialized_var(pval);
1475 nodemask_t nodes;
1477 if (nmask != NULL && maxnode < MAX_NUMNODES)
1478 return -EINVAL;
1480 err = do_get_mempolicy(&pval, &nodes, addr, flags);
1482 if (err)
1483 return err;
1485 if (policy && put_user(pval, policy))
1486 return -EFAULT;
1488 if (nmask)
1489 err = copy_nodes_to_user(nmask, maxnode, &nodes);
1491 return err;
1494 #ifdef CONFIG_COMPAT
1496 COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1497 compat_ulong_t __user *, nmask,
1498 compat_ulong_t, maxnode,
1499 compat_ulong_t, addr, compat_ulong_t, flags)
1501 long err;
1502 unsigned long __user *nm = NULL;
1503 unsigned long nr_bits, alloc_size;
1504 DECLARE_BITMAP(bm, MAX_NUMNODES);
1506 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1507 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1509 if (nmask)
1510 nm = compat_alloc_user_space(alloc_size);
1512 err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
1514 if (!err && nmask) {
1515 unsigned long copy_size;
1516 copy_size = min_t(unsigned long, sizeof(bm), alloc_size);
1517 err = copy_from_user(bm, nm, copy_size);
1518 /* ensure entire bitmap is zeroed */
1519 err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
1520 err |= compat_put_bitmap(nmask, bm, nr_bits);
1523 return err;
1526 COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
1527 compat_ulong_t, maxnode)
1529 long err = 0;
1530 unsigned long __user *nm = NULL;
1531 unsigned long nr_bits, alloc_size;
1532 DECLARE_BITMAP(bm, MAX_NUMNODES);
1534 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1535 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1537 if (nmask) {
1538 err = compat_get_bitmap(bm, nmask, nr_bits);
1539 nm = compat_alloc_user_space(alloc_size);
1540 err |= copy_to_user(nm, bm, alloc_size);
1543 if (err)
1544 return -EFAULT;
1546 return sys_set_mempolicy(mode, nm, nr_bits+1);
1549 COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
1550 compat_ulong_t, mode, compat_ulong_t __user *, nmask,
1551 compat_ulong_t, maxnode, compat_ulong_t, flags)
1553 long err = 0;
1554 unsigned long __user *nm = NULL;
1555 unsigned long nr_bits, alloc_size;
1556 nodemask_t bm;
1558 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1559 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1561 if (nmask) {
1562 err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
1563 nm = compat_alloc_user_space(alloc_size);
1564 err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
1567 if (err)
1568 return -EFAULT;
1570 return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
1573 #endif
1575 struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
1576 unsigned long addr)
1578 struct mempolicy *pol = NULL;
1580 if (vma) {
1581 if (vma->vm_ops && vma->vm_ops->get_policy) {
1582 pol = vma->vm_ops->get_policy(vma, addr);
1583 } else if (vma->vm_policy) {
1584 pol = vma->vm_policy;
1587 * shmem_alloc_page() passes MPOL_F_SHARED policy with
1588 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
1589 * count on these policies which will be dropped by
1590 * mpol_cond_put() later
1592 if (mpol_needs_cond_ref(pol))
1593 mpol_get(pol);
1597 return pol;
1601 * get_vma_policy(@vma, @addr)
1602 * @vma: virtual memory area whose policy is sought
1603 * @addr: address in @vma for shared policy lookup
1605 * Returns effective policy for a VMA at specified address.
1606 * Falls back to current->mempolicy or system default policy, as necessary.
1607 * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
1608 * count--added by the get_policy() vm_op, as appropriate--to protect against
1609 * freeing by another task. It is the caller's responsibility to free the
1610 * extra reference for shared policies.
1612 static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
1613 unsigned long addr)
1615 struct mempolicy *pol = __get_vma_policy(vma, addr);
1617 if (!pol)
1618 pol = get_task_policy(current);
1620 return pol;
1623 bool vma_policy_mof(struct vm_area_struct *vma)
1625 struct mempolicy *pol;
1627 if (vma->vm_ops && vma->vm_ops->get_policy) {
1628 bool ret = false;
1630 pol = vma->vm_ops->get_policy(vma, vma->vm_start);
1631 if (pol && (pol->flags & MPOL_F_MOF))
1632 ret = true;
1633 mpol_cond_put(pol);
1635 return ret;
1638 pol = vma->vm_policy;
1639 if (!pol)
1640 pol = get_task_policy(current);
1642 return pol->flags & MPOL_F_MOF;
1645 static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1647 enum zone_type dynamic_policy_zone = policy_zone;
1649 BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1652 * if policy->v.nodes has movable memory only,
1653 * we apply policy when gfp_zone(gfp) = ZONE_MOVABLE only.
1655 * policy->v.nodes is intersected with node_states[N_MEMORY],
1656 * so if the following test fails, it implies
1657 * policy->v.nodes has movable memory only.
1659 if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY]))
1660 dynamic_policy_zone = ZONE_MOVABLE;
1662 return zone >= dynamic_policy_zone;
1666 * Return a nodemask representing a mempolicy for filtering nodes for
1667 * page allocation
1669 static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
1671 /* Lower zones don't get a nodemask applied for MPOL_BIND */
1672 if (unlikely(policy->mode == MPOL_BIND) &&
1673 apply_policy_zone(policy, gfp_zone(gfp)) &&
1674 cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
1675 return &policy->v.nodes;
1677 return NULL;
1680 /* Return a zonelist indicated by gfp for node representing a mempolicy */
1681 static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy,
1682 int nd)
1684 switch (policy->mode) {
1685 case MPOL_PREFERRED:
1686 if (!(policy->flags & MPOL_F_LOCAL))
1687 nd = policy->v.preferred_node;
1688 break;
1689 case MPOL_BIND:
1691 * Normally, MPOL_BIND allocations are node-local within the
1692 * allowed nodemask. However, if __GFP_THISNODE is set and the
1693 * current node isn't part of the mask, we use the zonelist for
1694 * the first node in the mask instead.
1696 if (unlikely(gfp & __GFP_THISNODE) &&
1697 unlikely(!node_isset(nd, policy->v.nodes)))
1698 nd = first_node(policy->v.nodes);
1699 break;
1700 default:
1701 BUG();
1703 return node_zonelist(nd, gfp);
1706 /* Do dynamic interleaving for a process */
1707 static unsigned interleave_nodes(struct mempolicy *policy)
1709 unsigned nid, next;
1710 struct task_struct *me = current;
1712 nid = me->il_next;
1713 next = next_node(nid, policy->v.nodes);
1714 if (next >= MAX_NUMNODES)
1715 next = first_node(policy->v.nodes);
1716 if (next < MAX_NUMNODES)
1717 me->il_next = next;
1718 return nid;
1722 * Depending on the memory policy provide a node from which to allocate the
1723 * next slab entry.
1725 unsigned int mempolicy_slab_node(void)
1727 struct mempolicy *policy;
1728 int node = numa_mem_id();
1730 if (in_interrupt())
1731 return node;
1733 policy = current->mempolicy;
1734 if (!policy || policy->flags & MPOL_F_LOCAL)
1735 return node;
1737 switch (policy->mode) {
1738 case MPOL_PREFERRED:
1740 * handled MPOL_F_LOCAL above
1742 return policy->v.preferred_node;
1744 case MPOL_INTERLEAVE:
1745 return interleave_nodes(policy);
1747 case MPOL_BIND: {
1749 * Follow bind policy behavior and start allocation at the
1750 * first node.
1752 struct zonelist *zonelist;
1753 struct zone *zone;
1754 enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
1755 zonelist = &NODE_DATA(node)->node_zonelists[0];
1756 (void)first_zones_zonelist(zonelist, highest_zoneidx,
1757 &policy->v.nodes,
1758 &zone);
1759 return zone ? zone->node : node;
1762 default:
1763 BUG();
1767 /* Do static interleaving for a VMA with known offset. */
1768 static unsigned offset_il_node(struct mempolicy *pol,
1769 struct vm_area_struct *vma, unsigned long off)
1771 unsigned nnodes = nodes_weight(pol->v.nodes);
1772 unsigned target;
1773 int c;
1774 int nid = NUMA_NO_NODE;
1776 if (!nnodes)
1777 return numa_node_id();
1778 target = (unsigned int)off % nnodes;
1779 c = 0;
1780 do {
1781 nid = next_node(nid, pol->v.nodes);
1782 c++;
1783 } while (c <= target);
1784 return nid;
1787 /* Determine a node number for interleave */
1788 static inline unsigned interleave_nid(struct mempolicy *pol,
1789 struct vm_area_struct *vma, unsigned long addr, int shift)
1791 if (vma) {
1792 unsigned long off;
1795 * for small pages, there is no difference between
1796 * shift and PAGE_SHIFT, so the bit-shift is safe.
1797 * for huge pages, since vm_pgoff is in units of small
1798 * pages, we need to shift off the always 0 bits to get
1799 * a useful offset.
1801 BUG_ON(shift < PAGE_SHIFT);
1802 off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
1803 off += (addr - vma->vm_start) >> shift;
1804 return offset_il_node(pol, vma, off);
1805 } else
1806 return interleave_nodes(pol);
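/*
 * Worked example for the huge page case above (illustrative): with 2MB huge
 * pages on x86, shift is 21 and PAGE_SHIFT is 12, so the interleave offset
 * becomes (vma->vm_pgoff >> 9) + ((addr - vma->vm_start) >> 21); successive
 * 2MB pages within the VMA thus get consecutive offsets and are spread
 * round-robin across the nodes in pol->v.nodes by offset_il_node().
 */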
1810 * Return the bit number of a random bit set in the nodemask.
1811 * (returns NUMA_NO_NODE if nodemask is empty)
1813 int node_random(const nodemask_t *maskp)
1815 int w, bit = NUMA_NO_NODE;
1817 w = nodes_weight(*maskp);
1818 if (w)
1819 bit = bitmap_ord_to_pos(maskp->bits,
1820 get_random_int() % w, MAX_NUMNODES);
1821 return bit;
1824 #ifdef CONFIG_HUGETLBFS
1826 * huge_zonelist(@vma, @addr, @gfp_flags, @mpol)
1827 * @vma: virtual memory area whose policy is sought
1828 * @addr: address in @vma for shared policy lookup and interleave policy
1829 * @gfp_flags: for requested zone
1830 * @mpol: pointer to mempolicy pointer for reference counted mempolicy
1831 * @nodemask: pointer to nodemask pointer for MPOL_BIND nodemask
1833 * Returns a zonelist suitable for a huge page allocation and a pointer
1834 * to the struct mempolicy for conditional unref after allocation.
1835 * If the effective policy is 'BIND', returns a pointer to the mempolicy's
1836 * @nodemask for filtering the zonelist.
1838 * Must be protected by read_mems_allowed_begin()
1840 struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
1841 gfp_t gfp_flags, struct mempolicy **mpol,
1842 nodemask_t **nodemask)
1844 struct zonelist *zl;
1846 *mpol = get_vma_policy(vma, addr);
1847 *nodemask = NULL; /* assume !MPOL_BIND */
1849 if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
1850 zl = node_zonelist(interleave_nid(*mpol, vma, addr,
1851 huge_page_shift(hstate_vma(vma))), gfp_flags);
1852 } else {
1853 zl = policy_zonelist(gfp_flags, *mpol, numa_node_id());
1854 if ((*mpol)->mode == MPOL_BIND)
1855 *nodemask = &(*mpol)->v.nodes;
1857 return zl;
1861 * init_nodemask_of_mempolicy
1863 * If the current task's mempolicy is "default" [NULL], return 'false'
1864 * to indicate default policy. Otherwise, extract the policy nodemask
1865 * for 'bind' or 'interleave' policy into the argument nodemask, or
1866 * initialize the argument nodemask to contain the single node for
1867 * 'preferred' or 'local' policy and return 'true' to indicate presence
1868 * of non-default mempolicy.
1870 * We don't bother with reference counting the mempolicy [mpol_get/put]
1871 * because the current task is examining its own mempolicy and a task's
1872 * mempolicy is only ever changed by the task itself.
1874 * N.B., it is the caller's responsibility to free a returned nodemask.
1876 bool init_nodemask_of_mempolicy(nodemask_t *mask)
1878 struct mempolicy *mempolicy;
1879 int nid;
1881 if (!(mask && current->mempolicy))
1882 return false;
1884 task_lock(current);
1885 mempolicy = current->mempolicy;
1886 switch (mempolicy->mode) {
1887 case MPOL_PREFERRED:
1888 if (mempolicy->flags & MPOL_F_LOCAL)
1889 nid = numa_node_id();
1890 else
1891 nid = mempolicy->v.preferred_node;
1892 init_nodemask_of_node(mask, nid);
1893 break;
1895 case MPOL_BIND:
1896 /* Fall through */
1897 case MPOL_INTERLEAVE:
1898 *mask = mempolicy->v.nodes;
1899 break;
1901 default:
1902 BUG();
1904 task_unlock(current);
1906 return true;
1908 #endif
1911 * mempolicy_nodemask_intersects
1913 * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
1914 * policy. Otherwise, check for intersection between mask and the policy
1915 * nodemask for 'bind' or 'interleave' policy. For 'preferred' or 'local'
1916 * policy, always return true since it may allocate elsewhere on fallback.
1918 * Takes task_lock(tsk) to prevent freeing of its mempolicy.
1920 bool mempolicy_nodemask_intersects(struct task_struct *tsk,
1921 const nodemask_t *mask)
1923 struct mempolicy *mempolicy;
1924 bool ret = true;
1926 if (!mask)
1927 return ret;
1928 task_lock(tsk);
1929 mempolicy = tsk->mempolicy;
1930 if (!mempolicy)
1931 goto out;
1933 switch (mempolicy->mode) {
1934 case MPOL_PREFERRED:
1936 * MPOL_PREFERRED and MPOL_F_LOCAL are only preferred nodes to
1937 * allocate from, they may fallback to other nodes when oom.
1938 * Thus, it's possible for tsk to have allocated memory from
1939 * nodes in mask.
1941 break;
1942 case MPOL_BIND:
1943 case MPOL_INTERLEAVE:
1944 ret = nodes_intersects(mempolicy->v.nodes, *mask);
1945 break;
1946 default:
1947 BUG();
1949 out:
1950 task_unlock(tsk);
1951 return ret;
1954 /* Allocate a page in interleaved policy.
1955 Own path because it needs to do special accounting. */
1956 static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
1957 unsigned nid)
1959 struct zonelist *zl;
1960 struct page *page;
1962 zl = node_zonelist(nid, gfp);
1963 page = __alloc_pages(gfp, order, zl);
1964 if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0]))
1965 inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
1966 return page;
1970 * alloc_pages_vma - Allocate a page for a VMA.
1972 * @gfp:
1973 * %GFP_USER user allocation.
1974 * %GFP_KERNEL kernel allocations,
1975 * %GFP_HIGHMEM highmem/user allocations,
1976 * %GFP_FS allocation should not call back into a file system.
1977 * %GFP_ATOMIC don't sleep.
1979 * @order: Order of the GFP allocation.
1980 * @vma: Pointer to VMA or NULL if not available.
1981 * @addr: Virtual Address of the allocation. Must be inside the VMA.
1982 * @node: Which node to prefer for allocation (modulo policy).
1983 * @hugepage: for hugepages try only the preferred node if possible
1985 * This function allocates a page from the kernel page pool and applies
1986 * a NUMA policy associated with the VMA or the current process.
1987 * When VMA is not NULL caller must hold down_read on the mmap_sem of the
1988 * mm_struct of the VMA to prevent it from going away. Should be used for
1989 * all allocations for pages that will be mapped into user space. Returns
1990 * NULL when no page can be allocated.
1992 struct page *
1993 alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
1994 unsigned long addr, int node, bool hugepage)
1996 struct mempolicy *pol;
1997 struct page *page;
1998 unsigned int cpuset_mems_cookie;
1999 struct zonelist *zl;
2000 nodemask_t *nmask;
2002 retry_cpuset:
2003 pol = get_vma_policy(vma, addr);
2004 cpuset_mems_cookie = read_mems_allowed_begin();
2006 if (pol->mode == MPOL_INTERLEAVE) {
2007 unsigned nid;
2009 nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
2010 mpol_cond_put(pol);
2011 page = alloc_page_interleave(gfp, order, nid);
2012 goto out;
2015 if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
2016 int hpage_node = node;
2019 * For hugepage allocation and non-interleave policy which
2020 * allows the current node (or other explicitly preferred
2021 * node) we only try to allocate from the current/preferred
2022 * node and don't fall back to other nodes, as the cost of
2023 * remote accesses would likely offset THP benefits.
2025 * If the policy is interleave, or does not allow the current
2026 * node in its nodemask, we allocate the standard way.
2028 if (pol->mode == MPOL_PREFERRED &&
2029 !(pol->flags & MPOL_F_LOCAL))
2030 hpage_node = pol->v.preferred_node;
2032 nmask = policy_nodemask(gfp, pol);
2033 if (!nmask || node_isset(hpage_node, *nmask)) {
2034 mpol_cond_put(pol);
2035 page = __alloc_pages_node(hpage_node,
2036 gfp | __GFP_THISNODE, order);
2037 goto out;
2041 nmask = policy_nodemask(gfp, pol);
2042 zl = policy_zonelist(gfp, pol, node);
2043 mpol_cond_put(pol);
2044 page = __alloc_pages_nodemask(gfp, order, zl, nmask);
2045 out:
2046 if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
2047 goto retry_cpuset;
2048 return page;
2052 * alloc_pages_current - Allocate pages.
2054 * @gfp:
2055 * %GFP_USER user allocation,
2056 * %GFP_KERNEL kernel allocation,
2057 * %GFP_HIGHMEM highmem allocation,
2058 * %GFP_FS don't call back into a file system.
2059 * %GFP_ATOMIC don't sleep.
2060 * @order: Power of two of allocation size in pages. 0 is a single page.
2062 * Allocate a page from the kernel page pool. When not in
2063 * interrupt context, apply the current process' NUMA policy.
2064 * Returns NULL when no page can be allocated.
2066 * Don't call cpuset_update_task_memory_state() unless
2067 * 1) it's ok to take cpuset_sem (can WAIT), and
2068 * 2) allocating for current task (not interrupt).
2070 struct page *alloc_pages_current(gfp_t gfp, unsigned order)
2072 struct mempolicy *pol = &default_policy;
2073 struct page *page;
2074 unsigned int cpuset_mems_cookie;
2076 if (!in_interrupt() && !(gfp & __GFP_THISNODE))
2077 pol = get_task_policy(current);
2079 retry_cpuset:
2080 cpuset_mems_cookie = read_mems_allowed_begin();
2083 * No reference counting needed for current->mempolicy
2084 * nor system default_policy
2086 if (pol->mode == MPOL_INTERLEAVE)
2087 page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
2088 else
2089 page = __alloc_pages_nodemask(gfp, order,
2090 policy_zonelist(gfp, pol, numa_node_id()),
2091 policy_nodemask(gfp, pol));
2093 if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
2094 goto retry_cpuset;
2096 return page;
2098 EXPORT_SYMBOL(alloc_pages_current);
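/*
 * Illustrative use (sketch): on CONFIG_NUMA kernels a plain alloc_pages()
 * call resolves to alloc_pages_current(), so e.g.
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 0);
 *
 * is placed according to current->mempolicy unless called from interrupt
 * context or with __GFP_THISNODE.
 */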
2100 int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2102 struct mempolicy *pol = mpol_dup(vma_policy(src));
2104 if (IS_ERR(pol))
2105 return PTR_ERR(pol);
2106 dst->vm_policy = pol;
2107 return 0;
2111 * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
2112 * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
2113 * with the mems_allowed returned by cpuset_mems_allowed(). This
2114 * keeps mempolicies cpuset relative after its cpuset moves. See
2115 * further kernel/cpuset.c update_nodemask().
2117 * current's mempolicy may be rebound by another task (the task that changes
2118 * the cpuset's mems), so we need not do any rebind work for the current task.
2121 /* Slow path of a mempolicy duplicate */
2122 struct mempolicy *__mpol_dup(struct mempolicy *old)
2124 struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2126 if (!new)
2127 return ERR_PTR(-ENOMEM);
2129 /* task's mempolicy is protected by alloc_lock */
2130 if (old == current->mempolicy) {
2131 task_lock(current);
2132 *new = *old;
2133 task_unlock(current);
2134 } else
2135 *new = *old;
2137 if (current_cpuset_is_being_rebound()) {
2138 nodemask_t mems = cpuset_mems_allowed(current);
2139 if (new->flags & MPOL_F_REBINDING)
2140 mpol_rebind_policy(new, &mems, MPOL_REBIND_STEP2);
2141 else
2142 mpol_rebind_policy(new, &mems, MPOL_REBIND_ONCE);
2144 atomic_set(&new->refcnt, 1);
2145 return new;
2148 /* Slow path of a mempolicy comparison */
2149 bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
2151 if (!a || !b)
2152 return false;
2153 if (a->mode != b->mode)
2154 return false;
2155 if (a->flags != b->flags)
2156 return false;
2157 if (mpol_store_user_nodemask(a))
2158 if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
2159 return false;
2161 switch (a->mode) {
2162 case MPOL_BIND:
2163 /* Fall through */
2164 case MPOL_INTERLEAVE:
2165 return !!nodes_equal(a->v.nodes, b->v.nodes);
2166 case MPOL_PREFERRED:
2167 return a->v.preferred_node == b->v.preferred_node;
2168 default:
2169 BUG();
2170 return false;
2175 * Shared memory backing store policy support.
2177 * Remember policies even when nobody has shared memory mapped.
2178 * The policies are kept in a red-black tree linked from the inode.
2179 * They are protected by the sp->lock rwlock, which should be held
2180 * for any accesses to the tree.
2184 * lookup first element intersecting start-end. Caller holds sp->lock for
2185 * reading or for writing
2187 static struct sp_node *
2188 sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
2190 struct rb_node *n = sp->root.rb_node;
2192 while (n) {
2193 struct sp_node *p = rb_entry(n, struct sp_node, nd);
2195 if (start >= p->end)
2196 n = n->rb_right;
2197 else if (end <= p->start)
2198 n = n->rb_left;
2199 else
2200 break;
2202 if (!n)
2203 return NULL;
2204 for (;;) {
2205 struct sp_node *w = NULL;
2206 struct rb_node *prev = rb_prev(n);
2207 if (!prev)
2208 break;
2209 w = rb_entry(prev, struct sp_node, nd);
2210 if (w->end <= start)
2211 break;
2212 n = prev;
2214 return rb_entry(n, struct sp_node, nd);
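/*
 * Example (illustrative): if the tree holds the ranges [2,5) and [7,9),
 * sp_lookup(sp, 4, 8) first finds either intersecting node, then walks
 * back via rb_prev() and returns the node for [2,5), the first one
 * intersecting [4,8).
 */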
2218 * Insert a new shared policy into the list. Caller holds sp->lock for
2219 * writing.
2221 static void sp_insert(struct shared_policy *sp, struct sp_node *new)
2223 struct rb_node **p = &sp->root.rb_node;
2224 struct rb_node *parent = NULL;
2225 struct sp_node *nd;
2227 while (*p) {
2228 parent = *p;
2229 nd = rb_entry(parent, struct sp_node, nd);
2230 if (new->start < nd->start)
2231 p = &(*p)->rb_left;
2232 else if (new->end > nd->end)
2233 p = &(*p)->rb_right;
2234 else
2235 BUG();
2237 rb_link_node(&new->nd, parent, p);
2238 rb_insert_color(&new->nd, &sp->root);
2239 pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
2240 new->policy ? new->policy->mode : 0);
2243 /* Find shared policy intersecting idx */
2244 struct mempolicy *
2245 mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
2247 struct mempolicy *pol = NULL;
2248 struct sp_node *sn;
2250 if (!sp->root.rb_node)
2251 return NULL;
2252 read_lock(&sp->lock);
2253 sn = sp_lookup(sp, idx, idx+1);
2254 if (sn) {
2255 mpol_get(sn->policy);
2256 pol = sn->policy;
2258 read_unlock(&sp->lock);
2259 return pol;
2262 static void sp_free(struct sp_node *n)
2264 mpol_put(n->policy);
2265 kmem_cache_free(sn_cache, n);
2269 * mpol_misplaced - check whether current page node is valid in policy
2271 * @page: page to be checked
2272 * @vma: vm area where page mapped
2273 * @addr: virtual address where page mapped
2275 * Look up the current policy node id for vma,addr and compare it to the page's
2276 * node id.
2278 * Returns:
2279 * -1 - not misplaced, page is in the right node
2280 * node - node id where the page should be
2282 * Policy determination "mimics" alloc_page_vma().
2283 * Called from fault path where we know the vma and faulting address.
2285 int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
2287 struct mempolicy *pol;
2288 struct zone *zone;
2289 int curnid = page_to_nid(page);
2290 unsigned long pgoff;
2291 int thiscpu = raw_smp_processor_id();
2292 int thisnid = cpu_to_node(thiscpu);
2293 int polnid = -1;
2294 int ret = -1;
2296 BUG_ON(!vma);
2298 pol = get_vma_policy(vma, addr);
2299 if (!(pol->flags & MPOL_F_MOF))
2300 goto out;
2302 switch (pol->mode) {
2303 case MPOL_INTERLEAVE:
2304 BUG_ON(addr >= vma->vm_end);
2305 BUG_ON(addr < vma->vm_start);
2307 pgoff = vma->vm_pgoff;
2308 pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
2309 polnid = offset_il_node(pol, vma, pgoff);
2310 break;
2312 case MPOL_PREFERRED:
2313 if (pol->flags & MPOL_F_LOCAL)
2314 polnid = numa_node_id();
2315 else
2316 polnid = pol->v.preferred_node;
2317 break;
2319 case MPOL_BIND:
2321 * allows binding to multiple nodes.
2322 * use current page if in policy nodemask,
2323 * else select nearest allowed node, if any.
2324 * If no allowed nodes, use current [!misplaced].
2326 if (node_isset(curnid, pol->v.nodes))
2327 goto out;
2328 (void)first_zones_zonelist(
2329 node_zonelist(numa_node_id(), GFP_HIGHUSER),
2330 gfp_zone(GFP_HIGHUSER),
2331 &pol->v.nodes, &zone);
2332 polnid = zone->node;
2333 break;
2335 default:
2336 BUG();
2339 /* Migrate the page towards the node whose CPU is referencing it */
2340 if (pol->flags & MPOL_F_MORON) {
2341 polnid = thisnid;
2343 if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
2344 goto out;
2347 if (curnid != polnid)
2348 ret = polnid;
2349 out:
2350 mpol_cond_put(pol);
2352 return ret;
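/*
 * Illustrative use (sketch of the NUMA hinting fault path, which lives
 * elsewhere): a caller typically does something like
 *
 *	target_nid = mpol_misplaced(page, vma, addr);
 *	if (target_nid != -1)
 *		migrate_misplaced_page(page, vma, target_nid);
 *
 * treating -1 as "leave the page where it is".
 */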
2355 static void sp_delete(struct shared_policy *sp, struct sp_node *n)
2357 pr_debug("deleting %lx-%lx\n", n->start, n->end);
2358 rb_erase(&n->nd, &sp->root);
2359 sp_free(n);
2362 static void sp_node_init(struct sp_node *node, unsigned long start,
2363 unsigned long end, struct mempolicy *pol)
2365 node->start = start;
2366 node->end = end;
2367 node->policy = pol;
2370 static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2371 struct mempolicy *pol)
2373 struct sp_node *n;
2374 struct mempolicy *newpol;
2376 n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2377 if (!n)
2378 return NULL;
2380 newpol = mpol_dup(pol);
2381 if (IS_ERR(newpol)) {
2382 kmem_cache_free(sn_cache, n);
2383 return NULL;
2385 newpol->flags |= MPOL_F_SHARED;
2386 sp_node_init(n, start, end, newpol);
2388 return n;
2391 /* Replace a policy range. */
2392 static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
2393 unsigned long end, struct sp_node *new)
2395 struct sp_node *n;
2396 struct sp_node *n_new = NULL;
2397 struct mempolicy *mpol_new = NULL;
2398 int ret = 0;
2400 restart:
2401 write_lock(&sp->lock);
2402 n = sp_lookup(sp, start, end);
2403 /* Take care of old policies in the same range. */
2404 while (n && n->start < end) {
2405 struct rb_node *next = rb_next(&n->nd);
2406 if (n->start >= start) {
2407 if (n->end <= end)
2408 sp_delete(sp, n);
2409 else
2410 n->start = end;
2411 } else {
2412 /* Old policy spanning whole new range. */
2413 if (n->end > end) {
2414 if (!n_new)
2415 goto alloc_new;
2417 *mpol_new = *n->policy;
2418 atomic_set(&mpol_new->refcnt, 1);
2419 sp_node_init(n_new, end, n->end, mpol_new);
2420 n->end = start;
2421 sp_insert(sp, n_new);
2422 n_new = NULL;
2423 mpol_new = NULL;
2424 break;
2425 } else
2426 n->end = start;
2428 if (!next)
2429 break;
2430 n = rb_entry(next, struct sp_node, nd);
2432 if (new)
2433 sp_insert(sp, new);
2434 write_unlock(&sp->lock);
2435 ret = 0;
2437 err_out:
2438 if (mpol_new)
2439 mpol_put(mpol_new);
2440 if (n_new)
2441 kmem_cache_free(sn_cache, n_new);
2443 return ret;
2445 alloc_new:
2446 write_unlock(&sp->lock);
2447 ret = -ENOMEM;
2448 n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2449 if (!n_new)
2450 goto err_out;
2451 mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2452 if (!mpol_new)
2453 goto err_out;
2454 goto restart;
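/*
 * Example (illustrative): with one existing node covering [0,10) and a
 * replace over [3,7), the old node is trimmed to [0,3), a copy of its
 * policy is inserted as [7,10), and the new node [3,7) goes in between.
 */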
2458 * mpol_shared_policy_init - initialize shared policy for inode
2459 * @sp: pointer to inode shared policy
2460 * @mpol: struct mempolicy to install
2462 * Install non-NULL @mpol in inode's shared policy rb-tree.
2463 * On entry, the current task has a reference on a non-NULL @mpol.
2464 * This must be released on exit.
2465 * This is called at get_inode() time, so we can use GFP_KERNEL allocations.
2467 void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
2469 int ret;
2471 sp->root = RB_ROOT; /* empty tree == default mempolicy */
2472 rwlock_init(&sp->lock);
2474 if (mpol) {
2475 struct vm_area_struct pvma;
2476 struct mempolicy *new;
2477 NODEMASK_SCRATCH(scratch);
2479 if (!scratch)
2480 goto put_mpol;
2481 /* contextualize the tmpfs mount point mempolicy */
2482 new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
2483 if (IS_ERR(new))
2484 goto free_scratch; /* no valid nodemask intersection */
2486 task_lock(current);
2487 ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
2488 task_unlock(current);
2489 if (ret)
2490 goto put_new;
2492 /* Create pseudo-vma that contains just the policy */
2493 memset(&pvma, 0, sizeof(struct vm_area_struct));
2494 pvma.vm_end = TASK_SIZE; /* policy covers entire file */
2495 mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
2497 put_new:
2498 mpol_put(new); /* drop initial ref */
2499 free_scratch:
2500 NODEMASK_SCRATCH_FREE(scratch);
2501 put_mpol:
2502 mpol_put(mpol); /* drop our incoming ref on sb mpol */
2506 int mpol_set_shared_policy(struct shared_policy *info,
2507 struct vm_area_struct *vma, struct mempolicy *npol)
2509 int err;
2510 struct sp_node *new = NULL;
2511 unsigned long sz = vma_pages(vma);
2513 pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
2514 vma->vm_pgoff,
2515 sz, npol ? npol->mode : -1,
2516 npol ? npol->flags : -1,
2517 npol ? nodes_addr(npol->v.nodes)[0] : NUMA_NO_NODE);
2519 if (npol) {
2520 new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
2521 if (!new)
2522 return -ENOMEM;
2524 err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
2525 if (err && new)
2526 sp_free(new);
2527 return err;
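/*
 * Illustrative caller (sketch, based on how tmpfs wires this up in
 * mm/shmem.c): the vm_ops->set_policy hook of a shared mapping ends up
 * doing roughly
 *
 *	mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, new_mpol);
 *
 * so the policy is remembered in the inode even after the mapping is
 * gone.
 */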
2530 /* Free a backing policy store on inode delete. */
2531 void mpol_free_shared_policy(struct shared_policy *p)
2533 struct sp_node *n;
2534 struct rb_node *next;
2536 if (!p->root.rb_node)
2537 return;
2538 write_lock(&p->lock);
2539 next = rb_first(&p->root);
2540 while (next) {
2541 n = rb_entry(next, struct sp_node, nd);
2542 next = rb_next(&n->nd);
2543 sp_delete(p, n);
2545 write_unlock(&p->lock);
2548 #ifdef CONFIG_NUMA_BALANCING
2549 static int __initdata numabalancing_override;
2551 static void __init check_numabalancing_enable(void)
2553 bool numabalancing_default = false;
2555 if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
2556 numabalancing_default = true;
2558 /* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
2559 if (numabalancing_override)
2560 set_numabalancing_state(numabalancing_override == 1);
2562 if (num_online_nodes() > 1 && !numabalancing_override) {
2563 pr_info("%s automatic NUMA balancing. "
2564 "Configure with numa_balancing= or the "
2565 "kernel.numa_balancing sysctl",
2566 numabalancing_default ? "Enabling" : "Disabling");
2567 set_numabalancing_state(numabalancing_default);
2571 static int __init setup_numabalancing(char *str)
2573 int ret = 0;
2574 if (!str)
2575 goto out;
2577 if (!strcmp(str, "enable")) {
2578 numabalancing_override = 1;
2579 ret = 1;
2580 } else if (!strcmp(str, "disable")) {
2581 numabalancing_override = -1;
2582 ret = 1;
2584 out:
2585 if (!ret)
2586 pr_warn("Unable to parse numa_balancing=\n");
2588 return ret;
2590 __setup("numa_balancing=", setup_numabalancing);
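/*
 * Example (illustrative): booting with "numa_balancing=disable" on the
 * kernel command line keeps automatic NUMA balancing off regardless of
 * CONFIG_NUMA_BALANCING_DEFAULT_ENABLED; it can still be toggled later
 * through the kernel.numa_balancing sysctl.
 */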
2591 #else
2592 static inline void __init check_numabalancing_enable(void)
2595 #endif /* CONFIG_NUMA_BALANCING */
2597 /* assumes fs == KERNEL_DS */
2598 void __init numa_policy_init(void)
2600 nodemask_t interleave_nodes;
2601 unsigned long largest = 0;
2602 int nid, prefer = 0;
2604 policy_cache = kmem_cache_create("numa_policy",
2605 sizeof(struct mempolicy),
2606 0, SLAB_PANIC, NULL);
2608 sn_cache = kmem_cache_create("shared_policy_node",
2609 sizeof(struct sp_node),
2610 0, SLAB_PANIC, NULL);
2612 for_each_node(nid) {
2613 preferred_node_policy[nid] = (struct mempolicy) {
2614 .refcnt = ATOMIC_INIT(1),
2615 .mode = MPOL_PREFERRED,
2616 .flags = MPOL_F_MOF | MPOL_F_MORON,
2617 .v = { .preferred_node = nid, },
2622 * Set interleaving policy for system init. Interleaving is only
2623 * enabled across suitably sized nodes (default is >= 16MB), falling
2624 * back to the largest node if they're all smaller.
2626 nodes_clear(interleave_nodes);
2627 for_each_node_state(nid, N_MEMORY) {
2628 unsigned long total_pages = node_present_pages(nid);
2630 /* Preserve the largest node */
2631 if (largest < total_pages) {
2632 largest = total_pages;
2633 prefer = nid;
2636 /* Interleave this node? */
2637 if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2638 node_set(nid, interleave_nodes);
2641 /* All too small, use the largest */
2642 if (unlikely(nodes_empty(interleave_nodes)))
2643 node_set(prefer, interleave_nodes);
2645 if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
2646 pr_err("%s: interleaving failed\n", __func__);
2648 check_numabalancing_enable();
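/*
 * Worked example (illustrative): with 4KB pages the 16MB interleaving
 * threshold above corresponds to node_present_pages(nid) >= 4096, since
 * 4096 << PAGE_SHIFT == 16 << 20 for PAGE_SHIFT == 12.
 */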
2651 /* Reset policy of current process to default */
2652 void numa_default_policy(void)
2654 do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
2658 * Parse and format mempolicy from/to strings
2662 * "local" is implemented internally by MPOL_PREFERRED with MPOL_F_LOCAL flag.
2664 static const char * const policy_modes[] =
2666 [MPOL_DEFAULT] = "default",
2667 [MPOL_PREFERRED] = "prefer",
2668 [MPOL_BIND] = "bind",
2669 [MPOL_INTERLEAVE] = "interleave",
2670 [MPOL_LOCAL] = "local",
2674 #ifdef CONFIG_TMPFS
2676 * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
2677 * @str: string containing mempolicy to parse
2678 * @mpol: pointer to struct mempolicy pointer, returned on success.
2680 * Format of input:
2681 * <mode>[=<flags>][:<nodelist>]
2683 * On success, returns 0, else 1
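 *
 * Illustrative inputs (sketch, not exhaustive):
 *	"bind:0-3"		MPOL_BIND over nodes 0-3
 *	"interleave=static:0,2"	MPOL_INTERLEAVE with MPOL_F_STATIC_NODES
 *	"prefer:1"		MPOL_PREFERRED on node 1
 *	"local"			MPOL_PREFERRED with MPOL_F_LOCAL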
2685 int mpol_parse_str(char *str, struct mempolicy **mpol)
2687 struct mempolicy *new = NULL;
2688 unsigned short mode;
2689 unsigned short mode_flags;
2690 nodemask_t nodes;
2691 char *nodelist = strchr(str, ':');
2692 char *flags = strchr(str, '=');
2693 int err = 1;
2695 if (nodelist) {
2696 /* NUL-terminate mode or flags string */
2697 *nodelist++ = '\0';
2698 if (nodelist_parse(nodelist, nodes))
2699 goto out;
2700 if (!nodes_subset(nodes, node_states[N_MEMORY]))
2701 goto out;
2702 } else
2703 nodes_clear(nodes);
2705 if (flags)
2706 *flags++ = '\0'; /* terminate mode string */
2708 for (mode = 0; mode < MPOL_MAX; mode++) {
2709 if (!strcmp(str, policy_modes[mode])) {
2710 break;
2713 if (mode >= MPOL_MAX)
2714 goto out;
2716 switch (mode) {
2717 case MPOL_PREFERRED:
2719 * Insist on a nodelist of one node only
2721 if (nodelist) {
2722 char *rest = nodelist;
2723 while (isdigit(*rest))
2724 rest++;
2725 if (*rest)
2726 goto out;
2728 break;
2729 case MPOL_INTERLEAVE:
2731 * Default to online nodes with memory if no nodelist
2733 if (!nodelist)
2734 nodes = node_states[N_MEMORY];
2735 break;
2736 case MPOL_LOCAL:
2738 * Don't allow a nodelist; mpol_new() checks flags
2740 if (nodelist)
2741 goto out;
2742 mode = MPOL_PREFERRED;
2743 break;
2744 case MPOL_DEFAULT:
2746 * Insist on an empty nodelist
2748 if (!nodelist)
2749 err = 0;
2750 goto out;
2751 case MPOL_BIND:
2753 * Insist on a nodelist
2755 if (!nodelist)
2756 goto out;
2759 mode_flags = 0;
2760 if (flags) {
2762 * Currently, we only support two mutually exclusive
2763 * mode flags.
2765 if (!strcmp(flags, "static"))
2766 mode_flags |= MPOL_F_STATIC_NODES;
2767 else if (!strcmp(flags, "relative"))
2768 mode_flags |= MPOL_F_RELATIVE_NODES;
2769 else
2770 goto out;
2773 new = mpol_new(mode, mode_flags, &nodes);
2774 if (IS_ERR(new))
2775 goto out;
2778 * Save nodes for mpol_to_str() to show the tmpfs mount options
2779 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
2781 if (mode != MPOL_PREFERRED)
2782 new->v.nodes = nodes;
2783 else if (nodelist)
2784 new->v.preferred_node = first_node(nodes);
2785 else
2786 new->flags |= MPOL_F_LOCAL;
2789 * Save nodes for contextualization: this will be used to "clone"
2790 * the mempolicy in a specific context [cpuset] at a later time.
2792 new->w.user_nodemask = nodes;
2794 err = 0;
2796 out:
2797 /* Restore string for error message */
2798 if (nodelist)
2799 *--nodelist = ':';
2800 if (flags)
2801 *--flags = '=';
2802 if (!err)
2803 *mpol = new;
2804 return err;
2806 #endif /* CONFIG_TMPFS */
2809 * mpol_to_str - format a mempolicy structure for printing
2810 * @buffer: to contain formatted mempolicy string
2811 * @maxlen: length of @buffer
2812 * @pol: pointer to mempolicy to be formatted
2814 * Convert @pol into a string. If @buffer is too short, truncate the string.
2815 * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the
2816 * longest flag, "relative", and to display at least a few node ids.
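 *
 * Illustrative outputs (sketch): "default", "local", "prefer:1",
 * "bind=static:0-3", "interleave:0-3".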
2818 void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
2820 char *p = buffer;
2821 nodemask_t nodes = NODE_MASK_NONE;
2822 unsigned short mode = MPOL_DEFAULT;
2823 unsigned short flags = 0;
2825 if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
2826 mode = pol->mode;
2827 flags = pol->flags;
2830 switch (mode) {
2831 case MPOL_DEFAULT:
2832 break;
2833 case MPOL_PREFERRED:
2834 if (flags & MPOL_F_LOCAL)
2835 mode = MPOL_LOCAL;
2836 else
2837 node_set(pol->v.preferred_node, nodes);
2838 break;
2839 case MPOL_BIND:
2840 case MPOL_INTERLEAVE:
2841 nodes = pol->v.nodes;
2842 break;
2843 default:
2844 WARN_ON_ONCE(1);
2845 snprintf(p, maxlen, "unknown");
2846 return;
2849 p += snprintf(p, maxlen, "%s", policy_modes[mode]);
2851 if (flags & MPOL_MODE_FLAGS) {
2852 p += snprintf(p, buffer + maxlen - p, "=");
2855 * Currently, the only defined flags are mutually exclusive
2857 if (flags & MPOL_F_STATIC_NODES)
2858 p += snprintf(p, buffer + maxlen - p, "static");
2859 else if (flags & MPOL_F_RELATIVE_NODES)
2860 p += snprintf(p, buffer + maxlen - p, "relative");
2863 if (!nodes_empty(nodes))
2864 p += scnprintf(p, buffer + maxlen - p, ":%*pbl",
2865 nodemask_pr_args(&nodes));