/*
 * Memory Migration functionality - linux/mm/migrate.c
 *
 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
 *
 * Page migration was first developed in the context of the memory hotplug
 * project. The main authors of the migration code are:
 *
 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
 * Hirokazu Takahashi <taka@valinux.co.jp>
 * Dave Hansen <haveblue@us.ibm.com>
 * Christoph Lameter
 */

#include <linux/migrate.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
#include <linux/nsproxy.h>
#include <linux/pagevec.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/writeback.h>
#include <linux/mempolicy.h>
#include <linux/vmalloc.h>
#include <linux/security.h>
#include <linux/backing-dev.h>
#include <linux/compaction.h>
#include <linux/syscalls.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/gfp.h>
#include <linux/balloon_compaction.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/page_owner.h>
#include <linux/ptrace.h>

#include <asm/tlbflush.h>

#define CREATE_TRACE_POINTS
#include <trace/events/migrate.h>

#include "internal.h"

/*
 * migrate_prep() needs to be called before we start compiling a list of pages
 * to be migrated using isolate_lru_page(). If scheduling work on other CPUs is
 * undesirable, use migrate_prep_local()
 */
int migrate_prep(void)
{
        /*
         * Clear the LRU lists so pages can be isolated.
         * Note that pages may be moved off the LRU after we have
         * drained them. Those pages will fail to migrate like other
         * pages that may be busy.
         */
        lru_add_drain_all();

        return 0;
}

/* Do the necessary work of migrate_prep but not if it involves other CPUs */
int migrate_prep_local(void)
{
        lru_add_drain();

        return 0;
}

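/*
 * Illustrative sketch, not part of this file: the call ordering that the
 * migrate_prep() comment above describes. migrate_prep() runs before pages
 * are isolated; the helper name "example_isolate_one" is hypothetical and
 * mirrors what callers such as do_move_page_to_node_array() below do.
 */
#if 0	/* example only, never compiled */
static int example_isolate_one(struct page *page, struct list_head *pagelist)
{
        int err;

        migrate_prep();		/* drain pagevecs so the page is on the LRU */

        err = isolate_lru_page(page);	/* caller holds a page reference */
        if (err)
                return err;

        list_add_tail(&page->lru, pagelist);
        inc_node_page_state(page, NR_ISOLATED_ANON +
                            page_is_file_cache(page));
        return 0;
}
#endif
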
bool isolate_movable_page(struct page *page, isolate_mode_t mode)
{
        struct address_space *mapping;

        /*
         * Avoid burning cycles with pages that are still under __free_pages(),
         * or just got freed under us.
         *
         * In case we 'win' a race for a movable page being freed under us and
         * raise its refcount preventing __free_pages() from doing its job,
         * the put_page() at the end of this block will take care of
         * releasing this page, thus avoiding a nasty leakage.
         */
        if (unlikely(!get_page_unless_zero(page)))
                goto out;

        /*
         * Check PageMovable before holding a PG_lock because the page's
         * owner assumes that nobody touches the PG_lock of a newly
         * allocated page, so unconditionally grabbing the lock ruins the
         * owner's side.
         */
        if (unlikely(!__PageMovable(page)))
                goto out_putpage;
        /*
         * As movable pages are not isolated from LRU lists, concurrent
         * compaction threads can race against page migration functions
         * as well as race against the release of a page.
         *
         * In order to avoid having an already isolated movable page
         * being (wrongly) re-isolated while it is under migration,
         * or to avoid attempting to isolate pages being released,
         * let's be sure we have the page lock
         * before proceeding with the movable page isolation steps.
         */
        if (unlikely(!trylock_page(page)))
                goto out_putpage;

        if (!PageMovable(page) || PageIsolated(page))
                goto out_no_isolated;

        mapping = page_mapping(page);
        VM_BUG_ON_PAGE(!mapping, page);

        if (!mapping->a_ops->isolate_page(page, mode))
                goto out_no_isolated;

        /* Driver shouldn't use PG_isolated bit of page->flags */
        WARN_ON_ONCE(PageIsolated(page));
        __SetPageIsolated(page);
        unlock_page(page);

        return true;

out_no_isolated:
        unlock_page(page);
out_putpage:
        put_page(page);
out:
        return false;
}

/* It should be called on a page that is PG_movable */
void putback_movable_page(struct page *page)
{
        struct address_space *mapping;

        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(!PageMovable(page), page);
        VM_BUG_ON_PAGE(!PageIsolated(page), page);

        mapping = page_mapping(page);
        mapping->a_ops->putback_page(page);
        __ClearPageIsolated(page);
}

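/*
 * Illustrative sketch, not part of this file: the shape of the non-LRU
 * movable-page protocol that isolate_movable_page() and
 * putback_movable_page() drive. A driver marks its pages movable (see
 * __SetPageMovable()) and supplies the three callbacks below; all
 * "example_" names are hypothetical.
 */
#if 0	/* example only, never compiled */
static bool example_isolate_page(struct page *page, isolate_mode_t mode)
{
        /* Detach the page from the driver's own lists; runs under PG_lock. */
        return true;
}

static int example_migratepage(struct address_space *mapping,
                               struct page *newpage, struct page *page,
                               enum migrate_mode mode)
{
        /* Copy contents and driver metadata from page to newpage. */
        return MIGRATEPAGE_SUCCESS;
}

static void example_putback_page(struct page *page)
{
        /* Re-attach an isolated page when migration did not happen. */
}

static const struct address_space_operations example_movable_aops = {
        .isolate_page	= example_isolate_page,
        .migratepage	= example_migratepage,
        .putback_page	= example_putback_page,
};
#endif
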
/*
 * Put previously isolated pages back onto the appropriate lists
 * from where they were once taken off for compaction/migration.
 *
 * This function shall be used whenever the isolated pageset has been
 * built from LRU, balloon or hugetlbfs pages. See
 * isolate_migratepages_range() and isolate_huge_page().
 */
void putback_movable_pages(struct list_head *l)
{
        struct page *page;
        struct page *page2;

        list_for_each_entry_safe(page, page2, l, lru) {
                if (unlikely(PageHuge(page))) {
                        putback_active_hugepage(page);
                        continue;
                }
                list_del(&page->lru);
                /*
                 * We isolated a non-LRU movable page, so we can use
                 * __PageMovable here because an LRU page's mapping cannot
                 * have PAGE_MAPPING_MOVABLE set.
                 */
                if (unlikely(__PageMovable(page))) {
                        VM_BUG_ON_PAGE(!PageIsolated(page), page);
                        lock_page(page);
                        if (PageMovable(page))
                                putback_movable_page(page);
                        else
                                __ClearPageIsolated(page);
                        unlock_page(page);
                        put_page(page);
                } else {
                        dec_node_page_state(page, NR_ISOLATED_ANON +
                                        page_is_file_cache(page));
                        putback_lru_page(page);
                }
        }
}

/*
 * Restore a potential migration pte to a working pte entry
 */
static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
                                 unsigned long addr, void *old)
{
        struct mm_struct *mm = vma->vm_mm;
        swp_entry_t entry;
        pmd_t *pmd;
        pte_t *ptep, pte;
        spinlock_t *ptl;

        if (unlikely(PageHuge(new))) {
                ptep = huge_pte_offset(mm, addr);
                if (!ptep)
                        goto out;
                ptl = huge_pte_lockptr(hstate_vma(vma), mm, ptep);
        } else {
                pmd = mm_find_pmd(mm, addr);
                if (!pmd)
                        goto out;

                ptep = pte_offset_map(pmd, addr);

                /*
                 * Peek to check is_swap_pte() before taking ptlock?  No, we
                 * can race mremap's move_ptes(), which skips anon_vma lock.
                 */

                ptl = pte_lockptr(mm, pmd);
        }

        spin_lock(ptl);
        pte = *ptep;
        if (!is_swap_pte(pte))
                goto unlock;

        entry = pte_to_swp_entry(pte);

        if (!is_migration_entry(entry) ||
            migration_entry_to_page(entry) != old)
                goto unlock;

        get_page(new);
        pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot)));
        if (pte_swp_soft_dirty(*ptep))
                pte = pte_mksoft_dirty(pte);

        /* Recheck VMA as permissions can change since migration started */
        if (is_write_migration_entry(entry))
                pte = maybe_mkwrite(pte, vma);

#ifdef CONFIG_HUGETLB_PAGE
        if (PageHuge(new)) {
                pte = pte_mkhuge(pte);
                pte = arch_make_huge_pte(pte, vma, new, 0);
        }
#endif
        flush_dcache_page(new);
        set_pte_at(mm, addr, ptep, pte);

        if (PageHuge(new)) {
                if (PageAnon(new))
                        hugepage_add_anon_rmap(new, vma, addr);
                else
                        page_dup_rmap(new, true);
        } else if (PageAnon(new))
                page_add_anon_rmap(new, vma, addr, false);
        else
                page_add_file_rmap(new, false);

        if (vma->vm_flags & VM_LOCKED && !PageTransCompound(new))
                mlock_vma_page(new);

        /* No need to invalidate - it was non-present before */
        update_mmu_cache(vma, addr, ptep);
unlock:
        pte_unmap_unlock(ptep, ptl);
out:
        return SWAP_AGAIN;
}

/*
 * Get rid of all migration entries and replace them by
 * references to the indicated page.
 */
void remove_migration_ptes(struct page *old, struct page *new, bool locked)
{
        struct rmap_walk_control rwc = {
                .rmap_one = remove_migration_pte,
                .arg = old,
        };

        if (locked)
                rmap_walk_locked(new, &rwc);
        else
                rmap_walk(new, &rwc);
}

/*
 * Something used the pte of a page under migration. We need to
 * get to the page and wait until migration is finished.
 * When we return from this function the fault will be retried.
 */
void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
                                spinlock_t *ptl)
{
        pte_t pte;
        swp_entry_t entry;
        struct page *page;

        spin_lock(ptl);
        pte = *ptep;
        if (!is_swap_pte(pte))
                goto out;

        entry = pte_to_swp_entry(pte);
        if (!is_migration_entry(entry))
                goto out;

        page = migration_entry_to_page(entry);

        /*
         * Once the radix-tree replacement of page migration has started,
         * page_count *must* be zero. And, we don't want to call
         * wait_on_page_locked() against a page without get_page().
         * So, we use get_page_unless_zero() here. Even if it fails, the
         * page fault will occur again.
         */
        if (!get_page_unless_zero(page))
                goto out;
        pte_unmap_unlock(ptep, ptl);
        wait_on_page_locked(page);
        put_page(page);
        return;
out:
        pte_unmap_unlock(ptep, ptl);
}

void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
                                unsigned long address)
{
        spinlock_t *ptl = pte_lockptr(mm, pmd);
        pte_t *ptep = pte_offset_map(pmd, address);
        __migration_entry_wait(mm, ptep, ptl);
}

void migration_entry_wait_huge(struct vm_area_struct *vma,
                struct mm_struct *mm, pte_t *pte)
{
        spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), mm, pte);
        __migration_entry_wait(mm, pte, ptl);
}

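/*
 * Illustrative sketch, not part of this file: how a fault handler is
 * expected to use migration_entry_wait() when it trips over a migration
 * entry, in the style of do_swap_page(); the function name is hypothetical.
 */
#if 0	/* example only, never compiled */
static int example_handle_swap_pte(struct mm_struct *mm, pmd_t *pmd,
                                   unsigned long address, pte_t orig_pte)
{
        swp_entry_t entry = pte_to_swp_entry(orig_pte);

        if (is_migration_entry(entry)) {
                /* Sleep until migration completes, then retry the fault. */
                migration_entry_wait(mm, pmd, address);
                return 0;
        }
        /* ... normal swap-in path ... */
        return 0;
}
#endif
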
#ifdef CONFIG_BLOCK
/* Returns true if all buffers are successfully locked */
static bool buffer_migrate_lock_buffers(struct buffer_head *head,
                                                        enum migrate_mode mode)
{
        struct buffer_head *bh = head;

        /* Simple case, sync compaction */
        if (mode != MIGRATE_ASYNC) {
                do {
                        get_bh(bh);
                        lock_buffer(bh);
                        bh = bh->b_this_page;

                } while (bh != head);

                return true;
        }

        /* async case, we cannot block on lock_buffer so use trylock_buffer */
        do {
                get_bh(bh);
                if (!trylock_buffer(bh)) {
                        /*
                         * We failed to lock the buffer and cannot stall in
                         * async migration. Release the taken locks.
                         */
                        struct buffer_head *failed_bh = bh;
                        put_bh(failed_bh);
                        bh = head;
                        while (bh != failed_bh) {
                                unlock_buffer(bh);
                                put_bh(bh);
                                bh = bh->b_this_page;
                        }
                        return false;
                }

                bh = bh->b_this_page;
        } while (bh != head);
        return true;
}
#else
static inline bool buffer_migrate_lock_buffers(struct buffer_head *head,
                                                        enum migrate_mode mode)
{
        return true;
}
#endif /* CONFIG_BLOCK */

/*
 * Replace the page in the mapping.
 *
 * The number of remaining references must be:
 * 1 for anonymous pages without a mapping
 * 2 for pages with a mapping
 * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
 */
int migrate_page_move_mapping(struct address_space *mapping,
                struct page *newpage, struct page *page,
                struct buffer_head *head, enum migrate_mode mode,
                int extra_count)
{
        struct zone *oldzone, *newzone;
        int dirty;
        int expected_count = 1 + extra_count;
        void **pslot;

        if (!mapping) {
                /* Anonymous page without mapping */
                if (page_count(page) != expected_count)
                        return -EAGAIN;

                /* No turning back from here */
                newpage->index = page->index;
                newpage->mapping = page->mapping;
                if (PageSwapBacked(page))
                        __SetPageSwapBacked(newpage);

                return MIGRATEPAGE_SUCCESS;
        }

        oldzone = page_zone(page);
        newzone = page_zone(newpage);

        spin_lock_irq(&mapping->tree_lock);

        pslot = radix_tree_lookup_slot(&mapping->page_tree,
                                        page_index(page));

        expected_count += 1 + page_has_private(page);
        if (page_count(page) != expected_count ||
                radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
                spin_unlock_irq(&mapping->tree_lock);
                return -EAGAIN;
        }

        if (!page_ref_freeze(page, expected_count)) {
                spin_unlock_irq(&mapping->tree_lock);
                return -EAGAIN;
        }

        /*
         * In the async migration case of moving a page with buffers, lock the
         * buffers using trylock before the mapping is moved. If the mapping
         * were moved first and we then failed to lock the buffers, we could
         * not move the mapping back due to the elevated page count and would
         * have to block waiting on other references to be dropped.
         */
        if (mode == MIGRATE_ASYNC && head &&
                        !buffer_migrate_lock_buffers(head, mode)) {
                page_ref_unfreeze(page, expected_count);
                spin_unlock_irq(&mapping->tree_lock);
                return -EAGAIN;
        }

        /*
         * Now we know that no one else is looking at the page:
         * no turning back from here.
         */
        newpage->index = page->index;
        newpage->mapping = page->mapping;
        if (PageSwapBacked(page))
                __SetPageSwapBacked(newpage);

        get_page(newpage);	/* add cache reference */
        if (PageSwapCache(page)) {
                SetPageSwapCache(newpage);
                set_page_private(newpage, page_private(page));
        }

        /* Move dirty while page refs frozen and newpage not yet exposed */
        dirty = PageDirty(page);
        if (dirty) {
                ClearPageDirty(page);
                SetPageDirty(newpage);
        }

        radix_tree_replace_slot(pslot, newpage);

        /*
         * Drop cache reference from old page by unfreezing
         * to one less reference.
         * We know this isn't the last reference.
         */
        page_ref_unfreeze(page, expected_count - 1);

        spin_unlock(&mapping->tree_lock);
        /* Leave irq disabled to prevent preemption while updating stats */

        /*
         * If moved to a different zone then also account
         * the page for that zone. Other VM counters will be
         * taken care of when we establish references to the
         * new page and drop references to the old page.
         *
         * Note that anonymous pages are accounted for
         * via NR_FILE_PAGES and NR_ANON_MAPPED if they
         * are mapped to swap space.
         */
        if (newzone != oldzone) {
                __dec_node_state(oldzone->zone_pgdat, NR_FILE_PAGES);
                __inc_node_state(newzone->zone_pgdat, NR_FILE_PAGES);
                if (PageSwapBacked(page) && !PageSwapCache(page)) {
                        __dec_node_state(oldzone->zone_pgdat, NR_SHMEM);
                        __inc_node_state(newzone->zone_pgdat, NR_SHMEM);
                }
                if (dirty && mapping_cap_account_dirty(mapping)) {
                        __dec_node_state(oldzone->zone_pgdat, NR_FILE_DIRTY);
                        __dec_zone_state(oldzone, NR_ZONE_WRITE_PENDING);
                        __inc_node_state(newzone->zone_pgdat, NR_FILE_DIRTY);
                        __inc_zone_state(newzone, NR_ZONE_WRITE_PENDING);
                }
        }
        local_irq_enable();

        return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL(migrate_page_move_mapping);

/*
 * The expected number of remaining references is the same as that
 * of migrate_page_move_mapping().
 */
int migrate_huge_page_move_mapping(struct address_space *mapping,
                                   struct page *newpage, struct page *page)
{
        int expected_count;
        void **pslot;

        spin_lock_irq(&mapping->tree_lock);

        pslot = radix_tree_lookup_slot(&mapping->page_tree,
                                        page_index(page));

        expected_count = 2 + page_has_private(page);
        if (page_count(page) != expected_count ||
                radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
                spin_unlock_irq(&mapping->tree_lock);
                return -EAGAIN;
        }

        if (!page_ref_freeze(page, expected_count)) {
                spin_unlock_irq(&mapping->tree_lock);
                return -EAGAIN;
        }

        newpage->index = page->index;
        newpage->mapping = page->mapping;

        get_page(newpage);

        radix_tree_replace_slot(pslot, newpage);

        page_ref_unfreeze(page, expected_count - 1);

        spin_unlock_irq(&mapping->tree_lock);

        return MIGRATEPAGE_SUCCESS;
}

/*
 * Gigantic pages are so large that we do not guarantee that page++ pointer
 * arithmetic will work across the entire page. We need something more
 * specialized.
 */
static void __copy_gigantic_page(struct page *dst, struct page *src,
                                int nr_pages)
{
        int i;
        struct page *dst_base = dst;
        struct page *src_base = src;

        for (i = 0; i < nr_pages; ) {
                cond_resched();
                copy_highpage(dst, src);

                i++;
                dst = mem_map_next(dst, dst_base, i);
                src = mem_map_next(src, src_base, i);
        }
}

static void copy_huge_page(struct page *dst, struct page *src)
{
        int i;
        int nr_pages;

        if (PageHuge(src)) {
                /* hugetlbfs page */
                struct hstate *h = page_hstate(src);
                nr_pages = pages_per_huge_page(h);

                if (unlikely(nr_pages > MAX_ORDER_NR_PAGES)) {
                        __copy_gigantic_page(dst, src, nr_pages);
                        return;
                }
        } else {
                /* thp page */
                BUG_ON(!PageTransHuge(src));
                nr_pages = hpage_nr_pages(src);
        }

        for (i = 0; i < nr_pages; i++) {
                cond_resched();
                copy_highpage(dst + i, src + i);
        }
}

/*
 * Copy the page to its new location
 */
void migrate_page_copy(struct page *newpage, struct page *page)
{
        int cpupid;

        if (PageHuge(page) || PageTransHuge(page))
                copy_huge_page(newpage, page);
        else
                copy_highpage(newpage, page);

        if (PageError(page))
                SetPageError(newpage);
        if (PageReferenced(page))
                SetPageReferenced(newpage);
        if (PageUptodate(page))
                SetPageUptodate(newpage);
        if (TestClearPageActive(page)) {
                VM_BUG_ON_PAGE(PageUnevictable(page), page);
                SetPageActive(newpage);
        } else if (TestClearPageUnevictable(page))
                SetPageUnevictable(newpage);
        if (PageChecked(page))
                SetPageChecked(newpage);
        if (PageMappedToDisk(page))
                SetPageMappedToDisk(newpage);

        /* Move dirty on pages not done by migrate_page_move_mapping() */
        if (PageDirty(page))
                SetPageDirty(newpage);

        if (page_is_young(page))
                set_page_young(newpage);
        if (page_is_idle(page))
                set_page_idle(newpage);

        /*
         * Copy NUMA information to the new page, to prevent over-eager
         * future migrations of this same page.
         */
        cpupid = page_cpupid_xchg_last(page, -1);
        page_cpupid_xchg_last(newpage, cpupid);

        ksm_migrate_page(newpage, page);
        /*
         * Please do not reorder this without considering how mm/ksm.c's
         * get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache().
         */
        if (PageSwapCache(page))
                ClearPageSwapCache(page);
        ClearPagePrivate(page);
        set_page_private(page, 0);

        /*
         * If any waiters have accumulated on the new page then
         * wake them up.
         */
        if (PageWriteback(newpage))
                end_page_writeback(newpage);

        copy_page_owner(page, newpage);

        mem_cgroup_migrate(page, newpage);
}
EXPORT_SYMBOL(migrate_page_copy);

/************************************************************
 *                    Migration functions
 ***********************************************************/

/*
 * Common logic to directly migrate a single LRU page suitable for
 * pages that do not use PagePrivate/PagePrivate2.
 *
 * Pages are locked upon entry and exit.
 */
int migrate_page(struct address_space *mapping,
                struct page *newpage, struct page *page,
                enum migrate_mode mode)
{
        int rc;

        BUG_ON(PageWriteback(page));	/* Writeback must be complete */

        rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);

        if (rc != MIGRATEPAGE_SUCCESS)
                return rc;

        migrate_page_copy(newpage, page);
        return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL(migrate_page);

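/*
 * Illustrative sketch, not part of this file: a filesystem whose pages
 * carry no private state can point its migratepage callback straight at
 * migrate_page(); the aops name below is hypothetical.
 */
#if 0	/* example only, never compiled */
static const struct address_space_operations example_simple_aops = {
        /* ... readpage, writepage, etc. ... */
        .migratepage	= migrate_page,
};
#endif
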
#ifdef CONFIG_BLOCK
/*
 * Migration function for pages with buffers. This function can only be used
 * if the underlying filesystem guarantees that no other references to "page"
 * exist.
 */
int buffer_migrate_page(struct address_space *mapping,
                struct page *newpage, struct page *page, enum migrate_mode mode)
{
        struct buffer_head *bh, *head;
        int rc;

        if (!page_has_buffers(page))
                return migrate_page(mapping, newpage, page, mode);

        head = page_buffers(page);

        rc = migrate_page_move_mapping(mapping, newpage, page, head, mode, 0);

        if (rc != MIGRATEPAGE_SUCCESS)
                return rc;

        /*
         * In the async case, migrate_page_move_mapping locked the buffers
         * with an IRQ-safe spinlock held. In the sync case, the buffers
         * need to be locked now.
         */
        if (mode != MIGRATE_ASYNC)
                BUG_ON(!buffer_migrate_lock_buffers(head, mode));

        ClearPagePrivate(page);
        set_page_private(newpage, page_private(page));
        set_page_private(page, 0);
        put_page(page);
        get_page(newpage);

        bh = head;
        do {
                set_bh_page(bh, newpage, bh_offset(bh));
                bh = bh->b_this_page;

        } while (bh != head);

        SetPagePrivate(newpage);

        migrate_page_copy(newpage, page);

        bh = head;
        do {
                unlock_buffer(bh);
                put_bh(bh);
                bh = bh->b_this_page;

        } while (bh != head);

        return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL(buffer_migrate_page);
#endif

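/*
 * Illustrative sketch, not part of this file: filesystems whose pages may
 * have buffer_heads attached typically select buffer_migrate_page() above
 * rather than the buffer-unaware migrate_page(); the aops name is
 * hypothetical.
 */
#if 0	/* example only, never compiled */
static const struct address_space_operations example_blockfs_aops = {
        /* ... readpage, writepage, etc. ... */
        .migratepage	= buffer_migrate_page,
};
#endif
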
/*
 * Writeback a page to clean the dirty state
 */
static int writeout(struct address_space *mapping, struct page *page)
{
        struct writeback_control wbc = {
                .sync_mode = WB_SYNC_NONE,
                .nr_to_write = 1,
                .range_start = 0,
                .range_end = LLONG_MAX,
                .for_reclaim = 1
        };
        int rc;

        if (!mapping->a_ops->writepage)
                /* No write method for the address space */
                return -EINVAL;

        if (!clear_page_dirty_for_io(page))
                /* Someone else already triggered a write */
                return -EAGAIN;

        /*
         * A dirty page may imply that the underlying filesystem has
         * the page on some queue. So the page must be clean for
         * migration. Writeout may mean we lose the lock and the
         * page state is no longer what we checked for earlier.
         * At this point we know that the migration attempt cannot
         * be successful.
         */
        remove_migration_ptes(page, page, false);

        rc = mapping->a_ops->writepage(page, &wbc);

        if (rc != AOP_WRITEPAGE_ACTIVATE)
                /* unlocked. Relock */
                lock_page(page);

        return (rc < 0) ? -EIO : -EAGAIN;
}

/*
 * Default handling if a filesystem does not provide a migration function.
 */
static int fallback_migrate_page(struct address_space *mapping,
        struct page *newpage, struct page *page, enum migrate_mode mode)
{
        if (PageDirty(page)) {
                /* Only writeback pages in full synchronous migration */
                if (mode != MIGRATE_SYNC)
                        return -EBUSY;
                return writeout(mapping, page);
        }

        /*
         * Buffers may be managed in a filesystem specific way.
         * We must have no buffers or drop them.
         */
        if (page_has_private(page) &&
            !try_to_release_page(page, GFP_KERNEL))
                return -EAGAIN;

        return migrate_page(mapping, newpage, page, mode);
}

/*
 * Move a page to a newly allocated page
 * The page is locked and all ptes have been successfully removed.
 *
 * The new page will have replaced the old page if this function
 * is successful.
 *
 * Return value:
 *   < 0 - error code
 *  MIGRATEPAGE_SUCCESS - success
 */
static int move_to_new_page(struct page *newpage, struct page *page,
                                enum migrate_mode mode)
{
        struct address_space *mapping;
        int rc = -EAGAIN;
        bool is_lru = !__PageMovable(page);

        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);

        mapping = page_mapping(page);

        if (likely(is_lru)) {
                if (!mapping)
                        rc = migrate_page(mapping, newpage, page, mode);
                else if (mapping->a_ops->migratepage)
                        /*
                         * Most pages have a mapping and most filesystems
                         * provide a migratepage callback. Anonymous pages
                         * are part of swap space which also has its own
                         * migratepage callback. This is the most common path
                         * for page migration.
                         */
                        rc = mapping->a_ops->migratepage(mapping, newpage,
                                                        page, mode);
                else
                        rc = fallback_migrate_page(mapping, newpage,
                                                        page, mode);
        } else {
                /*
                 * In case of a non-lru page, it could be released after
                 * the isolation step. In that case, we shouldn't try
                 * migration.
                 */
                VM_BUG_ON_PAGE(!PageIsolated(page), page);
                if (!PageMovable(page)) {
                        rc = MIGRATEPAGE_SUCCESS;
                        __ClearPageIsolated(page);
                        goto out;
                }

                rc = mapping->a_ops->migratepage(mapping, newpage,
                                                page, mode);
                WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS &&
                        !PageIsolated(page));
        }

        /*
         * When successful, old pagecache page->mapping must be cleared before
         * page is freed; but stats require that PageAnon be left as PageAnon.
         */
        if (rc == MIGRATEPAGE_SUCCESS) {
                if (__PageMovable(page)) {
                        VM_BUG_ON_PAGE(!PageIsolated(page), page);

                        /*
                         * We clear PG_movable under page_lock so any compactor
                         * cannot try to migrate this page.
                         */
                        __ClearPageIsolated(page);
                }

                /*
                 * Anonymous and movable page->mapping will be cleared by
                 * free_pages_prepare(), so don't reset it here; leaving it
                 * set keeps type checks such as PageAnon() working.
                 */
                if (!PageMappingFlags(page))
                        page->mapping = NULL;
        }
out:
        return rc;
}

static int __unmap_and_move(struct page *page, struct page *newpage,
                                int force, enum migrate_mode mode)
{
        int rc = -EAGAIN;
        int page_was_mapped = 0;
        struct anon_vma *anon_vma = NULL;
        bool is_lru = !__PageMovable(page);

        if (!trylock_page(page)) {
                if (!force || mode == MIGRATE_ASYNC)
                        goto out;

                /*
                 * It's not safe for direct compaction to call lock_page.
                 * For example, during page readahead pages are added locked
                 * to the LRU. Later, when the IO completes the pages are
                 * marked uptodate and unlocked. However, the queueing
                 * could be merging multiple pages for one bio (e.g.
                 * mpage_readpages). If an allocation happens for the
                 * second or third page, the process can end up locking
                 * the same page twice and deadlocking. Rather than
                 * trying to be clever about what pages can be locked,
                 * avoid the use of lock_page for direct compaction
                 * altogether.
                 */
                if (current->flags & PF_MEMALLOC)
                        goto out;

                lock_page(page);
        }

        if (PageWriteback(page)) {
                /*
                 * Only in the case of a full synchronous migration is it
                 * necessary to wait for PageWriteback. In the async case,
                 * the retry loop is too short and in the sync-light case,
                 * the overhead of stalling is too much.
                 */
                if (mode != MIGRATE_SYNC) {
                        rc = -EBUSY;
                        goto out_unlock;
                }
                if (!force)
                        goto out_unlock;
                wait_on_page_writeback(page);
        }

        /*
         * By try_to_unmap(), page->mapcount goes down to 0 here. In this
         * case, we cannot notice that anon_vma is freed while we migrate a
         * page. This get_anon_vma() delays freeing the anon_vma pointer
         * until the end of migration. File cache pages are no problem
         * because of page_lock(): file caches may use writepage() or
         * lock_page() during migration, so only anonymous pages need care
         * here.
         *
         * Only page_get_anon_vma() understands the subtleties of
         * getting a hold on an anon_vma from outside one of its mms.
         * But if we cannot get anon_vma, then we won't need it anyway,
         * because that implies that the anon page is no longer mapped
         * (and cannot be remapped so long as we hold the page lock).
         */
        if (PageAnon(page) && !PageKsm(page))
                anon_vma = page_get_anon_vma(page);

        /*
         * Block others from accessing the new page when we get around to
         * establishing additional references. We are usually the only one
         * holding a reference to newpage at this point. We used to have a BUG
         * here if trylock_page(newpage) fails, but would like to allow for
         * cases where there might be a race with the previous use of newpage.
         * This is much like races on refcount of oldpage: just don't BUG().
         */
        if (unlikely(!trylock_page(newpage)))
                goto out_unlock;

        if (unlikely(!is_lru)) {
                rc = move_to_new_page(newpage, page, mode);
                goto out_unlock_both;
        }

        /*
         * Corner case handling:
         * 1. When a new swap-cache page is read into, it is added to the LRU
         * and treated as swapcache but it has no rmap yet.
         * Calling try_to_unmap() against a page->mapping==NULL page will
         * trigger a BUG. So handle it here.
         * 2. An orphaned page (see truncate_complete_page) might have
         * fs-private metadata. The page can be picked up due to memory
         * offlining. Everywhere else except page reclaim, the page is
         * invisible to the vm, so the page can not be migrated. So try to
         * free the metadata, so the page can be freed.
         */
        if (!page->mapping) {
                VM_BUG_ON_PAGE(PageAnon(page), page);
                if (page_has_private(page)) {
                        try_to_free_buffers(page);
                        goto out_unlock_both;
                }
        } else if (page_mapped(page)) {
                /* Establish migration ptes */
                VM_BUG_ON_PAGE(PageAnon(page) && !PageKsm(page) && !anon_vma,
                                page);
                try_to_unmap(page,
                        TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
                page_was_mapped = 1;
        }

        if (!page_mapped(page))
                rc = move_to_new_page(newpage, page, mode);

        if (page_was_mapped)
                remove_migration_ptes(page,
                        rc == MIGRATEPAGE_SUCCESS ? newpage : page, false);

out_unlock_both:
        unlock_page(newpage);
out_unlock:
        /* Drop an anon_vma reference if we took one */
        if (anon_vma)
                put_anon_vma(anon_vma);
        unlock_page(page);
out:
        /*
         * If migration is successful, decrease the refcount of the newpage,
         * which will not free the page because the new page owner increased
         * the refcounter. As well, if it is an LRU page, add the page to the
         * LRU list here.
         */
        if (rc == MIGRATEPAGE_SUCCESS) {
                if (unlikely(__PageMovable(newpage)))
                        put_page(newpage);
                else
                        putback_lru_page(newpage);
        }

        return rc;
}

/*
 * gcc 4.7 and 4.8 on arm get an ICE when inlining unmap_and_move(). Work
 * around it.
 */
#if (GCC_VERSION >= 40700 && GCC_VERSION < 40900) && defined(CONFIG_ARM)
#define ICE_noinline noinline
#else
#define ICE_noinline
#endif

/*
 * Obtain the lock on page, remove all ptes and migrate the page
 * to the newly allocated page in newpage.
 */
static ICE_noinline int unmap_and_move(new_page_t get_new_page,
                                   free_page_t put_new_page,
                                   unsigned long private, struct page *page,
                                   int force, enum migrate_mode mode,
                                   enum migrate_reason reason)
{
        int rc = MIGRATEPAGE_SUCCESS;
        int *result = NULL;
        struct page *newpage;

        newpage = get_new_page(page, private, &result);
        if (!newpage)
                return -ENOMEM;

        if (page_count(page) == 1) {
                /* page was freed from under us. So we are done. */
                ClearPageActive(page);
                ClearPageUnevictable(page);
                if (unlikely(__PageMovable(page))) {
                        lock_page(page);
                        if (!PageMovable(page))
                                __ClearPageIsolated(page);
                        unlock_page(page);
                }
                if (put_new_page)
                        put_new_page(newpage, private);
                else
                        put_page(newpage);
                goto out;
        }

        if (unlikely(PageTransHuge(page))) {
                lock_page(page);
                rc = split_huge_page(page);
                unlock_page(page);
                if (rc)
                        goto out;
        }

        rc = __unmap_and_move(page, newpage, force, mode);
        if (rc == MIGRATEPAGE_SUCCESS)
                set_page_owner_migrate_reason(newpage, reason);

out:
        if (rc != -EAGAIN) {
                /*
                 * A page that has been migrated has all references
                 * removed and will be freed. A page that has not been
                 * migrated will have kept its references and be
                 * restored.
                 */
                list_del(&page->lru);

                /*
                 * Compaction can also migrate non-LRU pages, which are
                 * not accounted to NR_ISOLATED_*. They can be recognized
                 * as __PageMovable.
                 */
                if (likely(!__PageMovable(page)))
                        dec_node_page_state(page, NR_ISOLATED_ANON +
                                        page_is_file_cache(page));
        }

        /*
         * If migration is successful, release the reference grabbed during
         * isolation. Otherwise, restore the page to the right list unless
         * we want to retry.
         */
        if (rc == MIGRATEPAGE_SUCCESS) {
                put_page(page);
                if (reason == MR_MEMORY_FAILURE) {
                        /*
                         * Set PG_HWPoison on the just-freed page
                         * intentionally. Although it's rather weird,
                         * it's how the HWPoison flag works at the moment.
                         */
                        if (!test_set_page_hwpoison(page))
                                num_poisoned_pages_inc();
                }
        } else {
                if (rc != -EAGAIN) {
                        if (likely(!__PageMovable(page))) {
                                putback_lru_page(page);
                                goto put_new;
                        }

                        lock_page(page);
                        if (PageMovable(page))
                                putback_movable_page(page);
                        else
                                __ClearPageIsolated(page);
                        unlock_page(page);
                        put_page(page);
                }
put_new:
                if (put_new_page)
                        put_new_page(newpage, private);
                else
                        put_page(newpage);
        }

        if (result) {
                if (rc)
                        *result = rc;
                else
                        *result = page_to_nid(newpage);
        }
        return rc;
}

/*
 * Counterpart of unmap_and_move() for hugepage migration.
 *
 * This function doesn't wait for the completion of hugepage I/O
 * because there is no race between I/O and migration for hugepages.
 * Note that currently hugepage I/O occurs only in direct I/O
 * where no lock is held and PG_writeback is irrelevant,
 * and the writeback status of all subpages is counted in the reference
 * count of the head page (i.e. if all subpages of a 2MB hugepage are
 * under direct I/O, the reference of the head page is 512 and a bit more.)
 * This means that when we try to migrate a hugepage whose subpages are
 * doing direct I/O, some references remain after try_to_unmap() and
 * hugepage migration fails without data corruption.
 *
 * There is also no race when direct I/O is issued on the page under migration,
 * because then the pte is replaced with a migration swap entry and the direct
 * I/O code will wait in the page fault for migration to complete.
 */
static int unmap_and_move_huge_page(new_page_t get_new_page,
                                free_page_t put_new_page, unsigned long private,
                                struct page *hpage, int force,
                                enum migrate_mode mode, int reason)
{
        int rc = -EAGAIN;
        int *result = NULL;
        int page_was_mapped = 0;
        struct page *new_hpage;
        struct anon_vma *anon_vma = NULL;

        /*
         * Movability of hugepages depends on architectures and hugepage size.
         * This check is necessary because some callers of hugepage migration
         * like soft offline and memory hotremove don't walk through page
         * tables or check whether the hugepage is pmd-based or not before
         * kicking migration.
         */
        if (!hugepage_migration_supported(page_hstate(hpage))) {
                putback_active_hugepage(hpage);
                return -ENOSYS;
        }

        new_hpage = get_new_page(hpage, private, &result);
        if (!new_hpage)
                return -ENOMEM;

        if (!trylock_page(hpage)) {
                if (!force || mode != MIGRATE_SYNC)
                        goto out;
                lock_page(hpage);
        }

        if (PageAnon(hpage))
                anon_vma = page_get_anon_vma(hpage);

        if (unlikely(!trylock_page(new_hpage)))
                goto put_anon;

        if (page_mapped(hpage)) {
                try_to_unmap(hpage,
                        TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
                page_was_mapped = 1;
        }

        if (!page_mapped(hpage))
                rc = move_to_new_page(new_hpage, hpage, mode);

        if (page_was_mapped)
                remove_migration_ptes(hpage,
                        rc == MIGRATEPAGE_SUCCESS ? new_hpage : hpage, false);

        unlock_page(new_hpage);

put_anon:
        if (anon_vma)
                put_anon_vma(anon_vma);

        if (rc == MIGRATEPAGE_SUCCESS) {
                hugetlb_cgroup_migrate(hpage, new_hpage);
                put_new_page = NULL;
                set_page_owner_migrate_reason(new_hpage, reason);
        }

        unlock_page(hpage);
out:
        if (rc != -EAGAIN)
                putback_active_hugepage(hpage);

        /*
         * If migration was not successful and there's a freeing callback, use
         * it. Otherwise, put_page() will drop the reference grabbed during
         * isolation.
         */
        if (put_new_page)
                put_new_page(new_hpage, private);
        else
                putback_active_hugepage(new_hpage);

        if (result) {
                if (rc)
                        *result = rc;
                else
                        *result = page_to_nid(new_hpage);
        }
        return rc;
}

/*
 * migrate_pages - migrate the pages specified in a list, to the free pages
 *		   supplied as the target for the page migration
 *
 * @from:		The list of pages to be migrated.
 * @get_new_page:	The function used to allocate free pages to be used
 *			as the target of the page migration.
 * @put_new_page:	The function used to free target pages if migration
 *			fails, or NULL if no special handling is necessary.
 * @private:		Private data to be passed on to get_new_page()
 * @mode:		The migration mode that specifies the constraints for
 *			page migration, if any.
 * @reason:		The reason for page migration.
 *
 * The function returns after 10 attempts or if no pages are movable any more
 * because the list has become empty or no retryable pages exist any more.
 * The caller should call putback_movable_pages() to return pages to the LRU
 * or free list only if ret != 0.
 *
 * Returns the number of pages that were not migrated, or an error code.
 */
int migrate_pages(struct list_head *from, new_page_t get_new_page,
                free_page_t put_new_page, unsigned long private,
                enum migrate_mode mode, int reason)
{
        int retry = 1;
        int nr_failed = 0;
        int nr_succeeded = 0;
        int pass = 0;
        struct page *page;
        struct page *page2;
        int swapwrite = current->flags & PF_SWAPWRITE;
        int rc;

        if (!swapwrite)
                current->flags |= PF_SWAPWRITE;

        for (pass = 0; pass < 10 && retry; pass++) {
                retry = 0;

                list_for_each_entry_safe(page, page2, from, lru) {
                        cond_resched();

                        if (PageHuge(page))
                                rc = unmap_and_move_huge_page(get_new_page,
                                                put_new_page, private, page,
                                                pass > 2, mode, reason);
                        else
                                rc = unmap_and_move(get_new_page, put_new_page,
                                                private, page, pass > 2, mode,
                                                reason);

                        switch (rc) {
                        case -ENOMEM:
                                nr_failed++;
                                goto out;
                        case -EAGAIN:
                                retry++;
                                break;
                        case MIGRATEPAGE_SUCCESS:
                                nr_succeeded++;
                                break;
                        default:
                                /*
                                 * Permanent failure (-EBUSY, -ENOSYS, etc.):
                                 * unlike -EAGAIN case, the failed page is
                                 * removed from migration page list and not
                                 * retried in the next outer loop.
                                 */
                                nr_failed++;
                                break;
                        }
                }
        }
        nr_failed += retry;
        rc = nr_failed;
out:
        if (nr_succeeded)
                count_vm_events(PGMIGRATE_SUCCESS, nr_succeeded);
        if (nr_failed)
                count_vm_events(PGMIGRATE_FAIL, nr_failed);
        trace_mm_migrate_pages(nr_succeeded, nr_failed, mode, reason);

        if (!swapwrite)
                current->flags &= ~PF_SWAPWRITE;

        return rc;
}

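/*
 * Illustrative sketch, not part of this file: the calling convention that
 * the migrate_pages() comment above spells out. The caller supplies an
 * allocation callback, checks the return value, and puts back whatever did
 * not migrate; all "example_" names are hypothetical.
 */
#if 0	/* example only, never compiled */
static struct page *example_alloc_dst(struct page *page,
                                      unsigned long private, int **result)
{
        /* Allocate the target page on the node passed via @private. */
        return __alloc_pages_node((int)private, GFP_HIGHUSER_MOVABLE, 0);
}

static int example_migrate_list(struct list_head *pagelist, int target_nid)
{
        int err;

        err = migrate_pages(pagelist, example_alloc_dst, NULL,
                            (unsigned long)target_nid, MIGRATE_SYNC,
                            MR_SYSCALL);
        if (err)	/* ret != 0: some pages were not migrated */
                putback_movable_pages(pagelist);
        return err;
}
#endif
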
#ifdef CONFIG_NUMA
/*
 * Move a list of individual pages
 */
struct page_to_node {
        unsigned long addr;
        struct page *page;
        int node;
        int status;
};

static struct page *new_page_node(struct page *p, unsigned long private,
                int **result)
{
        struct page_to_node *pm = (struct page_to_node *)private;

        while (pm->node != MAX_NUMNODES && pm->page != p)
                pm++;

        if (pm->node == MAX_NUMNODES)
                return NULL;

        *result = &pm->status;

        if (PageHuge(p))
                return alloc_huge_page_node(page_hstate(compound_head(p)),
                                        pm->node);
        else
                return __alloc_pages_node(pm->node,
                                GFP_HIGHUSER_MOVABLE | __GFP_THISNODE, 0);
}

/*
 * Move a set of pages as indicated in the pm array. The addr
 * field must be set to the virtual address of the page to be moved
 * and the node number must contain a valid target node.
 * The pm array ends with node = MAX_NUMNODES.
 */
static int do_move_page_to_node_array(struct mm_struct *mm,
                                      struct page_to_node *pm,
                                      int migrate_all)
{
        int err;
        struct page_to_node *pp;
        LIST_HEAD(pagelist);

        down_read(&mm->mmap_sem);

        /*
         * Build a list of pages to migrate
         */
        for (pp = pm; pp->node != MAX_NUMNODES; pp++) {
                struct vm_area_struct *vma;
                struct page *page;

                err = -EFAULT;
                vma = find_vma(mm, pp->addr);
                if (!vma || pp->addr < vma->vm_start || !vma_migratable(vma))
                        goto set_status;

                /* FOLL_DUMP to ignore special (like zero) pages */
                page = follow_page(vma, pp->addr,
                                FOLL_GET | FOLL_SPLIT | FOLL_DUMP);

                err = PTR_ERR(page);
                if (IS_ERR(page))
                        goto set_status;

                err = -ENOENT;
                if (!page)
                        goto set_status;

                pp->page = page;
                err = page_to_nid(page);

                if (err == pp->node)
                        /*
                         * Node already in the right place
                         */
                        goto put_and_set;

                err = -EACCES;
                if (page_mapcount(page) > 1 &&
                                !migrate_all)
                        goto put_and_set;

                if (PageHuge(page)) {
                        if (PageHead(page))
                                isolate_huge_page(page, &pagelist);
                        goto put_and_set;
                }

                err = isolate_lru_page(page);
                if (!err) {
                        list_add_tail(&page->lru, &pagelist);
                        inc_node_page_state(page, NR_ISOLATED_ANON +
                                            page_is_file_cache(page));
                }
put_and_set:
                /*
                 * Either remove the duplicate refcount from
                 * isolate_lru_page() or drop the page ref if it was
                 * not isolated.
                 */
                put_page(page);
set_status:
                pp->status = err;
        }

        err = 0;
        if (!list_empty(&pagelist)) {
                err = migrate_pages(&pagelist, new_page_node, NULL,
                                (unsigned long)pm, MIGRATE_SYNC, MR_SYSCALL);
                if (err)
                        putback_movable_pages(&pagelist);
        }

        up_read(&mm->mmap_sem);
        return err;
}

/*
 * Migrate an array of page addresses onto an array of nodes and fill in
 * the corresponding array of status values.
 */
static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
                         unsigned long nr_pages,
                         const void __user * __user *pages,
                         const int __user *nodes,
                         int __user *status, int flags)
{
        struct page_to_node *pm;
        unsigned long chunk_nr_pages;
        unsigned long chunk_start;
        int err;

        err = -ENOMEM;
        pm = (struct page_to_node *)__get_free_page(GFP_KERNEL);
        if (!pm)
                goto out;

        migrate_prep();

        /*
         * Store a chunk of the page_to_node array in a page,
         * but keep the last entry as a marker
         */
        chunk_nr_pages = (PAGE_SIZE / sizeof(struct page_to_node)) - 1;

        for (chunk_start = 0;
             chunk_start < nr_pages;
             chunk_start += chunk_nr_pages) {
                int j;

                if (chunk_start + chunk_nr_pages > nr_pages)
                        chunk_nr_pages = nr_pages - chunk_start;

                /* fill the chunk pm with addrs and nodes from user-space */
                for (j = 0; j < chunk_nr_pages; j++) {
                        const void __user *p;
                        int node;

                        err = -EFAULT;
                        if (get_user(p, pages + j + chunk_start))
                                goto out_pm;
                        pm[j].addr = (unsigned long) p;

                        if (get_user(node, nodes + j + chunk_start))
                                goto out_pm;

                        err = -ENODEV;
                        if (node < 0 || node >= MAX_NUMNODES)
                                goto out_pm;

                        if (!node_state(node, N_MEMORY))
                                goto out_pm;

                        err = -EACCES;
                        if (!node_isset(node, task_nodes))
                                goto out_pm;

                        pm[j].node = node;
                }

                /* End marker for this chunk */
                pm[chunk_nr_pages].node = MAX_NUMNODES;

                /* Migrate this chunk */
                err = do_move_page_to_node_array(mm, pm,
                                                 flags & MPOL_MF_MOVE_ALL);
                if (err < 0)
                        goto out_pm;

                /* Return status information */
                for (j = 0; j < chunk_nr_pages; j++)
                        if (put_user(pm[j].status, status + j + chunk_start)) {
                                err = -EFAULT;
                                goto out_pm;
                        }
        }
        err = 0;

out_pm:
        free_page((unsigned long)pm);
out:
        return err;
}

/*
 * Determine the nodes of an array of pages and store it in an array of status.
 */
static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
                                const void __user **pages, int *status)
{
        unsigned long i;

        down_read(&mm->mmap_sem);

        for (i = 0; i < nr_pages; i++) {
                unsigned long addr = (unsigned long)(*pages);
                struct vm_area_struct *vma;
                struct page *page;
                int err = -EFAULT;

                vma = find_vma(mm, addr);
                if (!vma || addr < vma->vm_start)
                        goto set_status;

                /* FOLL_DUMP to ignore special (like zero) pages */
                page = follow_page(vma, addr, FOLL_DUMP);

                err = PTR_ERR(page);
                if (IS_ERR(page))
                        goto set_status;

                err = page ? page_to_nid(page) : -ENOENT;
set_status:
                *status = err;

                pages++;
                status++;
        }

        up_read(&mm->mmap_sem);
}

/*
 * Determine the nodes of a user array of pages and store it in
 * a user array of status.
 */
static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
                         const void __user * __user *pages,
                         int __user *status)
{
#define DO_PAGES_STAT_CHUNK_NR 16
        const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
        int chunk_status[DO_PAGES_STAT_CHUNK_NR];

        while (nr_pages) {
                unsigned long chunk_nr;

                chunk_nr = nr_pages;
                if (chunk_nr > DO_PAGES_STAT_CHUNK_NR)
                        chunk_nr = DO_PAGES_STAT_CHUNK_NR;

                if (copy_from_user(chunk_pages, pages, chunk_nr * sizeof(*chunk_pages)))
                        break;

                do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);

                if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
                        break;

                pages += chunk_nr;
                status += chunk_nr;
                nr_pages -= chunk_nr;
        }
        return nr_pages ? -EFAULT : 0;
}

/*
 * Move a list of pages in the address space of the currently executing
 * process.
 */
SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
                const void __user * __user *, pages,
                const int __user *, nodes,
                int __user *, status, int, flags)
{
        struct task_struct *task;
        struct mm_struct *mm;
        int err;
        nodemask_t task_nodes;

        /* Check flags */
        if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
                return -EINVAL;

        if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
                return -EPERM;

        /* Find the mm_struct */
        rcu_read_lock();
        task = pid ? find_task_by_vpid(pid) : current;
        if (!task) {
                rcu_read_unlock();
                return -ESRCH;
        }
        get_task_struct(task);

        /*
         * Check if this process has the right to modify the specified
         * process. Use the regular "ptrace_may_access()" checks.
         */
        if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
                rcu_read_unlock();
                err = -EPERM;
                goto out;
        }
        rcu_read_unlock();

        err = security_task_movememory(task);
        if (err)
                goto out;

        task_nodes = cpuset_mems_allowed(task);
        mm = get_task_mm(task);
        put_task_struct(task);

        if (!mm)
                return -EINVAL;

        if (nodes)
                err = do_pages_move(mm, task_nodes, nr_pages, pages,
                                    nodes, status, flags);
        else
                err = do_pages_stat(mm, nr_pages, pages, status);

        mmput(mm);
        return err;

out:
        put_task_struct(task);
        return err;
}

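/*
 * Illustrative sketch, not part of this file: how userspace reaches this
 * syscall through libnuma's move_pages(2) wrapper. Passing nodes == NULL
 * only queries the current node of each page via do_pages_stat(); the
 * function name below is hypothetical.
 */
#if 0	/* userspace example only, never compiled here */
#include <numaif.h>		/* move_pages(), MPOL_MF_MOVE */
#include <stdio.h>

static int example_move_to_node(void *addr, int node)
{
        void *pages[1] = { addr };	/* page-aligned user addresses */
        int nodes[1] = { node };
        int status[1];

        /* pid 0 means the calling process; see the syscall above. */
        if (move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE) < 0)
                return -1;
        printf("page now on node %d\n", status[0]);
        return status[0] == node ? 0 : -1;
}
#endif
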
#ifdef CONFIG_NUMA_BALANCING
/*
 * Returns true if this is a safe migration target node for misplaced NUMA
 * pages. Currently it only checks the watermarks, which is crude.
 */
static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
                                   unsigned long nr_migrate_pages)
{
        int z;

        if (!pgdat_reclaimable(pgdat))
                return false;

        for (z = pgdat->nr_zones - 1; z >= 0; z--) {
                struct zone *zone = pgdat->node_zones + z;

                if (!populated_zone(zone))
                        continue;

                /* Avoid waking kswapd by allocating pages_to_migrate pages. */
                if (!zone_watermark_ok(zone, 0,
                                       high_wmark_pages(zone) +
                                       nr_migrate_pages,
                                       0, 0))
                        continue;
                return true;
        }
        return false;
}

static struct page *alloc_misplaced_dst_page(struct page *page,
                                           unsigned long data,
                                           int **result)
{
        int nid = (int) data;
        struct page *newpage;

        newpage = __alloc_pages_node(nid,
                                         (GFP_HIGHUSER_MOVABLE |
                                          __GFP_THISNODE | __GFP_NOMEMALLOC |
                                          __GFP_NORETRY | __GFP_NOWARN) &
                                         ~__GFP_RECLAIM, 0);

        return newpage;
}

/*
 * page migration rate limiting control.
 * Do not migrate more than @pages_to_migrate in a @migrate_interval_millisecs
 * window of time. Default here says do not migrate more than 1280M per second.
 */
static unsigned int migrate_interval_millisecs __read_mostly = 100;
static unsigned int ratelimit_pages __read_mostly = 128 << (20 - PAGE_SHIFT);

/* Returns true if the node is migrate rate-limited after the update */
static bool numamigrate_update_ratelimit(pg_data_t *pgdat,
                                        unsigned long nr_pages)
{
        /*
         * Rate-limit the amount of data that is being migrated to a node.
         * Optimal placement is no good if the memory bus is saturated and
         * all the time is being spent migrating!
         */
        if (time_after(jiffies, pgdat->numabalancing_migrate_next_window)) {
                spin_lock(&pgdat->numabalancing_migrate_lock);
                pgdat->numabalancing_migrate_nr_pages = 0;
                pgdat->numabalancing_migrate_next_window = jiffies +
                        msecs_to_jiffies(migrate_interval_millisecs);
                spin_unlock(&pgdat->numabalancing_migrate_lock);
        }
        if (pgdat->numabalancing_migrate_nr_pages > ratelimit_pages) {
                trace_mm_numa_migrate_ratelimit(current, pgdat->node_id,
                                                                nr_pages);
                return true;
        }

        /*
         * This is an unlocked non-atomic update so errors are possible.
         * The consequence is failing to migrate when we potentially should
         * have, which is not severe enough to warrant locking. If it is ever
         * a problem, it can be converted to a per-cpu counter.
         */
        pgdat->numabalancing_migrate_nr_pages += nr_pages;
        return false;
}

static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
{
        int page_lru;

        VM_BUG_ON_PAGE(compound_order(page) && !PageTransHuge(page), page);

        /* Avoid migrating to a node that is nearly full */
        if (!migrate_balanced_pgdat(pgdat, 1UL << compound_order(page)))
                return 0;

        if (isolate_lru_page(page))
                return 0;

        /*
         * migrate_misplaced_transhuge_page() skips page migration's usual
         * check on page_count(), so we must do it here, now that the page
         * has been isolated: a GUP pin, or any other pin, prevents migration.
         * The expected page count is 3: 1 for page's mapcount and 1 for the
         * caller's pin and 1 for the reference taken by isolate_lru_page().
         */
        if (PageTransHuge(page) && page_count(page) != 3) {
                putback_lru_page(page);
                return 0;
        }

        page_lru = page_is_file_cache(page);
        mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_lru,
                                hpage_nr_pages(page));

        /*
         * Isolating the page has taken another reference, so the
         * caller's reference can be safely dropped without the page
         * disappearing underneath us during migration.
         */
        put_page(page);
        return 1;
}

bool pmd_trans_migrating(pmd_t pmd)
{
        struct page *page = pmd_page(pmd);
        return PageLocked(page);
}

/*
 * Attempt to migrate a misplaced page to the specified destination
 * node. Caller is expected to have an elevated reference count on
 * the page that will be dropped by this function before returning.
 */
int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
                           int node)
{
        pg_data_t *pgdat = NODE_DATA(node);
        int isolated;
        int nr_remaining;
        LIST_HEAD(migratepages);

        /*
         * Don't migrate file pages that are mapped in multiple processes
         * with execute permissions as they are probably shared libraries.
         */
        if (page_mapcount(page) != 1 && page_is_file_cache(page) &&
            (vma->vm_flags & VM_EXEC))
                goto out;

        /*
         * Rate-limit the amount of data that is being migrated to a node.
         * Optimal placement is no good if the memory bus is saturated and
         * all the time is being spent migrating!
         */
        if (numamigrate_update_ratelimit(pgdat, 1))
                goto out;

        isolated = numamigrate_isolate_page(pgdat, page);
        if (!isolated)
                goto out;

        list_add(&page->lru, &migratepages);
        nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_page,
                                     NULL, node, MIGRATE_ASYNC,
                                     MR_NUMA_MISPLACED);
        if (nr_remaining) {
                if (!list_empty(&migratepages)) {
                        list_del(&page->lru);
                        dec_node_page_state(page, NR_ISOLATED_ANON +
                                        page_is_file_cache(page));
                        putback_lru_page(page);
                }
                isolated = 0;
        } else
                count_vm_numa_event(NUMA_PAGE_MIGRATE);
        BUG_ON(!list_empty(&migratepages));
        return isolated;

out:
        put_page(page);
        return 0;
}

#endif /* CONFIG_NUMA_BALANCING */

#if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
/*
 * Migrates a THP to a given target node. page must be locked and is unlocked
 * before returning.
 */
int migrate_misplaced_transhuge_page(struct mm_struct *mm,
                                struct vm_area_struct *vma,
                                pmd_t *pmd, pmd_t entry,
                                unsigned long address,
                                struct page *page, int node)
{
        spinlock_t *ptl;
        pg_data_t *pgdat = NODE_DATA(node);
        int isolated = 0;
        struct page *new_page = NULL;
        int page_lru = page_is_file_cache(page);
        unsigned long mmun_start = address & HPAGE_PMD_MASK;
        unsigned long mmun_end = mmun_start + HPAGE_PMD_SIZE;
        pmd_t orig_entry;

        /*
         * Rate-limit the amount of data that is being migrated to a node.
         * Optimal placement is no good if the memory bus is saturated and
         * all the time is being spent migrating!
         */
        if (numamigrate_update_ratelimit(pgdat, HPAGE_PMD_NR))
                goto out_dropref;

        new_page = alloc_pages_node(node,
                (GFP_TRANSHUGE_LIGHT | __GFP_THISNODE),
                HPAGE_PMD_ORDER);
        if (!new_page)
                goto out_fail;
        prep_transhuge_page(new_page);

        isolated = numamigrate_isolate_page(pgdat, page);
        if (!isolated) {
                put_page(new_page);
                goto out_fail;
        }
        /*
         * We are not sure a pending tlb flush here is for a huge page
         * mapping or not. Hence use the tlb range variant
         */
        if (mm_tlb_flush_pending(mm))
                flush_tlb_range(vma, mmun_start, mmun_end);

        /* Prepare a page as a migration target */
        __SetPageLocked(new_page);
        __SetPageSwapBacked(new_page);

        /* anon mapping, we can simply copy page->mapping to the new page: */
        new_page->mapping = page->mapping;
        new_page->index = page->index;
        migrate_page_copy(new_page, page);
        WARN_ON(PageLRU(new_page));

        /* Recheck the target PMD */
        mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
        ptl = pmd_lock(mm, pmd);
        if (unlikely(!pmd_same(*pmd, entry) || page_count(page) != 2)) {
fail_putback:
                spin_unlock(ptl);
                mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);

                /* Reverse changes made by migrate_page_copy() */
                if (TestClearPageActive(new_page))
                        SetPageActive(page);
                if (TestClearPageUnevictable(new_page))
                        SetPageUnevictable(page);

                unlock_page(new_page);
                put_page(new_page);		/* Free it */

                /* Retake the callers reference and putback on LRU */
                get_page(page);
                putback_lru_page(page);
                mod_node_page_state(page_pgdat(page),
                         NR_ISOLATED_ANON + page_lru, -HPAGE_PMD_NR);

                goto out_unlock;
        }

        orig_entry = *pmd;
        entry = mk_huge_pmd(new_page, vma->vm_page_prot);
        entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);

        /*
         * Clear the old entry under pagetable lock and establish the new PTE.
         * Any parallel GUP will either observe the old page blocking on the
         * page lock, block on the page table lock or observe the new page.
         * The SetPageUptodate on the new page and page_add_new_anon_rmap
         * guarantee the copy is visible before the pagetable update.
         */
        flush_cache_range(vma, mmun_start, mmun_end);
        page_add_anon_rmap(new_page, vma, mmun_start, true);
        pmdp_huge_clear_flush_notify(vma, mmun_start, pmd);
        set_pmd_at(mm, mmun_start, pmd, entry);
        update_mmu_cache_pmd(vma, address, &entry);

        if (page_count(page) != 2) {
                set_pmd_at(mm, mmun_start, pmd, orig_entry);
                flush_pmd_tlb_range(vma, mmun_start, mmun_end);
                mmu_notifier_invalidate_range(mm, mmun_start, mmun_end);
                update_mmu_cache_pmd(vma, address, &entry);
                page_remove_rmap(new_page, true);
                goto fail_putback;
        }

        mlock_migrate_page(new_page, page);
        page_remove_rmap(page, true);
        set_page_owner_migrate_reason(new_page, MR_NUMA_MISPLACED);

        spin_unlock(ptl);
        mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);

        /* Take an "isolate" reference and put new page on the LRU. */
        get_page(new_page);
        putback_lru_page(new_page);

        unlock_page(new_page);
        unlock_page(page);
        put_page(page);			/* Drop the rmap reference */
        put_page(page);			/* Drop the LRU isolation reference */

        count_vm_events(PGMIGRATE_SUCCESS, HPAGE_PMD_NR);
        count_vm_numa_events(NUMA_PAGE_MIGRATE, HPAGE_PMD_NR);

        mod_node_page_state(page_pgdat(page),
                        NR_ISOLATED_ANON + page_lru,
                        -HPAGE_PMD_NR);
        return isolated;

out_fail:
        count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
out_dropref:
        ptl = pmd_lock(mm, pmd);
        if (pmd_same(*pmd, entry)) {
                entry = pmd_modify(entry, vma->vm_page_prot);
                set_pmd_at(mm, mmun_start, pmd, entry);
                update_mmu_cache_pmd(vma, address, &entry);
        }
        spin_unlock(ptl);

out_unlock:
        unlock_page(page);
        put_page(page);
        return 0;
}
#endif /* CONFIG_NUMA_BALANCING */

#endif /* CONFIG_NUMA */