/*
 * linux/mm/compaction.c
 *
 * Memory compaction for the reduction of external fragmentation. Note that
 * this heavily depends upon page migration to do all the real heavy
 * lifting
 *
 * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
 */
#include <linux/cpu.h>
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/compaction.h>
#include <linux/mm_inline.h>
#include <linux/sched/signal.h>
#include <linux/backing-dev.h>
#include <linux/sysctl.h>
#include <linux/sysfs.h>
#include <linux/page-isolation.h>
#include <linux/kasan.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/page_owner.h>
#include "internal.h"
#ifdef CONFIG_COMPACTION
static inline void count_compact_event(enum vm_event_item item)
{
	count_vm_event(item);
}

static inline void count_compact_events(enum vm_event_item item, long delta)
{
	count_vm_events(item, delta);
}
#else
#define count_compact_event(item) do { } while (0)
#define count_compact_events(item, delta) do { } while (0)
#endif

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

#define CREATE_TRACE_POINTS
#include <trace/events/compaction.h>

#define block_start_pfn(pfn, order)	round_down(pfn, 1UL << (order))
#define block_end_pfn(pfn, order)	ALIGN((pfn) + 1, 1UL << (order))
#define pageblock_start_pfn(pfn)	block_start_pfn(pfn, pageblock_order)
#define pageblock_end_pfn(pfn)		block_end_pfn(pfn, pageblock_order)
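
/*
 * Release the list of isolated free pages back to the buddy allocator and
 * return the highest PFN that was freed.
 */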
static unsigned long release_freepages(struct list_head *freelist)
{
	struct page *page, *next;
	unsigned long high_pfn = 0;

	list_for_each_entry_safe(page, next, freelist, lru) {
		unsigned long pfn = page_to_pfn(page);
		list_del(&page->lru);
		__free_page(page);
		if (pfn > high_pfn)
			high_pfn = pfn;
	}

	return high_pfn;
}
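
/*
 * Prepare isolated free pages for use as migration targets: run the
 * post-allocation hook and split any high-order pages into order-0 pages.
 */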
static void map_pages(struct list_head *list)
{
	unsigned int i, order, nr_pages;
	struct page *page, *next;
	LIST_HEAD(tmp_list);

	list_for_each_entry_safe(page, next, list, lru) {
		list_del(&page->lru);

		order = page_private(page);
		nr_pages = 1 << order;

		post_alloc_hook(page, order, __GFP_MOVABLE);
		if (order)
			split_page(page, order);

		for (i = 0; i < nr_pages; i++) {
			list_add(&page->lru, &tmp_list);
			page++;
		}
	}

	list_splice(&tmp_list, list);
}

#ifdef CONFIG_COMPACTION
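
/*
 * A page is considered "movable" (non-LRU migratable) if its driver has
 * registered an address_space with an isolate_page callback. The page must
 * be locked by the caller.
 */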
int PageMovable(struct page *page)
{
	struct address_space *mapping;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	if (!__PageMovable(page))
		return 0;

	mapping = page_mapping(page);
	if (mapping && mapping->a_ops && mapping->a_ops->isolate_page)
		return 1;

	return 0;
}
EXPORT_SYMBOL(PageMovable);

void __SetPageMovable(struct page *page, struct address_space *mapping)
{
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE((unsigned long)mapping & PAGE_MAPPING_MOVABLE, page);
	page->mapping = (void *)((unsigned long)mapping | PAGE_MAPPING_MOVABLE);
}
EXPORT_SYMBOL(__SetPageMovable);

void __ClearPageMovable(struct page *page)
{
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageMovable(page), page);
	/*
	 * Clear registered address_space val with keeping PAGE_MAPPING_MOVABLE
	 * flag so that VM can catch up released page by driver after isolation.
	 * With it, VM migration doesn't try to put it back.
	 */
	page->mapping = (void *)((unsigned long)page->mapping &
				PAGE_MAPPING_MOVABLE);
}
EXPORT_SYMBOL(__ClearPageMovable);

/* Do not skip compaction more than 64 times */
#define COMPACT_MAX_DEFER_SHIFT 6

/*
 * Compaction is deferred when compaction fails to result in a page
 * allocation success. 1 << compact_defer_limit compactions are skipped up
 * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT
 */
void defer_compaction(struct zone *zone, int order)
{
	zone->compact_considered = 0;
	zone->compact_defer_shift++;

	if (order < zone->compact_order_failed)
		zone->compact_order_failed = order;

	if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
		zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;

	trace_mm_compaction_defer_compaction(zone, order);
}

/* Returns true if compaction should be skipped this time */
bool compaction_deferred(struct zone *zone, int order)
{
	unsigned long defer_limit = 1UL << zone->compact_defer_shift;

	if (order < zone->compact_order_failed)
		return false;

	/* Avoid possible overflow */
	if (++zone->compact_considered > defer_limit)
		zone->compact_considered = defer_limit;

	if (zone->compact_considered >= defer_limit)
		return false;

	trace_mm_compaction_deferred(zone, order);

	return true;
}

/*
 * Update defer tracking counters after successful compaction of given order,
 * which means an allocation either succeeded (alloc_success == true) or is
 * expected to succeed.
 */
void compaction_defer_reset(struct zone *zone, int order,
		bool alloc_success)
{
	if (alloc_success) {
		zone->compact_considered = 0;
		zone->compact_defer_shift = 0;
	}
	if (order >= zone->compact_order_failed)
		zone->compact_order_failed = order + 1;

	trace_mm_compaction_defer_reset(zone, order);
}

/* Returns true if restarting compaction after many failures */
bool compaction_restarting(struct zone *zone, int order)
{
	if (order < zone->compact_order_failed)
		return false;

	return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
		zone->compact_considered >= 1UL << zone->compact_defer_shift;
}

/* Returns true if the pageblock should be scanned for pages to isolate. */
static inline bool isolation_suitable(struct compact_control *cc,
					struct page *page)
{
	if (cc->ignore_skip_hint)
		return true;

	return !get_pageblock_skip(page);
}
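
/*
 * Reset the cached scanner positions: the migration scanner restarts at the
 * zone start and the free scanner at the last pageblock of the zone.
 */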
static void reset_cached_positions(struct zone *zone)
{
	zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
	zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
	zone->compact_cached_free_pfn =
				pageblock_start_pfn(zone_end_pfn(zone) - 1);
}

/*
 * This function is called to clear all cached information on pageblocks that
 * should be skipped for page isolation when the migrate and free page scanner
 * meet.
 */
static void __reset_isolation_suitable(struct zone *zone)
{
	unsigned long start_pfn = zone->zone_start_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);
	unsigned long pfn;

	zone->compact_blockskip_flush = false;

	/* Walk the zone and mark every pageblock as suitable for isolation */
	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		struct page *page;

		cond_resched();

		page = pfn_to_online_page(pfn);
		if (!page)
			continue;
		if (zone != page_zone(page))
			continue;

		clear_pageblock_skip(page);
	}

	reset_cached_positions(zone);
}

void reset_isolation_suitable(pg_data_t *pgdat)
{
	int zoneid;

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
		struct zone *zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		/* Only flush if a full compaction finished recently */
		if (zone->compact_blockskip_flush)
			__reset_isolation_suitable(zone);
	}
}

/*
 * If no pages were isolated then mark this pageblock to be skipped in the
 * future. The information is later cleared by __reset_isolation_suitable().
 */
static void update_pageblock_skip(struct compact_control *cc,
			struct page *page, unsigned long nr_isolated,
			bool migrate_scanner)
{
	struct zone *zone = cc->zone;
	unsigned long pfn;

	if (cc->ignore_skip_hint)
		return;

	if (!page)
		return;

	if (nr_isolated)
		return;

	set_pageblock_skip(page);

	pfn = page_to_pfn(page);

	/* Update where async and sync compaction should restart */
	if (migrate_scanner) {
		if (pfn > zone->compact_cached_migrate_pfn[0])
			zone->compact_cached_migrate_pfn[0] = pfn;
		if (cc->mode != MIGRATE_ASYNC &&
		    pfn > zone->compact_cached_migrate_pfn[1])
			zone->compact_cached_migrate_pfn[1] = pfn;
	} else {
		if (pfn < zone->compact_cached_free_pfn)
			zone->compact_cached_free_pfn = pfn;
	}
}
#else
static inline bool isolation_suitable(struct compact_control *cc,
					struct page *page)
{
	return true;
}

static void update_pageblock_skip(struct compact_control *cc,
			struct page *page, unsigned long nr_isolated,
			bool migrate_scanner)
{
}
#endif /* CONFIG_COMPACTION */

/*
 * Compaction requires the taking of some coarse locks that are potentially
 * very heavily contended. For async compaction, back out if the lock cannot
 * be taken immediately. For sync compaction, spin on the lock if needed.
 *
 * Returns true if the lock is held
 * Returns false if the lock is not held and compaction should abort
 */
static bool compact_trylock_irqsave(spinlock_t *lock, unsigned long *flags,
						struct compact_control *cc)
{
	if (cc->mode == MIGRATE_ASYNC) {
		if (!spin_trylock_irqsave(lock, *flags)) {
			cc->contended = true;
			return false;
		}
	} else {
		spin_lock_irqsave(lock, *flags);
	}

	return true;
}

/*
 * Compaction requires the taking of some coarse locks that are potentially
 * very heavily contended. The lock should be periodically unlocked to avoid
 * having disabled IRQs for a long time, even when there is nobody waiting on
 * the lock. It might also be that allowing the IRQs will result in
 * need_resched() becoming true. If scheduling is needed, async compaction
 * aborts. Sync compaction schedules.
 * Either compaction type will also abort if a fatal signal is pending.
 * In either case if the lock was locked, it is dropped and not regained.
 *
 * Returns true if compaction should abort due to fatal signal pending, or
 *		async compaction due to need_resched()
 * Returns false when compaction can continue (sync compaction might have
 *		scheduled)
 */
static bool compact_unlock_should_abort(spinlock_t *lock,
		unsigned long flags, bool *locked, struct compact_control *cc)
{
	if (*locked) {
		spin_unlock_irqrestore(lock, flags);
		*locked = false;
	}

	if (fatal_signal_pending(current)) {
		cc->contended = true;
		return true;
	}

	if (need_resched()) {
		if (cc->mode == MIGRATE_ASYNC) {
			cc->contended = true;
			return true;
		}
		cond_resched();
	}

	return false;
}

/*
 * Aside from avoiding lock contention, compaction also periodically checks
 * need_resched() and either schedules in sync compaction or aborts async
 * compaction. This is similar to what compact_unlock_should_abort() does, but
 * is used where no lock is concerned.
 *
 * Returns false when no scheduling was needed, or sync compaction scheduled.
 * Returns true when async compaction should abort.
 */
static inline bool compact_should_abort(struct compact_control *cc)
{
	/* async compaction aborts if contended */
	if (need_resched()) {
		if (cc->mode == MIGRATE_ASYNC) {
			cc->contended = true;
			return true;
		}

		cond_resched();
	}

	return false;
}
/*
 * Isolate free pages onto a private freelist. If @strict is true, will abort
 * returning 0 on any invalid PFNs or non-free pages inside of the pageblock
 * (even though it may still end up isolating some pages).
 */
static unsigned long isolate_freepages_block(struct compact_control *cc,
				unsigned long *start_pfn,
				unsigned long end_pfn,
				struct list_head *freelist,
				bool strict)
{
	int nr_scanned = 0, total_isolated = 0;
	struct page *cursor, *valid_page = NULL;
	unsigned long flags = 0;
	bool locked = false;
	unsigned long blockpfn = *start_pfn;
	unsigned int order;

	cursor = pfn_to_page(blockpfn);

	/* Isolate free pages. */
	for (; blockpfn < end_pfn; blockpfn++, cursor++) {
		int isolated;
		struct page *page = cursor;

		/*
		 * Periodically drop the lock (if held) regardless of its
		 * contention, to give chance to IRQs. Abort if fatal signal
		 * pending or async compaction detects need_resched()
		 */
		if (!(blockpfn % SWAP_CLUSTER_MAX)
		    && compact_unlock_should_abort(&cc->zone->lock, flags,
								&locked, cc))
			break;

		nr_scanned++;
		if (!pfn_valid_within(blockpfn))
			goto isolate_fail;

		if (!valid_page)
			valid_page = page;

		/*
		 * For compound pages such as THP and hugetlbfs, we can save
		 * potentially a lot of iterations if we skip them at once.
		 * The check is racy, but we can consider only valid values
		 * and the only danger is skipping too much.
		 */
		if (PageCompound(page)) {
			unsigned int comp_order = compound_order(page);

			if (likely(comp_order < MAX_ORDER)) {
				blockpfn += (1UL << comp_order) - 1;
				cursor += (1UL << comp_order) - 1;
			}

			goto isolate_fail;
		}

		if (!PageBuddy(page))
			goto isolate_fail;

		/*
		 * If we already hold the lock, we can skip some rechecking.
		 * Note that if we hold the lock now, checked_pageblock was
		 * already set in some previous iteration (or strict is true),
		 * so it is correct to skip the suitable migration target
		 * recheck as well.
		 */
		if (!locked) {
			/*
			 * The zone lock must be held to isolate freepages.
			 * Unfortunately this is a very coarse lock and can be
			 * heavily contended if there are parallel allocations
			 * or parallel compactions. For async compaction do not
			 * spin on the lock and we acquire the lock as late as
			 * possible.
			 */
			locked = compact_trylock_irqsave(&cc->zone->lock,
								&flags, cc);
			if (!locked)
				break;

			/* Recheck this is a buddy page under lock */
			if (!PageBuddy(page))
				goto isolate_fail;
		}

		/* Found a free page, will break it into order-0 pages */
		order = page_order(page);
		isolated = __isolate_free_page(page, order);
		if (!isolated)
			break;
		set_page_private(page, order);

		total_isolated += isolated;
		cc->nr_freepages += isolated;
		list_add_tail(&page->lru, freelist);

		if (!strict && cc->nr_migratepages <= cc->nr_freepages) {
			blockpfn += isolated;
			break;
		}

		/* Advance to the end of split page */
		blockpfn += isolated - 1;
		cursor += isolated - 1;
		continue;

isolate_fail:
		if (strict)
			break;
		else
			continue;

	}

	if (locked)
		spin_unlock_irqrestore(&cc->zone->lock, flags);

	/*
	 * There is a tiny chance that we have read bogus compound_order(),
	 * so be careful to not go outside of the pageblock.
	 */
	if (unlikely(blockpfn > end_pfn))
		blockpfn = end_pfn;

	trace_mm_compaction_isolate_freepages(*start_pfn, blockpfn,
					nr_scanned, total_isolated);

	/* Record how far we have got within the block */
	*start_pfn = blockpfn;

	/*
	 * If strict isolation is requested by CMA then check that all the
	 * pages requested were isolated. If there were any failures, 0 is
	 * returned and CMA will fail.
	 */
	if (strict && blockpfn < end_pfn)
		total_isolated = 0;

	/* Update the pageblock-skip if the whole pageblock was scanned */
	if (blockpfn == end_pfn)
		update_pageblock_skip(cc, valid_page, total_isolated, false);

	cc->total_free_scanned += nr_scanned;
	if (total_isolated)
		count_compact_events(COMPACTISOLATED, total_isolated);
	return total_isolated;
}
/**
 * isolate_freepages_range() - isolate free pages.
 * @start_pfn: The first PFN to start isolating.
 * @end_pfn:   The one-past-last PFN.
 *
 * Non-free pages, invalid PFNs, or zone boundaries within the
 * [start_pfn, end_pfn) range are considered errors, cause function to
 * undo its actions and return zero.
 *
 * Otherwise, function returns one-past-the-last PFN of isolated page
 * (which may be greater than end_pfn if end fell in the middle of
 * a free page).
 */
unsigned long
isolate_freepages_range(struct compact_control *cc,
			unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long isolated, pfn, block_start_pfn, block_end_pfn;
	LIST_HEAD(freelist);

	pfn = start_pfn;
	block_start_pfn = pageblock_start_pfn(pfn);
	if (block_start_pfn < cc->zone->zone_start_pfn)
		block_start_pfn = cc->zone->zone_start_pfn;
	block_end_pfn = pageblock_end_pfn(pfn);

	for (; pfn < end_pfn; pfn += isolated,
				block_start_pfn = block_end_pfn,
				block_end_pfn += pageblock_nr_pages) {
		/* Protect pfn from changing by isolate_freepages_block */
		unsigned long isolate_start_pfn = pfn;

		block_end_pfn = min(block_end_pfn, end_pfn);

		/*
		 * pfn could pass the block_end_pfn if isolated freepage
		 * is more than pageblock order. In this case, we adjust
		 * the scanning range to the right one.
		 */
		if (pfn >= block_end_pfn) {
			block_start_pfn = pageblock_start_pfn(pfn);
			block_end_pfn = pageblock_end_pfn(pfn);
			block_end_pfn = min(block_end_pfn, end_pfn);
		}

		if (!pageblock_pfn_to_page(block_start_pfn,
					block_end_pfn, cc->zone))
			break;

		isolated = isolate_freepages_block(cc, &isolate_start_pfn,
						block_end_pfn, &freelist, true);

		/*
		 * In strict mode, isolate_freepages_block() returns 0 if
		 * there are any holes in the block (ie. invalid PFNs or
		 * non-free pages).
		 */
		if (!isolated)
			break;

		/*
		 * If we managed to isolate pages, it is always (1 << n) *
		 * pageblock_nr_pages for some non-negative n. (Max order
		 * page may span two pageblocks).
		 */
	}

	/* __isolate_free_page() does not map the pages */
	map_pages(&freelist);

	if (pfn < end_pfn) {
		/* Loop terminated early, cleanup. */
		release_freepages(&freelist);
		return 0;
	}

	/* We don't use freelists for anything. */
	return pfn;
}
/* Similar to reclaim, but different enough that they don't share logic */
static bool too_many_isolated(struct zone *zone)
{
	unsigned long active, inactive, isolated;

	inactive = node_page_state(zone->zone_pgdat, NR_INACTIVE_FILE) +
			node_page_state(zone->zone_pgdat, NR_INACTIVE_ANON);
	active = node_page_state(zone->zone_pgdat, NR_ACTIVE_FILE) +
			node_page_state(zone->zone_pgdat, NR_ACTIVE_ANON);
	isolated = node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE) +
			node_page_state(zone->zone_pgdat, NR_ISOLATED_ANON);

	return isolated > (inactive + active) / 2;
}
/**
 * isolate_migratepages_block() - isolate all migrate-able pages within
 *				  a single pageblock
 * @cc:		Compaction control structure.
 * @low_pfn:	The first PFN to isolate
 * @end_pfn:	The one-past-the-last PFN to isolate, within same pageblock
 * @isolate_mode: Isolation mode to be used.
 *
 * Isolate all pages that can be migrated from the range specified by
 * [low_pfn, end_pfn). The range is expected to be within same pageblock.
 * Returns zero if there is a fatal signal pending, otherwise PFN of the
 * first page that was not scanned (which may be both less, equal to or more
 * than end_pfn).
 *
 * The pages are isolated on cc->migratepages list (not required to be empty),
 * and cc->nr_migratepages is updated accordingly. The cc->migrate_pfn field
 * is neither read nor updated.
 */
static unsigned long
isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
			unsigned long end_pfn, isolate_mode_t isolate_mode)
{
	struct zone *zone = cc->zone;
	unsigned long nr_scanned = 0, nr_isolated = 0;
	struct lruvec *lruvec;
	unsigned long flags = 0;
	bool locked = false;
	struct page *page = NULL, *valid_page = NULL;
	unsigned long start_pfn = low_pfn;
	bool skip_on_failure = false;
	unsigned long next_skip_pfn = 0;

	/*
	 * Ensure that there are not too many pages isolated from the LRU
	 * list by either parallel reclaimers or compaction. If there are,
	 * delay for some time until fewer pages are isolated
	 */
	while (unlikely(too_many_isolated(zone))) {
		/* async migration should just abort */
		if (cc->mode == MIGRATE_ASYNC)
			return 0;

		congestion_wait(BLK_RW_ASYNC, HZ/10);

		if (fatal_signal_pending(current))
			return 0;
	}

	if (compact_should_abort(cc))
		return 0;

	if (cc->direct_compaction && (cc->mode == MIGRATE_ASYNC)) {
		skip_on_failure = true;
		next_skip_pfn = block_end_pfn(low_pfn, cc->order);
	}

	/* Time to isolate some pages for migration */
	for (; low_pfn < end_pfn; low_pfn++) {

		if (skip_on_failure && low_pfn >= next_skip_pfn) {
			/*
			 * We have isolated all migration candidates in the
			 * previous order-aligned block, and did not skip it due
			 * to failure. We should migrate the pages now and
			 * hopefully succeed compaction.
			 */
			if (nr_isolated)
				break;

			/*
			 * We failed to isolate in the previous order-aligned
			 * block. Set the new boundary to the end of the
			 * current block. Note we can't simply increase
			 * next_skip_pfn by 1 << order, as low_pfn might have
			 * been incremented by a higher number due to skipping
			 * a compound or a high-order buddy page in the
			 * previous loop iteration.
			 */
			next_skip_pfn = block_end_pfn(low_pfn, cc->order);
		}

		/*
		 * Periodically drop the lock (if held) regardless of its
		 * contention, to give chance to IRQs. Abort async compaction
		 * if contended.
		 */
		if (!(low_pfn % SWAP_CLUSTER_MAX)
		    && compact_unlock_should_abort(zone_lru_lock(zone), flags,
								&locked, cc))
			break;

		if (!pfn_valid_within(low_pfn))
			goto isolate_fail;
		nr_scanned++;

		page = pfn_to_page(low_pfn);

		if (!valid_page)
			valid_page = page;

		/*
		 * Skip if free. We read page order here without zone lock
		 * which is generally unsafe, but the race window is small and
		 * the worst thing that can happen is that we skip some
		 * potential isolation targets.
		 */
		if (PageBuddy(page)) {
			unsigned long freepage_order = page_order_unsafe(page);

			/*
			 * Without lock, we cannot be sure that what we got is
			 * a valid page order. Consider only values in the
			 * valid order range to prevent low_pfn overflow.
			 */
			if (freepage_order > 0 && freepage_order < MAX_ORDER)
				low_pfn += (1UL << freepage_order) - 1;
			continue;
		}

		/*
		 * Regardless of being on LRU, compound pages such as THP and
		 * hugetlbfs are not to be compacted. We can potentially save
		 * a lot of iterations if we skip them at once. The check is
		 * racy, but we can consider only valid values and the only
		 * danger is skipping too much.
		 */
		if (PageCompound(page)) {
			unsigned int comp_order = compound_order(page);

			if (likely(comp_order < MAX_ORDER))
				low_pfn += (1UL << comp_order) - 1;

			goto isolate_fail;
		}

		/*
		 * Check may be lockless but that's ok as we recheck later.
		 * It's possible to migrate LRU and non-lru movable pages.
		 * Skip any other type of page
		 */
		if (!PageLRU(page)) {
			/*
			 * __PageMovable can return false positive so we need
			 * to verify it under page_lock.
			 */
			if (unlikely(__PageMovable(page)) &&
					!PageIsolated(page)) {
				if (locked) {
					spin_unlock_irqrestore(zone_lru_lock(zone),
									flags);
					locked = false;
				}

				if (!isolate_movable_page(page, isolate_mode))
					goto isolate_success;
			}

			goto isolate_fail;
		}

		/*
		 * Migration will fail if an anonymous page is pinned in memory,
		 * so avoid taking lru_lock and isolating it unnecessarily in an
		 * admittedly racy check.
		 */
		if (!page_mapping(page) &&
		    page_count(page) > page_mapcount(page))
			goto isolate_fail;

		/*
		 * Only allow to migrate anonymous pages in GFP_NOFS context
		 * because those do not depend on fs locks.
		 */
		if (!(cc->gfp_mask & __GFP_FS) && page_mapping(page))
			goto isolate_fail;

		/* If we already hold the lock, we can skip some rechecking */
		if (!locked) {
			locked = compact_trylock_irqsave(zone_lru_lock(zone),
								&flags, cc);
			if (!locked)
				break;

			/* Recheck PageLRU and PageCompound under lock */
			if (!PageLRU(page))
				goto isolate_fail;

			/*
			 * Page become compound since the non-locked check,
			 * and it's on LRU. It can only be a THP so the order
			 * is safe to read and it's 0 for tail pages.
			 */
			if (unlikely(PageCompound(page))) {
				low_pfn += (1UL << compound_order(page)) - 1;
				goto isolate_fail;
			}
		}

		lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat);

		/* Try isolate the page */
		if (__isolate_lru_page(page, isolate_mode) != 0)
			goto isolate_fail;

		VM_BUG_ON_PAGE(PageCompound(page), page);

		/* Successfully isolated */
		del_page_from_lru_list(page, lruvec, page_lru(page));
		inc_node_page_state(page,
				NR_ISOLATED_ANON + page_is_file_cache(page));

isolate_success:
		list_add(&page->lru, &cc->migratepages);
		cc->nr_migratepages++;
		nr_isolated++;

		/*
		 * Record where we could have freed pages by migration and not
		 * yet flushed them to buddy allocator.
		 * - this is the lowest page that was isolated and likely be
		 * then freed by migration.
		 */
		if (!cc->last_migrated_pfn)
			cc->last_migrated_pfn = low_pfn;

		/* Avoid isolating too much */
		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
			++low_pfn;
			break;
		}

		continue;
isolate_fail:
		if (!skip_on_failure)
			continue;

		/*
		 * We have isolated some pages, but then failed. Release them
		 * instead of migrating, as we cannot form the cc->order buddy
		 * page anyway.
		 */
		if (nr_isolated) {
			if (locked) {
				spin_unlock_irqrestore(zone_lru_lock(zone), flags);
				locked = false;
			}
			putback_movable_pages(&cc->migratepages);
			cc->nr_migratepages = 0;
			cc->last_migrated_pfn = 0;
			nr_isolated = 0;
		}

		if (low_pfn < next_skip_pfn) {
			low_pfn = next_skip_pfn - 1;
			/*
			 * The check near the loop beginning would have updated
			 * next_skip_pfn too, but this is a bit simpler.
			 */
			next_skip_pfn += 1UL << cc->order;
		}
	}

	/*
	 * The PageBuddy() check could have potentially brought us outside
	 * the range to be scanned.
	 */
	if (unlikely(low_pfn > end_pfn))
		low_pfn = end_pfn;

	if (locked)
		spin_unlock_irqrestore(zone_lru_lock(zone), flags);

	/*
	 * Update the pageblock-skip information and cached scanner pfn,
	 * if the whole pageblock was scanned without isolating any page.
	 */
	if (low_pfn == end_pfn)
		update_pageblock_skip(cc, valid_page, nr_isolated, true);

	trace_mm_compaction_isolate_migratepages(start_pfn, low_pfn,
						nr_scanned, nr_isolated);

	cc->total_migrate_scanned += nr_scanned;
	if (nr_isolated)
		count_compact_events(COMPACTISOLATED, nr_isolated);

	return low_pfn;
}
/**
 * isolate_migratepages_range() - isolate migrate-able pages in a PFN range
 * @cc:        Compaction control structure.
 * @start_pfn: The first PFN to start isolating.
 * @end_pfn:   The one-past-last PFN.
 *
 * Returns zero if isolation fails fatally due to e.g. pending signal.
 * Otherwise, function returns one-past-the-last PFN of isolated page
 * (which may be greater than end_pfn if end fell in the middle of a THP page).
 */
unsigned long
isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
							unsigned long end_pfn)
{
	unsigned long pfn, block_start_pfn, block_end_pfn;

	/* Scan block by block. First and last block may be incomplete */
	pfn = start_pfn;
	block_start_pfn = pageblock_start_pfn(pfn);
	if (block_start_pfn < cc->zone->zone_start_pfn)
		block_start_pfn = cc->zone->zone_start_pfn;
	block_end_pfn = pageblock_end_pfn(pfn);

	for (; pfn < end_pfn; pfn = block_end_pfn,
				block_start_pfn = block_end_pfn,
				block_end_pfn += pageblock_nr_pages) {

		block_end_pfn = min(block_end_pfn, end_pfn);

		if (!pageblock_pfn_to_page(block_start_pfn,
					block_end_pfn, cc->zone))
			continue;

		pfn = isolate_migratepages_block(cc, pfn, block_end_pfn,
							ISOLATE_UNEVICTABLE);

		if (!pfn)
			break;

		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX)
			break;
	}

	return pfn;
}
#endif /* CONFIG_COMPACTION || CONFIG_CMA */
#ifdef CONFIG_COMPACTION
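
/*
 * Returns true if the pageblock may be scanned for pages to migrate away.
 * Async direct compaction only uses blocks compatible with the requested
 * migratetype; all other modes accept any pageblock.
 */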
static bool suitable_migration_source(struct compact_control *cc,
							struct page *page)
{
	int block_mt;

	if ((cc->mode != MIGRATE_ASYNC) || !cc->direct_compaction)
		return true;

	block_mt = get_pageblock_migratetype(page);

	if (cc->migratetype == MIGRATE_MOVABLE)
		return is_migrate_movable(block_mt);
	else
		return block_mt == cc->migratetype;
}

/* Returns true if the page is within a block suitable for migration to */
static bool suitable_migration_target(struct compact_control *cc,
							struct page *page)
{
	/* If the page is a large free page, then disallow migration */
	if (PageBuddy(page)) {
		/*
		 * We are checking page_order without zone->lock taken. But
		 * the only small danger is that we skip a potentially suitable
		 * pageblock, so it's not worth to check order for valid range.
		 */
		if (page_order_unsafe(page) >= pageblock_order)
			return false;
	}

	if (cc->ignore_block_suitable)
		return true;

	/* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
	if (is_migrate_movable(get_pageblock_migratetype(page)))
		return true;

	/* Otherwise skip the block */
	return false;
}

/*
 * Test whether the free scanner has reached the same or lower pageblock than
 * the migration scanner, and compaction should thus terminate.
 */
static inline bool compact_scanners_met(struct compact_control *cc)
{
	return (cc->free_pfn >> pageblock_order)
		<= (cc->migrate_pfn >> pageblock_order);
}
/*
 * Based on information in the current compact_control, find blocks
 * suitable for isolating free pages from and then isolate them.
 */
static void isolate_freepages(struct compact_control *cc)
{
	struct zone *zone = cc->zone;
	struct page *page;
	unsigned long block_start_pfn;	/* start of current pageblock */
	unsigned long isolate_start_pfn; /* exact pfn we start at */
	unsigned long block_end_pfn;	/* end of current pageblock */
	unsigned long low_pfn;	     /* lowest pfn scanner is able to scan */
	struct list_head *freelist = &cc->freepages;

	/*
	 * Initialise the free scanner. The starting point is where we last
	 * successfully isolated from, zone-cached value, or the end of the
	 * zone when isolating for the first time. For looping we also need
	 * this pfn aligned down to the pageblock boundary, because we do
	 * block_start_pfn -= pageblock_nr_pages in the for loop.
	 * For ending point, take care when isolating in the last pageblock of
	 * a zone which ends in the middle of a pageblock.
	 * The low boundary is the end of the pageblock the migration scanner
	 * is using.
	 */
	isolate_start_pfn = cc->free_pfn;
	block_start_pfn = pageblock_start_pfn(cc->free_pfn);
	block_end_pfn = min(block_start_pfn + pageblock_nr_pages,
						zone_end_pfn(zone));
	low_pfn = pageblock_end_pfn(cc->migrate_pfn);

	/*
	 * Isolate free pages until enough are available to migrate the
	 * pages on cc->migratepages. We stop searching if the migrate
	 * and free page scanners meet or enough free pages are isolated.
	 */
	for (; block_start_pfn >= low_pfn;
				block_end_pfn = block_start_pfn,
				block_start_pfn -= pageblock_nr_pages,
				isolate_start_pfn = block_start_pfn) {
		/*
		 * This can iterate a massively long zone without finding any
		 * suitable migration targets, so periodically check if we need
		 * to schedule, or even abort async compaction.
		 */
		if (!(block_start_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
						&& compact_should_abort(cc))
			break;

		page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
									zone);
		if (!page)
			continue;

		/* Check the block is suitable for migration */
		if (!suitable_migration_target(cc, page))
			continue;

		/* If isolation recently failed, do not retry */
		if (!isolation_suitable(cc, page))
			continue;

		/* Found a block suitable for isolating free pages from. */
		isolate_freepages_block(cc, &isolate_start_pfn, block_end_pfn,
					freelist, false);

		/*
		 * If we isolated enough freepages, or aborted due to lock
		 * contention, terminate.
		 */
		if ((cc->nr_freepages >= cc->nr_migratepages)
							|| cc->contended) {
			if (isolate_start_pfn >= block_end_pfn) {
				/*
				 * Restart at previous pageblock if more
				 * freepages can be isolated next time.
				 */
				isolate_start_pfn =
					block_start_pfn - pageblock_nr_pages;
			}
			break;
		} else if (isolate_start_pfn < block_end_pfn) {
			/*
			 * If isolation failed early, do not continue
			 * needlessly.
			 */
			break;
		}
	}

	/* __isolate_free_page() does not map the pages */
	map_pages(freelist);

	/*
	 * Record where the free scanner will restart next time. Either we
	 * broke from the loop and set isolate_start_pfn based on the last
	 * call to isolate_freepages_block(), or we met the migration scanner
	 * and the loop terminated due to isolate_start_pfn < low_pfn
	 */
	cc->free_pfn = isolate_start_pfn;
}
/*
 * This is a migrate-callback that "allocates" freepages by taking pages
 * from the isolated freelists in the block we are migrating to.
 */
static struct page *compaction_alloc(struct page *migratepage,
					unsigned long data,
					int **result)
{
	struct compact_control *cc = (struct compact_control *)data;
	struct page *freepage;

	/*
	 * Isolate free pages if necessary, and if we are not aborting due to
	 * contention.
	 */
	if (list_empty(&cc->freepages)) {
		if (!cc->contended)
			isolate_freepages(cc);

		if (list_empty(&cc->freepages))
			return NULL;
	}

	freepage = list_entry(cc->freepages.next, struct page, lru);
	list_del(&freepage->lru);
	cc->nr_freepages--;

	return freepage;
}

/*
 * This is a migrate-callback that "frees" freepages back to the isolated
 * freelist. All pages on the freelist are from the same zone, so there is no
 * special handling needed for NUMA.
 */
static void compaction_free(struct page *page, unsigned long data)
{
	struct compact_control *cc = (struct compact_control *)data;

	list_add(&page->lru, &cc->freepages);
	cc->nr_freepages++;
}

/* possible outcome of isolate_migratepages */
typedef enum {
	ISOLATE_ABORT,		/* Abort compaction now */
	ISOLATE_NONE,		/* No pages isolated, continue scanning */
	ISOLATE_SUCCESS,	/* Pages isolated, migrate */
} isolate_migrate_t;
/*
 * Allow userspace to control policy on scanning the unevictable LRU for
 * compactable pages.
 */
int sysctl_compact_unevictable_allowed __read_mostly = 1;

/*
 * Isolate all pages that can be migrated from the first suitable block,
 * starting at the block pointed to by the migrate scanner pfn within
 * compact_control.
 */
static isolate_migrate_t isolate_migratepages(struct zone *zone,
					struct compact_control *cc)
{
	unsigned long block_start_pfn;
	unsigned long block_end_pfn;
	unsigned long low_pfn;
	struct page *page;
	const isolate_mode_t isolate_mode =
		(sysctl_compact_unevictable_allowed ? ISOLATE_UNEVICTABLE : 0) |
		(cc->mode != MIGRATE_SYNC ? ISOLATE_ASYNC_MIGRATE : 0);

	/*
	 * Start at where we last stopped, or beginning of the zone as
	 * initialized by compact_zone()
	 */
	low_pfn = cc->migrate_pfn;
	block_start_pfn = pageblock_start_pfn(low_pfn);
	if (block_start_pfn < zone->zone_start_pfn)
		block_start_pfn = zone->zone_start_pfn;

	/* Only scan within a pageblock boundary */
	block_end_pfn = pageblock_end_pfn(low_pfn);

	/*
	 * Iterate over whole pageblocks until we find the first suitable.
	 * Do not cross the free scanner.
	 */
	for (; block_end_pfn <= cc->free_pfn;
			low_pfn = block_end_pfn,
			block_start_pfn = block_end_pfn,
			block_end_pfn += pageblock_nr_pages) {

		/*
		 * This can potentially iterate a massively long zone with
		 * many pageblocks unsuitable, so periodically check if we
		 * need to schedule, or even abort async compaction.
		 */
		if (!(low_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
						&& compact_should_abort(cc))
			break;

		page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
									zone);
		if (!page)
			continue;

		/* If isolation recently failed, do not retry */
		if (!isolation_suitable(cc, page))
			continue;

		/*
		 * For async compaction, also only scan in MOVABLE blocks.
		 * Async compaction is optimistic to see if the minimum amount
		 * of work satisfies the allocation.
		 */
		if (!suitable_migration_source(cc, page))
			continue;

		/* Perform the isolation */
		low_pfn = isolate_migratepages_block(cc, low_pfn,
						block_end_pfn, isolate_mode);

		if (!low_pfn || cc->contended)
			return ISOLATE_ABORT;

		/*
		 * Either we isolated something and proceed with migration. Or
		 * we failed and compact_zone should decide if we should
		 * continue or not.
		 */
		break;
	}

	/* Record where migration scanner will be restarted. */
	cc->migrate_pfn = low_pfn;

	return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE;
}
/*
 * order == -1 is expected when compacting via
 * /proc/sys/vm/compact_memory
 */
static inline bool is_via_compact_memory(int order)
{
	return order == -1;
}
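
/*
 * Check whether this compaction run can stop: the scanners have met, the run
 * was contended or interrupted, or a free page suitable for the requested
 * order and migratetype has become available.
 */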
static enum compact_result __compact_finished(struct zone *zone,
						struct compact_control *cc)
{
	unsigned int order;
	const int migratetype = cc->migratetype;

	if (cc->contended || fatal_signal_pending(current))
		return COMPACT_CONTENDED;

	/* Compaction run completes if the migrate and free scanner meet */
	if (compact_scanners_met(cc)) {
		/* Let the next compaction start anew. */
		reset_cached_positions(zone);

		/*
		 * Mark that the PG_migrate_skip information should be cleared
		 * by kswapd when it goes to sleep. kcompactd does not set the
		 * flag itself as the decision to be clear should be directly
		 * based on an allocation request.
		 */
		if (cc->direct_compaction)
			zone->compact_blockskip_flush = true;

		if (cc->whole_zone)
			return COMPACT_COMPLETE;
		else
			return COMPACT_PARTIAL_SKIPPED;
	}

	if (is_via_compact_memory(cc->order))
		return COMPACT_CONTINUE;

	if (cc->finishing_block) {
		/*
		 * We have finished the pageblock, but better check again that
		 * we really succeeded.
		 */
		if (IS_ALIGNED(cc->migrate_pfn, pageblock_nr_pages))
			cc->finishing_block = false;
		else
			return COMPACT_CONTINUE;
	}

	/* Direct compactor: Is a suitable page free? */
	for (order = cc->order; order < MAX_ORDER; order++) {
		struct free_area *area = &zone->free_area[order];
		bool can_steal;

		/* Job done if page is free of the right migratetype */
		if (!list_empty(&area->free_list[migratetype]))
			return COMPACT_SUCCESS;

#ifdef CONFIG_CMA
		/* MIGRATE_MOVABLE can fallback on MIGRATE_CMA */
		if (migratetype == MIGRATE_MOVABLE &&
			!list_empty(&area->free_list[MIGRATE_CMA]))
			return COMPACT_SUCCESS;
#endif
		/*
		 * Job done if allocation would steal freepages from
		 * other migratetype buddy lists.
		 */
		if (find_suitable_fallback(area, order, migratetype,
						true, &can_steal) != -1) {

			/* movable pages are OK in any pageblock */
			if (migratetype == MIGRATE_MOVABLE)
				return COMPACT_SUCCESS;

			/*
			 * We are stealing for a non-movable allocation. Make
			 * sure we finish compacting the current pageblock
			 * first so it is as free as possible and we won't
			 * have to steal another one soon. This only applies
			 * to sync compaction, as async compaction operates
			 * on pageblocks of the same migratetype.
			 */
			if (cc->mode == MIGRATE_ASYNC ||
					IS_ALIGNED(cc->migrate_pfn,
							pageblock_nr_pages)) {
				return COMPACT_SUCCESS;
			}

			cc->finishing_block = true;
			return COMPACT_CONTINUE;
		}
	}

	return COMPACT_NO_SUITABLE_PAGE;
}
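
/*
 * Wrapper around __compact_finished() that emits the tracepoint and maps
 * COMPACT_NO_SUITABLE_PAGE back to COMPACT_CONTINUE for the caller.
 */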
static enum compact_result compact_finished(struct zone *zone,
			struct compact_control *cc)
{
	int ret;

	ret = __compact_finished(zone, cc);
	trace_mm_compaction_finished(zone, cc->order, ret);
	if (ret == COMPACT_NO_SUITABLE_PAGE)
		ret = COMPACT_CONTINUE;

	return ret;
}
/*
 * compaction_suitable: Is this suitable to run compaction on this zone now?
 * Returns
 *   COMPACT_SKIPPED  - If there are too few free pages for compaction
 *   COMPACT_SUCCESS  - If the allocation would succeed without compaction
 *   COMPACT_CONTINUE - If compaction should run now
 */
static enum compact_result __compaction_suitable(struct zone *zone, int order,
					unsigned int alloc_flags,
					int classzone_idx,
					unsigned long wmark_target)
{
	unsigned long watermark;

	if (is_via_compact_memory(order))
		return COMPACT_CONTINUE;

	watermark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
	/*
	 * If watermarks for high-order allocation are already met, there
	 * should be no need for compaction at all.
	 */
	if (zone_watermark_ok(zone, order, watermark, classzone_idx,
								alloc_flags))
		return COMPACT_SUCCESS;

	/*
	 * Watermarks for order-0 must be met for compaction to be able to
	 * isolate free pages for migration targets. This means that the
	 * watermark and alloc_flags have to match, or be more pessimistic than
	 * the check in __isolate_free_page(). We don't use the direct
	 * compactor's alloc_flags, as they are not relevant for freepage
	 * isolation. We however do use the direct compactor's classzone_idx to
	 * skip over zones where lowmem reserves would prevent allocation even
	 * if compaction succeeds.
	 * For costly orders, we require low watermark instead of min for
	 * compaction to proceed to increase its chances.
	 * ALLOC_CMA is used, as pages in CMA pageblocks are considered
	 * suitable migration targets
	 */
	watermark = (order > PAGE_ALLOC_COSTLY_ORDER) ?
				low_wmark_pages(zone) : min_wmark_pages(zone);
	watermark += compact_gap(order);
	if (!__zone_watermark_ok(zone, 0, watermark, classzone_idx,
						ALLOC_CMA, wmark_target))
		return COMPACT_SKIPPED;

	return COMPACT_CONTINUE;
}

enum compact_result compaction_suitable(struct zone *zone, int order,
					unsigned int alloc_flags,
					int classzone_idx)
{
	enum compact_result ret;
	int fragindex;

	ret = __compaction_suitable(zone, order, alloc_flags, classzone_idx,
				    zone_page_state(zone, NR_FREE_PAGES));
	/*
	 * fragmentation index determines if allocation failures are due to
	 * low memory or external fragmentation
	 *
	 * index of -1000 would imply allocations might succeed depending on
	 * watermarks, but we already failed the high-order watermark check
	 * index towards 0 implies failure is due to lack of memory
	 * index towards 1000 implies failure is due to fragmentation
	 *
	 * Only compact if a failure would be due to fragmentation. Also
	 * ignore fragindex for non-costly orders where the alternative to
	 * a successful reclaim/compaction is OOM. Fragindex and the
	 * vm.extfrag_threshold sysctl is meant as a heuristic to prevent
	 * excessive compaction for costly orders, but it should not be at the
	 * expense of system stability.
	 */
	if (ret == COMPACT_CONTINUE && (order > PAGE_ALLOC_COSTLY_ORDER)) {
		fragindex = fragmentation_index(zone, order);
		if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
			ret = COMPACT_NOT_SUITABLE_ZONE;
	}

	trace_mm_compaction_suitable(zone, order, ret);
	if (ret == COMPACT_NOT_SUITABLE_ZONE)
		ret = COMPACT_SKIPPED;

	return ret;
}
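
/*
 * Returns true if at least one zone in the zonelist could pass the
 * __compaction_suitable() watermark check, assuming reclaim makes a share of
 * the reclaimable pages available.
 */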
bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
		int alloc_flags)
{
	struct zone *zone;
	struct zoneref *z;

	/*
	 * Make sure at least one zone would pass __compaction_suitable if we continue
	 * retrying the reclaim.
	 */
	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
					ac->nodemask) {
		unsigned long available;
		enum compact_result compact_result;

		/*
		 * Do not consider all the reclaimable memory because we do not
		 * want to trash just for a single high order allocation which
		 * is even not guaranteed to appear even if __compaction_suitable
		 * is happy about the watermark check.
		 */
		available = zone_reclaimable_pages(zone) / order;
		available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
		compact_result = __compaction_suitable(zone, order, alloc_flags,
				ac_classzone_idx(ac), available);
		if (compact_result != COMPACT_SKIPPED)
			return true;
	}

	return false;
}
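
/* Run one compaction attempt over a single zone, as described by @cc. */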
static enum compact_result compact_zone(struct zone *zone, struct compact_control *cc)
{
	enum compact_result ret;
	unsigned long start_pfn = zone->zone_start_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);
	const bool sync = cc->mode != MIGRATE_ASYNC;

	cc->migratetype = gfpflags_to_migratetype(cc->gfp_mask);
	ret = compaction_suitable(zone, cc->order, cc->alloc_flags,
							cc->classzone_idx);
	/* Compaction is likely to fail */
	if (ret == COMPACT_SUCCESS || ret == COMPACT_SKIPPED)
		return ret;

	/* huh, compaction_suitable is returning something unexpected */
	VM_BUG_ON(ret != COMPACT_CONTINUE);

	/*
	 * Clear pageblock skip if there were failures recently and compaction
	 * is about to be retried after being deferred.
	 */
	if (compaction_restarting(zone, cc->order))
		__reset_isolation_suitable(zone);

	/*
	 * Setup to move all movable pages to the end of the zone. Used cached
	 * information on where the scanners should start (unless we explicitly
	 * want to compact the whole zone), but check that it is initialised
	 * by ensuring the values are within zone boundaries.
	 */
	if (cc->whole_zone) {
		cc->migrate_pfn = start_pfn;
		cc->free_pfn = pageblock_start_pfn(end_pfn - 1);
	} else {
		cc->migrate_pfn = zone->compact_cached_migrate_pfn[sync];
		cc->free_pfn = zone->compact_cached_free_pfn;
		if (cc->free_pfn < start_pfn || cc->free_pfn >= end_pfn) {
			cc->free_pfn = pageblock_start_pfn(end_pfn - 1);
			zone->compact_cached_free_pfn = cc->free_pfn;
		}
		if (cc->migrate_pfn < start_pfn || cc->migrate_pfn >= end_pfn) {
			cc->migrate_pfn = start_pfn;
			zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
			zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
		}

		if (cc->migrate_pfn == start_pfn)
			cc->whole_zone = true;
	}

	cc->last_migrated_pfn = 0;

	trace_mm_compaction_begin(start_pfn, cc->migrate_pfn,
				cc->free_pfn, end_pfn, sync);

	migrate_prep_local();

	while ((ret = compact_finished(zone, cc)) == COMPACT_CONTINUE) {
		int err;

		switch (isolate_migratepages(zone, cc)) {
		case ISOLATE_ABORT:
			ret = COMPACT_CONTENDED;
			putback_movable_pages(&cc->migratepages);
			cc->nr_migratepages = 0;
			goto out;
		case ISOLATE_NONE:
			/*
			 * We haven't isolated and migrated anything, but
			 * there might still be unflushed migrations from
			 * previous cc->order aligned block.
			 */
			goto check_drain;
		case ISOLATE_SUCCESS:
			;
		}

		err = migrate_pages(&cc->migratepages, compaction_alloc,
				compaction_free, (unsigned long)cc, cc->mode,
				MR_COMPACTION);

		trace_mm_compaction_migratepages(cc->nr_migratepages, err,
							&cc->migratepages);

		/* All pages were either migrated or will be released */
		cc->nr_migratepages = 0;
		if (err) {
			putback_movable_pages(&cc->migratepages);
			/*
			 * migrate_pages() may return -ENOMEM when scanners meet
			 * and we want compact_finished() to detect it
			 */
			if (err == -ENOMEM && !compact_scanners_met(cc)) {
				ret = COMPACT_CONTENDED;
				goto out;
			}
			/*
			 * We failed to migrate at least one page in the current
			 * order-aligned block, so skip the rest of it.
			 */
			if (cc->direct_compaction &&
						(cc->mode == MIGRATE_ASYNC)) {
				cc->migrate_pfn = block_end_pfn(
						cc->migrate_pfn - 1, cc->order);
				/* Draining pcplists is useless in this case */
				cc->last_migrated_pfn = 0;
			}
		}

check_drain:
		/*
		 * Has the migration scanner moved away from the previous
		 * cc->order aligned block where we migrated from? If yes,
		 * flush the pages that were freed, so that they can merge and
		 * compact_finished() can detect immediately if allocation
		 * would succeed.
		 */
		if (cc->order > 0 && cc->last_migrated_pfn) {
			int cpu;
			unsigned long current_block_start =
				block_start_pfn(cc->migrate_pfn, cc->order);

			if (cc->last_migrated_pfn < current_block_start) {
				cpu = get_cpu();
				lru_add_drain_cpu(cpu);
				drain_local_pages(zone);
				put_cpu();
				/* No more flushing until we migrate again */
				cc->last_migrated_pfn = 0;
			}
		}
	}

out:
	/*
	 * Release free pages and update where the free scanner should restart,
	 * so we don't leave any returned pages behind in the next attempt.
	 */
	if (cc->nr_freepages > 0) {
		unsigned long free_pfn = release_freepages(&cc->freepages);

		cc->nr_freepages = 0;
		VM_BUG_ON(free_pfn == 0);
		/* The cached pfn is always the first in a pageblock */
		free_pfn = pageblock_start_pfn(free_pfn);
		/*
		 * Only go back, not forward. The cached pfn might have been
		 * already reset to zone end in compact_finished()
		 */
		if (free_pfn > zone->compact_cached_free_pfn)
			zone->compact_cached_free_pfn = free_pfn;
	}

	count_compact_events(COMPACTMIGRATE_SCANNED, cc->total_migrate_scanned);
	count_compact_events(COMPACTFREE_SCANNED, cc->total_free_scanned);

	trace_mm_compaction_end(start_pfn, cc->migrate_pfn,
				cc->free_pfn, end_pfn, sync, ret);

	return ret;
}
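
/*
 * Set up a compact_control for direct compaction of one zone and run
 * compact_zone() on it.
 */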
static enum compact_result compact_zone_order(struct zone *zone, int order,
		gfp_t gfp_mask, enum compact_priority prio,
		unsigned int alloc_flags, int classzone_idx)
{
	enum compact_result ret;
	struct compact_control cc = {
		.nr_freepages = 0,
		.nr_migratepages = 0,
		.total_migrate_scanned = 0,
		.total_free_scanned = 0,
		.order = order,
		.gfp_mask = gfp_mask,
		.zone = zone,
		.mode = (prio == COMPACT_PRIO_ASYNC) ?
					MIGRATE_ASYNC :	MIGRATE_SYNC_LIGHT,
		.alloc_flags = alloc_flags,
		.classzone_idx = classzone_idx,
		.direct_compaction = true,
		.whole_zone = (prio == MIN_COMPACT_PRIORITY),
		.ignore_skip_hint = (prio == MIN_COMPACT_PRIORITY),
		.ignore_block_suitable = (prio == MIN_COMPACT_PRIORITY)
	};
	INIT_LIST_HEAD(&cc.freepages);
	INIT_LIST_HEAD(&cc.migratepages);

	ret = compact_zone(zone, &cc);

	VM_BUG_ON(!list_empty(&cc.freepages));
	VM_BUG_ON(!list_empty(&cc.migratepages));

	return ret;
}
int sysctl_extfrag_threshold = 500;

/**
 * try_to_compact_pages - Direct compact to satisfy a high-order allocation
 * @gfp_mask: The GFP mask of the current allocation
 * @order: The order of the current allocation
 * @alloc_flags: The allocation flags of the current allocation
 * @ac: The context of current allocation
 * @prio: Determines how hard direct compaction should try to succeed
 *
 * This is the main entry point for direct page compaction.
 */
enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
		unsigned int alloc_flags, const struct alloc_context *ac,
		enum compact_priority prio)
{
	int may_perform_io = gfp_mask & __GFP_IO;
	struct zoneref *z;
	struct zone *zone;
	enum compact_result rc = COMPACT_SKIPPED;

	/*
	 * Check if the GFP flags allow compaction - GFP_NOIO is really
	 * tricky context because the migration might require IO
	 */
	if (!may_perform_io)
		return COMPACT_SKIPPED;

	trace_mm_compaction_try_to_compact_pages(order, gfp_mask, prio);

	/* Compact each zone in the list */
	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
								ac->nodemask) {
		enum compact_result status;

		if (prio > MIN_COMPACT_PRIORITY
					&& compaction_deferred(zone, order)) {
			rc = max_t(enum compact_result, COMPACT_DEFERRED, rc);
			continue;
		}

		status = compact_zone_order(zone, order, gfp_mask, prio,
					alloc_flags, ac_classzone_idx(ac));
		rc = max(status, rc);

		/* The allocation should succeed, stop compacting */
		if (status == COMPACT_SUCCESS) {
			/*
			 * We think the allocation will succeed in this zone,
			 * but it is not certain, hence the false. The caller
			 * will repeat this with true if allocation indeed
			 * succeeds in this zone.
			 */
			compaction_defer_reset(zone, order, false);

			break;
		}

		if (prio != COMPACT_PRIO_ASYNC && (status == COMPACT_COMPLETE ||
					status == COMPACT_PARTIAL_SKIPPED))
			/*
			 * We think that allocation won't succeed in this zone
			 * so we defer compaction there. If it ends up
			 * succeeding after all, it will be reset.
			 */
			defer_compaction(zone, order);

		/*
		 * We might have stopped compacting due to need_resched() in
		 * async compaction, or due to a fatal signal detected. In that
		 * case do not try further zones
		 */
		if ((prio == COMPACT_PRIO_ASYNC && need_resched())
					|| fatal_signal_pending(current))
			break;
	}

	return rc;
}
/* Compact all zones within a node */
static void compact_node(int nid)
{
	pg_data_t *pgdat = NODE_DATA(nid);
	int zoneid;
	struct zone *zone;
	struct compact_control cc = {
		.order = -1,
		.total_migrate_scanned = 0,
		.total_free_scanned = 0,
		.mode = MIGRATE_SYNC,
		.ignore_skip_hint = true,
		.whole_zone = true,
		.gfp_mask = GFP_KERNEL,
	};

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {

		zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		cc.nr_freepages = 0;
		cc.nr_migratepages = 0;
		cc.zone = zone;
		INIT_LIST_HEAD(&cc.freepages);
		INIT_LIST_HEAD(&cc.migratepages);

		compact_zone(zone, &cc);

		VM_BUG_ON(!list_empty(&cc.freepages));
		VM_BUG_ON(!list_empty(&cc.migratepages));
	}
}

/* Compact all nodes in the system */
static void compact_nodes(void)
{
	int nid;

	/* Flush pending updates to the LRU lists */
	lru_add_drain_all();

	for_each_online_node(nid)
		compact_node(nid);
}

/* The written value is actually unused, all memory is compacted */
int sysctl_compact_memory;

/*
 * This is the entry point for compacting all nodes via
 * /proc/sys/vm/compact_memory
 */
int sysctl_compaction_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	if (write)
		compact_nodes();

	return 0;
}

int sysctl_extfrag_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec_minmax(table, write, buffer, length, ppos);

	return 0;
}

#if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
static ssize_t sysfs_compact_node(struct device *dev,
			struct device_attribute *attr,
			const char *buf, size_t count)
{
	int nid = dev->id;

	if (nid >= 0 && nid < nr_node_ids && node_online(nid)) {
		/* Flush pending updates to the LRU lists */
		lru_add_drain_all();

		compact_node(nid);
	}

	return count;
}
static DEVICE_ATTR(compact, S_IWUSR, NULL, sysfs_compact_node);

int compaction_register_node(struct node *node)
{
	return device_create_file(&node->dev, &dev_attr_compact);
}

void compaction_unregister_node(struct node *node)
{
	return device_remove_file(&node->dev, &dev_attr_compact);
}
#endif /* CONFIG_SYSFS && CONFIG_NUMA */
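
/* Returns true if kcompactd has been asked to do work (or should stop). */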
static inline bool kcompactd_work_requested(pg_data_t *pgdat)
{
	return pgdat->kcompactd_max_order > 0 || kthread_should_stop();
}

static bool kcompactd_node_suitable(pg_data_t *pgdat)
{
	int zoneid;
	struct zone *zone;
	enum zone_type classzone_idx = pgdat->kcompactd_classzone_idx;

	for (zoneid = 0; zoneid <= classzone_idx; zoneid++) {
		zone = &pgdat->node_zones[zoneid];

		if (!populated_zone(zone))
			continue;

		if (compaction_suitable(zone, pgdat->kcompactd_max_order, 0,
					classzone_idx) == COMPACT_CONTINUE)
			return true;
	}

	return false;
}
static void kcompactd_do_work(pg_data_t *pgdat)
{
	/*
	 * With no special task, compact all zones so that a page of requested
	 * order is allocatable.
	 */
	int zoneid;
	struct zone *zone;
	struct compact_control cc = {
		.order = pgdat->kcompactd_max_order,
		.total_migrate_scanned = 0,
		.total_free_scanned = 0,
		.classzone_idx = pgdat->kcompactd_classzone_idx,
		.mode = MIGRATE_SYNC_LIGHT,
		.ignore_skip_hint = true,
		.gfp_mask = GFP_KERNEL,
	};
	trace_mm_compaction_kcompactd_wake(pgdat->node_id, cc.order,
							cc.classzone_idx);
	count_compact_event(KCOMPACTD_WAKE);

	for (zoneid = 0; zoneid <= cc.classzone_idx; zoneid++) {
		int status;

		zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		if (compaction_deferred(zone, cc.order))
			continue;

		if (compaction_suitable(zone, cc.order, 0, zoneid) !=
							COMPACT_CONTINUE)
			continue;

		cc.nr_freepages = 0;
		cc.nr_migratepages = 0;
		cc.total_migrate_scanned = 0;
		cc.total_free_scanned = 0;
		cc.zone = zone;
		INIT_LIST_HEAD(&cc.freepages);
		INIT_LIST_HEAD(&cc.migratepages);

		if (kthread_should_stop())
			return;
		status = compact_zone(zone, &cc);

		if (status == COMPACT_SUCCESS) {
			compaction_defer_reset(zone, cc.order, false);
		} else if (status == COMPACT_PARTIAL_SKIPPED || status == COMPACT_COMPLETE) {
			/*
			 * We use sync migration mode here, so we defer like
			 * sync direct compaction does.
			 */
			defer_compaction(zone, cc.order);
		}

		count_compact_events(KCOMPACTD_MIGRATE_SCANNED,
				     cc.total_migrate_scanned);
		count_compact_events(KCOMPACTD_FREE_SCANNED,
				     cc.total_free_scanned);

		VM_BUG_ON(!list_empty(&cc.freepages));
		VM_BUG_ON(!list_empty(&cc.migratepages));
	}

	/*
	 * Regardless of success, we are done until woken up next. But remember
	 * the requested order/classzone_idx in case it was higher/tighter than
	 * our current ones
	 */
	if (pgdat->kcompactd_max_order <= cc.order)
		pgdat->kcompactd_max_order = 0;
	if (pgdat->kcompactd_classzone_idx >= cc.classzone_idx)
		pgdat->kcompactd_classzone_idx = pgdat->nr_zones - 1;
}
void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx)
{
	if (!order)
		return;

	if (pgdat->kcompactd_max_order < order)
		pgdat->kcompactd_max_order = order;

	if (pgdat->kcompactd_classzone_idx > classzone_idx)
		pgdat->kcompactd_classzone_idx = classzone_idx;

	/*
	 * Pairs with implicit barrier in wait_event_freezable()
	 * such that wakeups are not missed.
	 */
	if (!wq_has_sleeper(&pgdat->kcompactd_wait))
		return;

	if (!kcompactd_node_suitable(pgdat))
		return;

	trace_mm_compaction_wakeup_kcompactd(pgdat->node_id, order,
							classzone_idx);
	wake_up_interruptible(&pgdat->kcompactd_wait);
}

/*
 * The background compaction daemon, started as a kernel thread
 * from the init process.
 */
static int kcompactd(void *p)
{
	pg_data_t *pgdat = (pg_data_t*)p;
	struct task_struct *tsk = current;

	const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);

	if (!cpumask_empty(cpumask))
		set_cpus_allowed_ptr(tsk, cpumask);

	set_freezable();

	pgdat->kcompactd_max_order = 0;
	pgdat->kcompactd_classzone_idx = pgdat->nr_zones - 1;

	while (!kthread_should_stop()) {
		trace_mm_compaction_kcompactd_sleep(pgdat->node_id);
		wait_event_freezable(pgdat->kcompactd_wait,
				kcompactd_work_requested(pgdat));

		kcompactd_do_work(pgdat);
	}

	return 0;
}
/*
 * This kcompactd start function will be called by init and node-hot-add.
 * On node-hot-add, kcompactd will be moved to the proper cpus if cpus are
 * hot-added.
 */
int kcompactd_run(int nid)
{
	pg_data_t *pgdat = NODE_DATA(nid);
	int ret = 0;

	if (pgdat->kcompactd)
		return 0;

	pgdat->kcompactd = kthread_run(kcompactd, pgdat, "kcompactd%d", nid);
	if (IS_ERR(pgdat->kcompactd)) {
		pr_err("Failed to start kcompactd on node %d\n", nid);
		ret = PTR_ERR(pgdat->kcompactd);
		pgdat->kcompactd = NULL;
	}
	return ret;
}

/*
 * Called by memory hotplug when all memory in a node is offlined. Caller must
 * hold mem_hotplug_begin/end().
 */
void kcompactd_stop(int nid)
{
	struct task_struct *kcompactd = NODE_DATA(nid)->kcompactd;

	if (kcompactd) {
		kthread_stop(kcompactd);
		NODE_DATA(nid)->kcompactd = NULL;
	}
}

/*
 * It's optimal to keep kcompactd on the same CPUs as their memory, but
 * not required for correctness. So if the last cpu in a node goes
 * away, we get changed to run anywhere: as the first one comes back,
 * restore their cpu bindings.
 */
static int kcompactd_cpu_online(unsigned int cpu)
{
	int nid;

	for_each_node_state(nid, N_MEMORY) {
		pg_data_t *pgdat = NODE_DATA(nid);
		const struct cpumask *mask;

		mask = cpumask_of_node(pgdat->node_id);

		if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
			/* One of our CPUs online: restore mask */
			set_cpus_allowed_ptr(pgdat->kcompactd, mask);
	}
	return 0;
}

static int __init kcompactd_init(void)
{
	int nid;
	int ret;

	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
					"mm/compaction:online",
					kcompactd_cpu_online, NULL);
	if (ret < 0) {
		pr_err("kcompactd: failed to register hotplug callbacks.\n");
		return ret;
	}

	for_each_node_state(nid, N_MEMORY)
		kcompactd_run(nid);
	return 0;
}
subsys_initcall(kcompactd_init)

#endif /* CONFIG_COMPACTION */