mm/compaction.c

/*
 * linux/mm/compaction.c
 *
 * Memory compaction for the reduction of external fragmentation. Note that
 * this heavily depends upon page migration to do all the real heavy
 * lifting
 *
 * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
 */
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/compaction.h>
#include <linux/mm_inline.h>
#include <linux/backing-dev.h>
#include <linux/sysctl.h>
#include <linux/sysfs.h>
#include <linux/balloon_compaction.h>
#include <linux/page-isolation.h>
#include "internal.h"

#ifdef CONFIG_COMPACTION
static inline void count_compact_event(enum vm_event_item item)
{
	count_vm_event(item);
}

static inline void count_compact_events(enum vm_event_item item, long delta)
{
	count_vm_events(item, delta);
}
#else
#define count_compact_event(item) do { } while (0)
#define count_compact_events(item, delta) do { } while (0)
#endif

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

#define CREATE_TRACE_POINTS
#include <trace/events/compaction.h>

static unsigned long release_freepages(struct list_head *freelist)
{
	struct page *page, *next;
	unsigned long count = 0;

	list_for_each_entry_safe(page, next, freelist, lru) {
		list_del(&page->lru);
		__free_page(page);
		count++;
	}

	return count;
}

static void map_pages(struct list_head *list)
{
	struct page *page;

	list_for_each_entry(page, list, lru) {
		arch_alloc_page(page, 0);
		kernel_map_pages(page, 1, 1);
	}
}

static inline bool migrate_async_suitable(int migratetype)
{
	return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE;
}

#ifdef CONFIG_COMPACTION
/* Returns true if the pageblock should be scanned for pages to isolate. */
static inline bool isolation_suitable(struct compact_control *cc,
					struct page *page)
{
	if (cc->ignore_skip_hint)
		return true;

	return !get_pageblock_skip(page);
}

/*
 * This function is called to clear all cached information on pageblocks that
 * should be skipped for page isolation when the migrate and free page scanner
 * meet.
 */
static void __reset_isolation_suitable(struct zone *zone)
{
	unsigned long start_pfn = zone->zone_start_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);
	unsigned long pfn;

	zone->compact_cached_migrate_pfn = start_pfn;
	zone->compact_cached_free_pfn = end_pfn;
	zone->compact_blockskip_flush = false;

	/* Walk the zone and mark every pageblock as suitable for isolation */
	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		struct page *page;

		cond_resched();

		if (!pfn_valid(pfn))
			continue;

		page = pfn_to_page(pfn);
		if (zone != page_zone(page))
			continue;

		clear_pageblock_skip(page);
	}
}

void reset_isolation_suitable(pg_data_t *pgdat)
{
	int zoneid;

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
		struct zone *zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		/* Only flush if a full compaction finished recently */
		if (zone->compact_blockskip_flush)
			__reset_isolation_suitable(zone);
	}
}

/*
 * If no pages were isolated then mark this pageblock to be skipped in the
 * future. The information is later cleared by __reset_isolation_suitable().
 */
static void update_pageblock_skip(struct compact_control *cc,
			struct page *page, unsigned long nr_isolated,
			bool migrate_scanner)
{
	struct zone *zone = cc->zone;

	if (cc->ignore_skip_hint)
		return;

	if (!page)
		return;

	if (!nr_isolated) {
		unsigned long pfn = page_to_pfn(page);
		set_pageblock_skip(page);

		/* Update where compaction should restart */
		if (migrate_scanner) {
			if (!cc->finished_update_migrate &&
			    pfn > zone->compact_cached_migrate_pfn)
				zone->compact_cached_migrate_pfn = pfn;
		} else {
			if (!cc->finished_update_free &&
			    pfn < zone->compact_cached_free_pfn)
				zone->compact_cached_free_pfn = pfn;
		}
	}
}
#else
static inline bool isolation_suitable(struct compact_control *cc,
					struct page *page)
{
	return true;
}

static void update_pageblock_skip(struct compact_control *cc,
			struct page *page, unsigned long nr_isolated,
			bool migrate_scanner)
{
}
#endif /* CONFIG_COMPACTION */

static inline bool should_release_lock(spinlock_t *lock)
{
	return need_resched() || spin_is_contended(lock);
}

/*
 * Compaction requires the taking of some coarse locks that are potentially
 * very heavily contended. Check if the process needs to be scheduled or
 * if the lock is contended. For async compaction, back out in the event
 * of severe contention. For sync compaction, schedule.
 *
 * Returns true if the lock is held.
 * Returns false if the lock is released and compaction should abort
 */
static bool compact_checklock_irqsave(spinlock_t *lock, unsigned long *flags,
				      bool locked, struct compact_control *cc)
{
	if (should_release_lock(lock)) {
		if (locked) {
			spin_unlock_irqrestore(lock, *flags);
			locked = false;
		}

		/* async aborts if taking too long or contended */
		if (!cc->sync) {
			cc->contended = true;
			return false;
		}

		cond_resched();
	}

	if (!locked)
		spin_lock_irqsave(lock, *flags);
	return true;
}

static inline bool compact_trylock_irqsave(spinlock_t *lock,
			unsigned long *flags, struct compact_control *cc)
{
	return compact_checklock_irqsave(lock, flags, false, cc);
}

/* Returns true if the page is within a block suitable for migration to */
static bool suitable_migration_target(struct page *page)
{
	int migratetype = get_pageblock_migratetype(page);

	/* Don't interfere with memory hot-remove or the min_free_kbytes blocks */
	if (migratetype == MIGRATE_RESERVE)
		return false;

	if (is_migrate_isolate(migratetype))
		return false;

	/* If the page is a large free page, then allow migration */
	if (PageBuddy(page) && page_order(page) >= pageblock_order)
		return true;

	/* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
	if (migrate_async_suitable(migratetype))
		return true;

	/* Otherwise skip the block */
	return false;
}

/*
 * Isolate free pages onto a private freelist. Caller must hold zone->lock.
 * If @strict is true, will abort returning 0 on any invalid PFNs or non-free
 * pages inside of the pageblock (even though it may still end up isolating
 * some pages).
 */
static unsigned long isolate_freepages_block(struct compact_control *cc,
				unsigned long blockpfn,
				unsigned long end_pfn,
				struct list_head *freelist,
				bool strict)
{
	int nr_scanned = 0, total_isolated = 0;
	struct page *cursor, *valid_page = NULL;
	unsigned long flags;
	bool locked = false;

	cursor = pfn_to_page(blockpfn);

	/* Isolate free pages. */
	for (; blockpfn < end_pfn; blockpfn++, cursor++) {
		int isolated, i;
		struct page *page = cursor;

		nr_scanned++;
		if (!pfn_valid_within(blockpfn))
			goto isolate_fail;

		if (!valid_page)
			valid_page = page;
		if (!PageBuddy(page))
			goto isolate_fail;

		/*
		 * The zone lock must be held to isolate freepages.
		 * Unfortunately this is a very coarse lock and can be
		 * heavily contended if there are parallel allocations
		 * or parallel compactions. For async compaction do not
		 * spin on the lock and we acquire the lock as late as
		 * possible.
		 */
		locked = compact_checklock_irqsave(&cc->zone->lock, &flags,
								locked, cc);
		if (!locked)
			break;

		/* Recheck this is a suitable migration target under lock */
		if (!strict && !suitable_migration_target(page))
			break;

		/* Recheck this is a buddy page under lock */
		if (!PageBuddy(page))
			goto isolate_fail;

		/* Found a free page, break it into order-0 pages */
		isolated = split_free_page(page);
		total_isolated += isolated;
		for (i = 0; i < isolated; i++) {
			list_add(&page->lru, freelist);
			page++;
		}

		/* If a page was split, advance to the end of it */
		if (isolated) {
			blockpfn += isolated - 1;
			cursor += isolated - 1;
			continue;
		}

isolate_fail:
		if (strict)
			break;
		else
			continue;
	}

	trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated);

	/*
	 * If strict isolation is requested by CMA then check that all the
	 * pages requested were isolated. If there were any failures, 0 is
	 * returned and CMA will fail.
	 */
	if (strict && blockpfn < end_pfn)
		total_isolated = 0;

	if (locked)
		spin_unlock_irqrestore(&cc->zone->lock, flags);

	/* Update the pageblock-skip if the whole pageblock was scanned */
	if (blockpfn == end_pfn)
		update_pageblock_skip(cc, valid_page, total_isolated, false);

	count_compact_events(COMPACTFREE_SCANNED, nr_scanned);
	if (total_isolated)
		count_compact_events(COMPACTISOLATED, total_isolated);
	return total_isolated;
}

/**
 * isolate_freepages_range() - isolate free pages.
 * @start_pfn: The first PFN to start isolating.
 * @end_pfn:   The one-past-last PFN.
 *
 * Non-free pages, invalid PFNs, or zone boundaries within the
 * [start_pfn, end_pfn) range are considered errors, cause the function to
 * undo its actions and return zero.
 *
 * Otherwise, the function returns the one-past-the-last PFN of the isolated
 * pages (which may be greater than end_pfn if the end fell in the middle of
 * a free page).
 */
unsigned long
isolate_freepages_range(struct compact_control *cc,
			unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long isolated, pfn, block_end_pfn;
	LIST_HEAD(freelist);

	for (pfn = start_pfn; pfn < end_pfn; pfn += isolated) {
		if (!pfn_valid(pfn) || cc->zone != page_zone(pfn_to_page(pfn)))
			break;

		/*
		 * On subsequent iterations ALIGN() is actually not needed,
		 * but we keep it so as not to complicate the code.
		 */
		block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
		block_end_pfn = min(block_end_pfn, end_pfn);

		isolated = isolate_freepages_block(cc, pfn, block_end_pfn,
						   &freelist, true);

		/*
		 * In strict mode, isolate_freepages_block() returns 0 if
		 * there are any holes in the block (ie. invalid PFNs or
		 * non-free pages).
		 */
		if (!isolated)
			break;

		/*
		 * If we managed to isolate pages, it is always (1 << n) *
		 * pageblock_nr_pages for some non-negative n. (Max order
		 * page may span two pageblocks).
		 */
	}

	/* split_free_page does not map the pages */
	map_pages(&freelist);

	if (pfn < end_pfn) {
		/* Loop terminated early, cleanup. */
		release_freepages(&freelist);
		return 0;
	}

	/* We don't use freelists for anything. */
	return pfn;
}

/* Update the number of anon and file isolated pages in the zone */
static void acct_isolated(struct zone *zone, bool locked, struct compact_control *cc)
{
	struct page *page;
	unsigned int count[2] = { 0, };

	list_for_each_entry(page, &cc->migratepages, lru)
		count[!!page_is_file_cache(page)]++;

	/* If locked we can use the interrupt unsafe versions */
	if (locked) {
		__mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
		__mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
	} else {
		mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
		mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
	}
}

/* Similar to reclaim, but different enough that they don't share logic */
static bool too_many_isolated(struct zone *zone)
{
	unsigned long active, inactive, isolated;

	inactive = zone_page_state(zone, NR_INACTIVE_FILE) +
					zone_page_state(zone, NR_INACTIVE_ANON);
	active = zone_page_state(zone, NR_ACTIVE_FILE) +
					zone_page_state(zone, NR_ACTIVE_ANON);
	isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
					zone_page_state(zone, NR_ISOLATED_ANON);

	return isolated > (inactive + active) / 2;
}

/**
 * isolate_migratepages_range() - isolate all migrate-able pages in range.
 * @zone:	Zone pages are in.
 * @cc:		Compaction control structure.
 * @low_pfn:	The first PFN of the range.
 * @end_pfn:	The one-past-the-last PFN of the range.
 * @unevictable: true if it is allowed to isolate unevictable pages
 *
 * Isolate all pages that can be migrated from the range specified by
 * [low_pfn, end_pfn). Returns zero if there is a fatal signal
 * pending, otherwise the PFN of the first page that was not scanned
 * (which may be less than, equal to or greater than end_pfn).
 *
 * Assumes that cc->migratepages is empty and cc->nr_migratepages is
 * zero.
 *
 * Apart from cc->migratepages and cc->nr_migratepages this function
 * does not modify any cc's fields, in particular it does not modify
 * (or read for that matter) cc->migrate_pfn.
 */
unsigned long
isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
		unsigned long low_pfn, unsigned long end_pfn, bool unevictable)
{
	unsigned long last_pageblock_nr = 0, pageblock_nr;
	unsigned long nr_scanned = 0, nr_isolated = 0;
	struct list_head *migratelist = &cc->migratepages;
	isolate_mode_t mode = 0;
	struct lruvec *lruvec;
	unsigned long flags;
	bool locked = false;
	struct page *page = NULL, *valid_page = NULL;

	/*
	 * Ensure that there are not too many pages isolated from the LRU
	 * list by either parallel reclaimers or compaction. If there are,
	 * delay for some time until fewer pages are isolated
	 */
	while (unlikely(too_many_isolated(zone))) {
		/* async migration should just abort */
		if (!cc->sync)
			return 0;

		congestion_wait(BLK_RW_ASYNC, HZ/10);

		if (fatal_signal_pending(current))
			return 0;
	}

	/* Time to isolate some pages for migration */
	cond_resched();
	for (; low_pfn < end_pfn; low_pfn++) {
		/* give a chance to irqs before checking need_resched() */
		if (locked && !((low_pfn+1) % SWAP_CLUSTER_MAX)) {
			if (should_release_lock(&zone->lru_lock)) {
				spin_unlock_irqrestore(&zone->lru_lock, flags);
				locked = false;
			}
		}

		/*
		 * migrate_pfn does not necessarily start aligned to a
		 * pageblock. Ensure that pfn_valid is called when moving
		 * into a new MAX_ORDER_NR_PAGES range in case of large
		 * memory holes within the zone
		 */
		if ((low_pfn & (MAX_ORDER_NR_PAGES - 1)) == 0) {
			if (!pfn_valid(low_pfn)) {
				low_pfn += MAX_ORDER_NR_PAGES - 1;
				continue;
			}
		}

		if (!pfn_valid_within(low_pfn))
			continue;
		nr_scanned++;

		/*
		 * Get the page and ensure the page is within the same zone.
		 * See the comment in isolate_freepages about overlapping
		 * nodes. It is deliberate that the new zone lock is not taken
		 * as memory compaction should not move pages between nodes.
		 */
		page = pfn_to_page(low_pfn);
		if (page_zone(page) != zone)
			continue;

		if (!valid_page)
			valid_page = page;

		/* If isolation recently failed, do not retry */
		pageblock_nr = low_pfn >> pageblock_order;
		if (!isolation_suitable(cc, page))
			goto next_pageblock;

		/* Skip if free */
		if (PageBuddy(page))
			continue;

		/*
		 * For async migration, also only scan in MOVABLE blocks. Async
		 * migration is optimistic to see if the minimum amount of work
		 * satisfies the allocation
		 */
		if (!cc->sync && last_pageblock_nr != pageblock_nr &&
		    !migrate_async_suitable(get_pageblock_migratetype(page))) {
			cc->finished_update_migrate = true;
			goto next_pageblock;
		}

		/*
		 * Check may be lockless but that's ok as we recheck later.
		 * It's possible to migrate LRU pages and balloon pages
		 * Skip any other type of page
		 */
		if (!PageLRU(page)) {
			if (unlikely(balloon_page_movable(page))) {
				if (locked && balloon_page_isolate(page)) {
					/* Successfully isolated */
					cc->finished_update_migrate = true;
					list_add(&page->lru, migratelist);
					cc->nr_migratepages++;
					nr_isolated++;
					goto check_compact_cluster;
				}
			}
			continue;
		}

		/*
		 * PageLRU is set. lru_lock normally excludes isolation
		 * splitting and collapsing (collapsing has already happened
		 * if PageLRU is set) but the lock is not necessarily taken
		 * here and it is wasteful to take it just to check transhuge.
		 * Check TransHuge without lock and skip the whole pageblock if
		 * it's either a transhuge or hugetlbfs page, as calling
		 * compound_order() without preventing THP from splitting the
		 * page underneath us may return surprising results.
		 */
		if (PageTransHuge(page)) {
			if (!locked)
				goto next_pageblock;
			low_pfn += (1 << compound_order(page)) - 1;
			continue;
		}

		/* Check if it is ok to still hold the lock */
		locked = compact_checklock_irqsave(&zone->lru_lock, &flags,
								locked, cc);
		if (!locked || fatal_signal_pending(current))
			break;

		/* Recheck PageLRU and PageTransHuge under lock */
		if (!PageLRU(page))
			continue;
		if (PageTransHuge(page)) {
			low_pfn += (1 << compound_order(page)) - 1;
			continue;
		}

		if (!cc->sync)
			mode |= ISOLATE_ASYNC_MIGRATE;

		if (unevictable)
			mode |= ISOLATE_UNEVICTABLE;

		lruvec = mem_cgroup_page_lruvec(page, zone);

		/* Try isolate the page */
		if (__isolate_lru_page(page, mode) != 0)
			continue;

		VM_BUG_ON(PageTransCompound(page));

		/* Successfully isolated */
		cc->finished_update_migrate = true;
		del_page_from_lru_list(page, lruvec, page_lru(page));
		list_add(&page->lru, migratelist);
		cc->nr_migratepages++;
		nr_isolated++;

check_compact_cluster:
		/* Avoid isolating too much */
		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
			++low_pfn;
			break;
		}

		continue;

next_pageblock:
		low_pfn = ALIGN(low_pfn + 1, pageblock_nr_pages) - 1;
		last_pageblock_nr = pageblock_nr;
	}

	acct_isolated(zone, locked, cc);

	if (locked)
		spin_unlock_irqrestore(&zone->lru_lock, flags);

	/* Update the pageblock-skip if the whole pageblock was scanned */
	if (low_pfn == end_pfn)
		update_pageblock_skip(cc, valid_page, nr_isolated, true);

	trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);

	count_compact_events(COMPACTMIGRATE_SCANNED, nr_scanned);
	if (nr_isolated)
		count_compact_events(COMPACTISOLATED, nr_isolated);

	return low_pfn;
}

#endif /* CONFIG_COMPACTION || CONFIG_CMA */
#ifdef CONFIG_COMPACTION
/*
 * Based on information in the current compact_control, find blocks
 * suitable for isolating free pages from and then isolate them.
 */
static void isolate_freepages(struct zone *zone,
				struct compact_control *cc)
{
	struct page *page;
	unsigned long high_pfn, low_pfn, pfn, z_end_pfn;
	int nr_freepages = cc->nr_freepages;
	struct list_head *freelist = &cc->freepages;

	/*
	 * Initialise the free scanner. The starting point is where we last
	 * successfully isolated from, zone-cached value, or the end of the
	 * zone when isolating for the first time. We need this aligned to
	 * the pageblock boundary, because we do pfn -= pageblock_nr_pages
	 * in the for loop.
	 * The low boundary is the end of the pageblock the migration scanner
	 * is using.
	 */
	pfn = cc->free_pfn & ~(pageblock_nr_pages-1);
	low_pfn = ALIGN(cc->migrate_pfn + 1, pageblock_nr_pages);

	/*
	 * Take care that if the migration scanner is at the end of the zone
	 * that the free scanner does not accidentally move to the next zone
	 * in the next isolation cycle.
	 */
	high_pfn = min(low_pfn, pfn);

	z_end_pfn = zone_end_pfn(zone);

	/*
	 * Isolate free pages until enough are available to migrate the
	 * pages on cc->migratepages. We stop searching if the migrate
	 * and free page scanners meet or enough free pages are isolated.
	 */
	for (; pfn >= low_pfn && cc->nr_migratepages > nr_freepages;
					pfn -= pageblock_nr_pages) {
		unsigned long isolated;
		unsigned long end_pfn;

		/*
		 * This can iterate a massively long zone without finding any
		 * suitable migration targets, so periodically check if we need
		 * to schedule.
		 */
		cond_resched();

		if (!pfn_valid(pfn))
			continue;

		/*
		 * Check for overlapping nodes/zones. It's possible on some
		 * configurations to have a setup like
		 * node0 node1 node0
		 * i.e. it's possible that all pages within a zone's range of
		 * pages do not belong to a single zone.
		 */
		page = pfn_to_page(pfn);
		if (page_zone(page) != zone)
			continue;

		/* Check the block is suitable for migration */
		if (!suitable_migration_target(page))
			continue;

		/* If isolation recently failed, do not retry */
		if (!isolation_suitable(cc, page))
			continue;

		/* Found a block suitable for isolating free pages from */
		isolated = 0;

		/*
		 * Take care when isolating in last pageblock of a zone which
		 * ends in the middle of a pageblock.
		 */
		end_pfn = min(pfn + pageblock_nr_pages, z_end_pfn);
		isolated = isolate_freepages_block(cc, pfn, end_pfn,
						   freelist, false);
		nr_freepages += isolated;

		/*
		 * Record the highest PFN we isolated pages from. When next
		 * looking for free pages, the search will restart here as
		 * page migration may have returned some pages to the allocator
		 */
		if (isolated) {
			cc->finished_update_free = true;
			high_pfn = max(high_pfn, pfn);
		}
	}

	/* split_free_page does not map the pages */
	map_pages(freelist);

	/*
	 * If we crossed the migrate scanner, we want to keep it that way
	 * so that compact_finished() may detect this
	 */
	if (pfn < low_pfn)
		cc->free_pfn = max(pfn, zone->zone_start_pfn);
	else
		cc->free_pfn = high_pfn;
	cc->nr_freepages = nr_freepages;
}

/*
 * This is a migrate-callback that "allocates" freepages by taking pages
 * from the isolated freelists in the block we are migrating to.
 */
static struct page *compaction_alloc(struct page *migratepage,
					unsigned long data,
					int **result)
{
	struct compact_control *cc = (struct compact_control *)data;
	struct page *freepage;

	/* Isolate free pages if necessary */
	if (list_empty(&cc->freepages)) {
		isolate_freepages(cc->zone, cc);

		if (list_empty(&cc->freepages))
			return NULL;
	}

	freepage = list_entry(cc->freepages.next, struct page, lru);
	list_del(&freepage->lru);
	cc->nr_freepages--;

	return freepage;
}

/*
 * We cannot control nr_migratepages and nr_freepages fully when migration is
 * running as migrate_pages() has no knowledge of compact_control. When
 * migration is complete, we count the number of pages on the lists by hand.
 */
static void update_nr_listpages(struct compact_control *cc)
{
	int nr_migratepages = 0;
	int nr_freepages = 0;
	struct page *page;

	list_for_each_entry(page, &cc->migratepages, lru)
		nr_migratepages++;
	list_for_each_entry(page, &cc->freepages, lru)
		nr_freepages++;

	cc->nr_migratepages = nr_migratepages;
	cc->nr_freepages = nr_freepages;
}

/* possible outcome of isolate_migratepages */
typedef enum {
	ISOLATE_ABORT,		/* Abort compaction now */
	ISOLATE_NONE,		/* No pages isolated, continue scanning */
	ISOLATE_SUCCESS,	/* Pages isolated, migrate */
} isolate_migrate_t;

/*
 * Isolate all pages that can be migrated from the block pointed to by
 * the migrate scanner within compact_control.
 */
static isolate_migrate_t isolate_migratepages(struct zone *zone,
					struct compact_control *cc)
{
	unsigned long low_pfn, end_pfn;

	/* Do not scan outside zone boundaries */
	low_pfn = max(cc->migrate_pfn, zone->zone_start_pfn);

	/* Only scan within a pageblock boundary */
	end_pfn = ALIGN(low_pfn + 1, pageblock_nr_pages);

	/* Do not cross the free scanner or scan within a memory hole */
	if (end_pfn > cc->free_pfn || !pfn_valid(low_pfn)) {
		cc->migrate_pfn = end_pfn;
		return ISOLATE_NONE;
	}

	/* Perform the isolation */
	low_pfn = isolate_migratepages_range(zone, cc, low_pfn, end_pfn, false);
	if (!low_pfn || cc->contended)
		return ISOLATE_ABORT;

	cc->migrate_pfn = low_pfn;

	return ISOLATE_SUCCESS;
}

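/*
 * Decide whether this compaction run can stop: either the migrate and free
 * scanners have met, or a page of the requested order (and migratetype) has
 * become available on the zone's free lists.
 */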
static int compact_finished(struct zone *zone,
			    struct compact_control *cc)
{
	unsigned int order;
	unsigned long watermark;

	if (fatal_signal_pending(current))
		return COMPACT_PARTIAL;

	/* Compaction run completes if the migrate and free scanner meet */
	if (cc->free_pfn <= cc->migrate_pfn) {
		/*
		 * Mark that the PG_migrate_skip information should be cleared
		 * by kswapd when it goes to sleep. kswapd does not set the
		 * flag itself as the decision to clear it should be based
		 * directly on an allocation request.
		 */
		if (!current_is_kswapd())
			zone->compact_blockskip_flush = true;

		return COMPACT_COMPLETE;
	}

	/*
	 * order == -1 is expected when compacting via
	 * /proc/sys/vm/compact_memory
	 */
	if (cc->order == -1)
		return COMPACT_CONTINUE;

	/* Compaction run is not finished if the watermark is not met */
	watermark = low_wmark_pages(zone);
	watermark += (1 << cc->order);

	if (!zone_watermark_ok(zone, cc->order, watermark, 0, 0))
		return COMPACT_CONTINUE;

	/* Direct compactor: Is a suitable page free? */
	for (order = cc->order; order < MAX_ORDER; order++) {
		struct free_area *area = &zone->free_area[order];

		/* Job done if page is free of the right migratetype */
		if (!list_empty(&area->free_list[cc->migratetype]))
			return COMPACT_PARTIAL;

		/* Job done if allocation would set block type */
		if (cc->order >= pageblock_order && area->nr_free)
			return COMPACT_PARTIAL;
	}

	return COMPACT_CONTINUE;
}

/*
 * compaction_suitable: Is this suitable to run compaction on this zone now?
 * Returns
 *   COMPACT_SKIPPED  - If there are too few free pages for compaction
 *   COMPACT_PARTIAL  - If the allocation would succeed without compaction
 *   COMPACT_CONTINUE - If compaction should run now
 */
unsigned long compaction_suitable(struct zone *zone, int order)
{
	int fragindex;
	unsigned long watermark;

	/*
	 * order == -1 is expected when compacting via
	 * /proc/sys/vm/compact_memory
	 */
	if (order == -1)
		return COMPACT_CONTINUE;

	/*
	 * Watermarks for order-0 must be met for compaction. Note the 2UL.
	 * This is because during migration, copies of pages need to be
	 * allocated and for a short time, the footprint is higher
	 */
	watermark = low_wmark_pages(zone) + (2UL << order);
	if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
		return COMPACT_SKIPPED;

	/*
	 * fragmentation index determines if allocation failures are due to
	 * low memory or external fragmentation
	 *
	 * index of -1000 implies allocations might succeed depending on
	 * watermarks
	 * index towards 0 implies failure is due to lack of memory
	 * index towards 1000 implies failure is due to fragmentation
	 *
	 * Only compact if a failure would be due to fragmentation.
	 */
	fragindex = fragmentation_index(zone, order);
	if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
		return COMPACT_SKIPPED;

	if (fragindex == -1000 && zone_watermark_ok(zone, order, watermark,
	    0, 0))
		return COMPACT_PARTIAL;

	return COMPACT_CONTINUE;
}

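/*
 * Run one compaction pass over a zone: alternately isolate pages to migrate
 * from the bottom of the zone and free target pages from the top, migrating
 * the former into the latter, until compact_finished() says we are done.
 */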
static int compact_zone(struct zone *zone, struct compact_control *cc)
{
	int ret;
	unsigned long start_pfn = zone->zone_start_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);

	ret = compaction_suitable(zone, cc->order);
	switch (ret) {
	case COMPACT_PARTIAL:
	case COMPACT_SKIPPED:
		/* Compaction is likely to fail */
		return ret;
	case COMPACT_CONTINUE:
		/* Fall through to compaction */
		;
	}

	/*
	 * Clear pageblock skip if there were failures recently and compaction
	 * is about to be retried after being deferred. kswapd does not do
	 * this reset as it'll reset the cached information when going to sleep.
	 */
	if (compaction_restarting(zone, cc->order) && !current_is_kswapd())
		__reset_isolation_suitable(zone);

	/*
	 * Setup to move all movable pages to the end of the zone. Use cached
	 * information on where the scanners should start but check that it
	 * is initialised by ensuring the values are within zone boundaries.
	 */
	cc->migrate_pfn = zone->compact_cached_migrate_pfn;
	cc->free_pfn = zone->compact_cached_free_pfn;
	if (cc->free_pfn < start_pfn || cc->free_pfn > end_pfn) {
		cc->free_pfn = end_pfn & ~(pageblock_nr_pages-1);
		zone->compact_cached_free_pfn = cc->free_pfn;
	}
	if (cc->migrate_pfn < start_pfn || cc->migrate_pfn > end_pfn) {
		cc->migrate_pfn = start_pfn;
		zone->compact_cached_migrate_pfn = cc->migrate_pfn;
	}

	migrate_prep_local();

	while ((ret = compact_finished(zone, cc)) == COMPACT_CONTINUE) {
		unsigned long nr_migrate, nr_remaining;
		int err;

		switch (isolate_migratepages(zone, cc)) {
		case ISOLATE_ABORT:
			ret = COMPACT_PARTIAL;
			putback_movable_pages(&cc->migratepages);
			cc->nr_migratepages = 0;
			goto out;
		case ISOLATE_NONE:
			continue;
		case ISOLATE_SUCCESS:
			;
		}

		nr_migrate = cc->nr_migratepages;
		err = migrate_pages(&cc->migratepages, compaction_alloc,
				(unsigned long)cc,
				cc->sync ? MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC,
				MR_COMPACTION);
		update_nr_listpages(cc);
		nr_remaining = cc->nr_migratepages;

		trace_mm_compaction_migratepages(nr_migrate - nr_remaining,
						nr_remaining);

		/* Release isolated pages not migrated */
		if (err) {
			putback_movable_pages(&cc->migratepages);
			cc->nr_migratepages = 0;
			/*
			 * migrate_pages() may return -ENOMEM when scanners meet
			 * and we want compact_finished() to detect it
			 */
			if (err == -ENOMEM && cc->free_pfn > cc->migrate_pfn) {
				ret = COMPACT_PARTIAL;
				goto out;
			}
		}
	}

out:
	/* Release free pages and check accounting */
	cc->nr_freepages -= release_freepages(&cc->freepages);
	VM_BUG_ON(cc->nr_freepages != 0);

	return ret;
}

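/*
 * Set up a compact_control for a single zone and run compact_zone() on it,
 * reporting back through @contended whether the attempt was aborted because
 * of lock contention.
 */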
static unsigned long compact_zone_order(struct zone *zone,
				 int order, gfp_t gfp_mask,
				 bool sync, bool *contended)
{
	unsigned long ret;
	struct compact_control cc = {
		.nr_freepages = 0,
		.nr_migratepages = 0,
		.order = order,
		.migratetype = allocflags_to_migratetype(gfp_mask),
		.zone = zone,
		.sync = sync,
	};
	INIT_LIST_HEAD(&cc.freepages);
	INIT_LIST_HEAD(&cc.migratepages);

	ret = compact_zone(zone, &cc);

	VM_BUG_ON(!list_empty(&cc.freepages));
	VM_BUG_ON(!list_empty(&cc.migratepages));

	*contended = cc.contended;
	return ret;
}

int sysctl_extfrag_threshold = 500;

/**
 * try_to_compact_pages - Direct compact to satisfy a high-order allocation
 * @zonelist: The zonelist used for the current allocation
 * @order: The order of the current allocation
 * @gfp_mask: The GFP mask of the current allocation
 * @nodemask: The allowed nodes to allocate from
 * @sync: Whether migration is synchronous or not
 * @contended: Return value that is true if compaction was aborted due to lock contention
 * @page: Optionally capture a free page of the requested order during compaction
 *
 * This is the main entry point for direct page compaction.
 */
unsigned long try_to_compact_pages(struct zonelist *zonelist,
			int order, gfp_t gfp_mask, nodemask_t *nodemask,
			bool sync, bool *contended)
{
	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
	int may_enter_fs = gfp_mask & __GFP_FS;
	int may_perform_io = gfp_mask & __GFP_IO;
	struct zoneref *z;
	struct zone *zone;
	int rc = COMPACT_SKIPPED;
	int alloc_flags = 0;

	/* Check if the GFP flags allow compaction */
	if (!order || !may_enter_fs || !may_perform_io)
		return rc;

	count_compact_event(COMPACTSTALL);

#ifdef CONFIG_CMA
	if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
		alloc_flags |= ALLOC_CMA;
#endif
	/* Compact each zone in the list */
	for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
								nodemask) {
		int status;

		status = compact_zone_order(zone, order, gfp_mask, sync,
						contended);
		rc = max(status, rc);

		/* If a normal allocation would succeed, stop compacting */
		if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0,
				      alloc_flags))
			break;
	}

	return rc;
}

/* Compact all zones within a node */
static void __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
{
	int zoneid;
	struct zone *zone;

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {

		zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		cc->nr_freepages = 0;
		cc->nr_migratepages = 0;
		cc->zone = zone;
		INIT_LIST_HEAD(&cc->freepages);
		INIT_LIST_HEAD(&cc->migratepages);

		if (cc->order == -1 || !compaction_deferred(zone, cc->order))
			compact_zone(zone, cc);

		if (cc->order > 0) {
			int ok = zone_watermark_ok(zone, cc->order,
						low_wmark_pages(zone), 0, 0);
			if (ok && cc->order >= zone->compact_order_failed)
				zone->compact_order_failed = cc->order + 1;
			/* Currently async compaction is never deferred. */
			else if (!ok && cc->sync)
				defer_compaction(zone, cc->order);
		}

		VM_BUG_ON(!list_empty(&cc->freepages));
		VM_BUG_ON(!list_empty(&cc->migratepages));
	}
}

void compact_pgdat(pg_data_t *pgdat, int order)
{
	struct compact_control cc = {
		.order = order,
		.sync = false,
	};

	if (!order)
		return;

	__compact_pgdat(pgdat, &cc);
}

static void compact_node(int nid)
{
	struct compact_control cc = {
		.order = -1,
		.sync = true,
	};

	__compact_pgdat(NODE_DATA(nid), &cc);
}

/* Compact all nodes in the system */
static void compact_nodes(void)
{
	int nid;

	/* Flush pending updates to the LRU lists */
	lru_add_drain_all();

	for_each_online_node(nid)
		compact_node(nid);
}

/* The written value is actually unused, all memory is compacted */
int sysctl_compact_memory;

/* This is the entry point for compacting all nodes via /proc/sys/vm */
int sysctl_compaction_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	if (write)
		compact_nodes();

	return 0;
}

int sysctl_extfrag_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec_minmax(table, write, buffer, length, ppos);

	return 0;
}

#if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
ssize_t sysfs_compact_node(struct device *dev,
			struct device_attribute *attr,
			const char *buf, size_t count)
{
	int nid = dev->id;

	if (nid >= 0 && nid < nr_node_ids && node_online(nid)) {
		/* Flush pending updates to the LRU lists */
		lru_add_drain_all();

		compact_node(nid);
	}

	return count;
}
static DEVICE_ATTR(compact, S_IWUSR, NULL, sysfs_compact_node);

int compaction_register_node(struct node *node)
{
	return device_create_file(&node->dev, &dev_attr_compact);
}

void compaction_unregister_node(struct node *node)
{
	return device_remove_file(&node->dev, &dev_attr_compact);
}
#endif /* CONFIG_SYSFS && CONFIG_NUMA */

#endif /* CONFIG_COMPACTION */