/*
 * linux/mm/compaction.c
 *
 * Memory compaction for the reduction of external fragmentation. Note that
 * this heavily depends upon page migration to do all the real heavy
 * lifting
 *
 * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
 */
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/compaction.h>
#include <linux/mm_inline.h>
#include <linux/backing-dev.h>
#include <linux/sysctl.h>
#include <linux/sysfs.h>
#include <linux/balloon_compaction.h>
#ifdef CONFIG_COMPACTION
static inline void count_compact_event(enum vm_event_item item)
{
	count_vm_event(item);
}

static inline void count_compact_events(enum vm_event_item item, long delta)
{
	count_vm_events(item, delta);
}
#else
#define count_compact_event(item) do { } while (0)
#define count_compact_events(item, delta) do { } while (0)
#endif
#if defined CONFIG_COMPACTION || defined CONFIG_CMA

#define CREATE_TRACE_POINTS
#include <trace/events/compaction.h>
static unsigned long release_freepages(struct list_head *freelist)
{
	struct page *page, *next;
	unsigned long count = 0;

	list_for_each_entry_safe(page, next, freelist, lru) {
		list_del(&page->lru);
		__free_page(page);
		count++;
	}

	return count;
}
static void map_pages(struct list_head *list)
{
	struct page *page;

	list_for_each_entry(page, list, lru) {
		arch_alloc_page(page, 0);
		kernel_map_pages(page, 1, 1);
	}
}
static inline bool migrate_async_suitable(int migratetype)
{
	return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE;
}
#ifdef CONFIG_COMPACTION
/* Returns true if the pageblock should be scanned for pages to isolate. */
static inline bool isolation_suitable(struct compact_control *cc,
					struct page *page)
{
	if (cc->ignore_skip_hint)
		return true;

	return !get_pageblock_skip(page);
}
/*
 * This function is called to clear all cached information on pageblocks that
 * should be skipped for page isolation when the migrate and free page
 * scanners meet.
 */
static void __reset_isolation_suitable(struct zone *zone)
{
	unsigned long start_pfn = zone->zone_start_pfn;
	unsigned long end_pfn = zone->zone_start_pfn + zone->spanned_pages;
	unsigned long pfn;

	zone->compact_cached_migrate_pfn = start_pfn;
	zone->compact_cached_free_pfn = end_pfn;
	zone->compact_blockskip_flush = false;

	/* Walk the zone and mark every pageblock as suitable for isolation */
	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		struct page *page;

		cond_resched();

		if (!pfn_valid(pfn))
			continue;

		page = pfn_to_page(pfn);
		if (zone != page_zone(page))
			continue;

		clear_pageblock_skip(page);
	}
}
void reset_isolation_suitable(pg_data_t *pgdat)
{
	int zoneid;

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
		struct zone *zone = &pgdat->node_zones[zoneid];

		if (!populated_zone(zone))
			continue;

		/* Only flush if a full compaction finished recently */
		if (zone->compact_blockskip_flush)
			__reset_isolation_suitable(zone);
	}
}
/*
 * If no pages were isolated then mark this pageblock to be skipped in the
 * future. The information is later cleared by __reset_isolation_suitable().
 */
static void update_pageblock_skip(struct compact_control *cc,
			struct page *page, unsigned long nr_isolated,
			bool migrate_scanner)
{
	struct zone *zone = cc->zone;

	if (!page)
		return;

	if (!nr_isolated) {
		unsigned long pfn = page_to_pfn(page);

		set_pageblock_skip(page);

		/* Update where compaction should restart */
		if (migrate_scanner) {
			if (!cc->finished_update_migrate &&
			    pfn > zone->compact_cached_migrate_pfn)
				zone->compact_cached_migrate_pfn = pfn;
		} else {
			if (!cc->finished_update_free &&
			    pfn < zone->compact_cached_free_pfn)
				zone->compact_cached_free_pfn = pfn;
		}
	}
}
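
/*
 * Illustration (added commentary, not part of the original source): the
 * cached restart points only ever move towards each other within one run.
 * Suppose a zone spans PFNs 0..262143 with 512-page pageblocks. If the
 * migrate scanner fails to isolate anything in the block starting at PFN
 * 8192, compact_cached_migrate_pfn advances to 8192 (it only grows); if
 * the free scanner fails at PFN 250368, compact_cached_free_pfn shrinks
 * to 250368. The next compaction run starts both scanners from these
 * cached values instead of the zone boundaries. PFN values here are
 * hypothetical.
 */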
#else /* CONFIG_COMPACTION */
static inline bool isolation_suitable(struct compact_control *cc,
					struct page *page)
{
	return true;
}

static void update_pageblock_skip(struct compact_control *cc,
			struct page *page, unsigned long nr_isolated,
			bool migrate_scanner)
{
}
#endif /* CONFIG_COMPACTION */
static inline bool should_release_lock(spinlock_t *lock)
{
	return need_resched() || spin_is_contended(lock);
}
/*
 * Compaction requires the taking of some coarse locks that are potentially
 * very heavily contended. Check if the process needs to be scheduled or
 * if the lock is contended. For async compaction, back out if contention
 * is severe. For sync compaction, schedule.
 *
 * Returns true if the lock is held.
 * Returns false if the lock is released and compaction should abort
 */
static bool compact_checklock_irqsave(spinlock_t *lock, unsigned long *flags,
				      bool locked, struct compact_control *cc)
{
	if (should_release_lock(lock)) {
		if (locked) {
			spin_unlock_irqrestore(lock, *flags);
			locked = false;
		}

		/* async aborts if taking too long or contended */
		if (!cc->sync) {
			cc->contended = true;
			return false;
		}

		cond_resched();
	}

	if (!locked)
		spin_lock_irqsave(lock, *flags);
	return true;
}

static inline bool compact_trylock_irqsave(spinlock_t *lock,
			unsigned long *flags, struct compact_control *cc)
{
	return compact_checklock_irqsave(lock, flags, false, cc);
}
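
/*
 * Illustration (added commentary, not part of the original source): callers
 * use the boolean return value to track lock ownership across loop
 * iterations, along the lines of:
 *
 *	locked = compact_checklock_irqsave(&zone->lock, &flags, locked, cc);
 *	if (!locked)
 *		break;	// async compaction backed out under contention
 *
 * and then unlock once after the loop with spin_unlock_irqrestore() if
 * "locked" is still true. This is a sketch of the calling convention used
 * by the isolation functions below, not additional API.
 */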
/* Returns true if the page is within a block suitable for migration to */
static bool suitable_migration_target(struct page *page)
{
	int migratetype = get_pageblock_migratetype(page);

	/* Don't interfere with memory hot-remove or the min_free_kbytes blocks */
	if (migratetype == MIGRATE_ISOLATE || migratetype == MIGRATE_RESERVE)
		return false;

	/* If the page is a large free page, then allow migration */
	if (PageBuddy(page) && page_order(page) >= pageblock_order)
		return true;

	/* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
	if (migrate_async_suitable(migratetype))
		return true;

	/* Otherwise skip the block */
	return false;
}
/*
 * Isolate free pages onto a private freelist. Caller must hold zone->lock.
 * If @strict is true, will abort returning 0 on any invalid PFNs or non-free
 * pages inside of the pageblock (even though it may still end up isolating
 * some pages).
 */
static unsigned long isolate_freepages_block(struct compact_control *cc,
				unsigned long blockpfn,
				unsigned long end_pfn,
				struct list_head *freelist,
				bool strict)
{
	int nr_scanned = 0, total_isolated = 0;
	struct page *cursor, *valid_page = NULL;
	unsigned long nr_strict_required = end_pfn - blockpfn;
	unsigned long flags;
	bool locked = false;

	cursor = pfn_to_page(blockpfn);

	/* Isolate free pages. */
	for (; blockpfn < end_pfn; blockpfn++, cursor++) {
		int isolated, i;
		struct page *page = cursor;

		nr_scanned++;
		if (!pfn_valid_within(blockpfn))
			continue;
		if (!valid_page)
			valid_page = page;
		if (!PageBuddy(page))
			continue;

		/*
		 * The zone lock must be held to isolate freepages.
		 * Unfortunately this is a very coarse lock and can be
		 * heavily contended if there are parallel allocations
		 * or parallel compactions. For async compaction, do not
		 * spin on the lock; acquire it as late as possible.
		 */
		locked = compact_checklock_irqsave(&cc->zone->lock, &flags,
								locked, cc);
		if (!locked)
			break;

		/* Recheck this is a suitable migration target under lock */
		if (!strict && !suitable_migration_target(page))
			break;

		/* Recheck this is a buddy page under lock */
		if (!PageBuddy(page))
			continue;

		/* Found a free page, break it into order-0 pages */
		isolated = split_free_page(page);
		if (!isolated && strict)
			break;
		total_isolated += isolated;
		for (i = 0; i < isolated; i++) {
			list_add(&page->lru, freelist);
			page++;
		}

		/* If a page was split, advance to the end of it */
		if (isolated) {
			blockpfn += isolated - 1;
			cursor += isolated - 1;
		}
	}

	trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated);

	/*
	 * If strict isolation is requested by CMA then check that all the
	 * pages requested were isolated. If there were any failures, 0 is
	 * returned and CMA will fail.
	 */
	if (strict && nr_strict_required > total_isolated)
		total_isolated = 0;

	if (locked)
		spin_unlock_irqrestore(&cc->zone->lock, flags);

	/* Update the pageblock-skip if the whole pageblock was scanned */
	if (blockpfn == end_pfn)
		update_pageblock_skip(cc, valid_page, total_isolated, false);

	count_compact_events(COMPACTFREE_SCANNED, nr_scanned);
	if (total_isolated)
		count_compact_events(COMPACTISOLATED, total_isolated);

	return total_isolated;
}
/**
 * isolate_freepages_range() - isolate free pages.
 * @start_pfn: The first PFN to start isolating.
 * @end_pfn:   The one-past-last PFN.
 *
 * Non-free pages, invalid PFNs, or zone boundaries within the
 * [start_pfn, end_pfn) range are considered errors, causing the function to
 * undo its actions and return zero.
 *
 * Otherwise, the function returns the one-past-the-last PFN of the isolated
 * pages (which may be greater than end_pfn if the end fell in the middle of
 * a free page).
 */
unsigned long
isolate_freepages_range(struct compact_control *cc,
			unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long isolated, pfn, block_end_pfn;
	LIST_HEAD(freelist);

	for (pfn = start_pfn; pfn < end_pfn; pfn += isolated) {
		if (!pfn_valid(pfn) || cc->zone != page_zone(pfn_to_page(pfn)))
			break;

		/*
		 * On subsequent iterations ALIGN() is actually not needed,
		 * but we keep it so as not to complicate the code.
		 */
		block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
		block_end_pfn = min(block_end_pfn, end_pfn);

		isolated = isolate_freepages_block(cc, pfn, block_end_pfn,
						   &freelist, true);

		/*
		 * In strict mode, isolate_freepages_block() returns 0 if
		 * there are any holes in the block (ie. invalid PFNs or
		 * non-free pages).
		 */
		if (!isolated)
			break;

		/*
		 * If we managed to isolate pages, it is always (1 << n) *
		 * pageblock_nr_pages for some non-negative n. (Max order
		 * page may span two pageblocks.)
		 */
	}

	/* split_free_page does not map the pages */
	map_pages(&freelist);

	if (pfn < end_pfn) {
		/* Loop terminated early, cleanup. */
		release_freepages(&freelist);
		return 0;
	}

	/* We don't use freelists for anything. */
	return pfn;
}
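
/*
 * Illustration (added commentary, not part of the original source): a
 * minimal sketch of how a CMA-style caller might drive strict isolation,
 * assuming a compact_control set up roughly the way alloc_contig_range()
 * does it; the field values shown are an assumption based on the
 * strict-mode semantics documented above, not a verbatim copy of any
 * caller:
 *
 *	struct compact_control cc = {
 *		.nr_migratepages = 0,
 *		.order = -1,
 *		.zone = page_zone(pfn_to_page(start)),
 *		.sync = true,
 *	};
 *	INIT_LIST_HEAD(&cc.migratepages);
 *
 *	ret = isolate_freepages_range(&cc, start, end);
 *	if (!ret)
 *		return -EBUSY;	// range had holes or non-free pages
 */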
/* Update the number of anon and file isolated pages in the zone */
static void acct_isolated(struct zone *zone, bool locked, struct compact_control *cc)
{
	struct page *page;
	unsigned int count[2] = { 0, };

	list_for_each_entry(page, &cc->migratepages, lru)
		count[!!page_is_file_cache(page)]++;

	/* If locked we can use the interrupt unsafe versions */
	if (locked) {
		__mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
		__mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
	} else {
		mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
		mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
	}
}
/* Similar to reclaim, but different enough that they don't share logic */
static bool too_many_isolated(struct zone *zone)
{
	unsigned long active, inactive, isolated;

	inactive = zone_page_state(zone, NR_INACTIVE_FILE) +
					zone_page_state(zone, NR_INACTIVE_ANON);
	active = zone_page_state(zone, NR_ACTIVE_FILE) +
					zone_page_state(zone, NR_ACTIVE_ANON);
	isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
					zone_page_state(zone, NR_ISOLATED_ANON);

	return isolated > (inactive + active) / 2;
}
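
/*
 * Worked example (added commentary, not part of the original source): with
 * hypothetical counts of 60000 inactive, 40000 active and 55000 isolated
 * pages, the check is 55000 > (60000 + 40000) / 2 = 50000, so the function
 * returns true and isolate_migratepages_range() below throttles (or, for
 * async compaction, aborts) until parallel reclaimers or compactors put
 * pages back on the LRU lists.
 */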
/**
 * isolate_migratepages_range() - isolate all migrate-able pages in range.
 * @zone:	Zone pages are in.
 * @cc:		Compaction control structure.
 * @low_pfn:	The first PFN of the range.
 * @end_pfn:	The one-past-the-last PFN of the range.
 * @unevictable: true if unevictable pages may be isolated
 *
 * Isolate all pages that can be migrated from the range specified by
 * [low_pfn, end_pfn). Returns zero if there is a fatal signal pending,
 * otherwise the PFN of the first page that was not scanned (which may be
 * less than, equal to or greater than end_pfn).
 *
 * Assumes that cc->migratepages is empty and cc->nr_migratepages is
 * zero.
 *
 * Apart from cc->migratepages and cc->nr_migratepages this function
 * does not modify any cc's fields, in particular it does not modify
 * (or read for that matter) cc->migrate_pfn.
 */
unsigned long
isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
		unsigned long low_pfn, unsigned long end_pfn, bool unevictable)
{
	unsigned long last_pageblock_nr = 0, pageblock_nr;
	unsigned long nr_scanned = 0, nr_isolated = 0;
	struct list_head *migratelist = &cc->migratepages;
	isolate_mode_t mode = 0;
	struct lruvec *lruvec;
	unsigned long flags;
	bool locked = false;
	struct page *page = NULL, *valid_page = NULL;

	/*
	 * Ensure that there are not too many pages isolated from the LRU
	 * list by either parallel reclaimers or compaction. If there are,
	 * delay for some time until fewer pages are isolated
	 */
	while (unlikely(too_many_isolated(zone))) {
		/* async migration should just abort */
		if (!cc->sync)
			return 0;

		congestion_wait(BLK_RW_ASYNC, HZ/10);

		if (fatal_signal_pending(current))
			return 0;
	}

	/* Time to isolate some pages for migration */
	cond_resched();
	for (; low_pfn < end_pfn; low_pfn++) {
		/* give a chance to irqs before checking need_resched() */
		if (locked && !((low_pfn+1) % SWAP_CLUSTER_MAX)) {
			if (should_release_lock(&zone->lru_lock)) {
				spin_unlock_irqrestore(&zone->lru_lock, flags);
				locked = false;
			}
		}

		/*
		 * migrate_pfn does not necessarily start aligned to a
		 * pageblock. Ensure that pfn_valid is called when moving
		 * into a new MAX_ORDER_NR_PAGES range in case of large
		 * memory holes within the zone
		 */
		if ((low_pfn & (MAX_ORDER_NR_PAGES - 1)) == 0) {
			if (!pfn_valid(low_pfn)) {
				low_pfn += MAX_ORDER_NR_PAGES - 1;
				continue;
			}
		}

		if (!pfn_valid_within(low_pfn))
			continue;
		nr_scanned++;

		/*
		 * Get the page and ensure the page is within the same zone.
		 * See the comment in isolate_freepages about overlapping
		 * nodes. It is deliberate that the new zone lock is not taken
		 * as memory compaction should not move pages between nodes.
		 */
		page = pfn_to_page(low_pfn);
		if (page_zone(page) != zone)
			continue;

		if (!valid_page)
			valid_page = page;

		/* If isolation recently failed, do not retry */
		pageblock_nr = low_pfn >> pageblock_order;
		if (!isolation_suitable(cc, page))
			goto next_pageblock;

		/* Skip if free */
		if (PageBuddy(page))
			continue;

		/*
		 * For async migration, also only scan in MOVABLE blocks. Async
		 * migration is optimistic to see if the minimum amount of work
		 * satisfies the allocation
		 */
		if (!cc->sync && last_pageblock_nr != pageblock_nr &&
		    !migrate_async_suitable(get_pageblock_migratetype(page))) {
			cc->finished_update_migrate = true;
			goto next_pageblock;
		}

		/*
		 * Check may be lockless but that's ok as we recheck later.
		 * It's possible to migrate LRU pages and balloon pages;
		 * skip any other type of page.
		 */
		if (!PageLRU(page)) {
			if (unlikely(balloon_page_movable(page))) {
				if (locked && balloon_page_isolate(page)) {
					/* Successfully isolated */
					cc->finished_update_migrate = true;
					list_add(&page->lru, migratelist);
					cc->nr_migratepages++;
					nr_isolated++;
					goto check_compact_cluster;
				}
			}
			continue;
		}

		/*
		 * PageLRU is set. lru_lock normally excludes isolation
		 * splitting and collapsing (collapsing has already happened
		 * if PageLRU is set) but the lock is not necessarily taken
		 * here and it is wasteful to take it just to check transhuge.
		 * Check TransHuge without lock and skip the whole pageblock if
		 * it's either a transhuge or hugetlbfs page, as calling
		 * compound_order() without preventing THP from splitting the
		 * page underneath us may return surprising results.
		 */
		if (PageTransHuge(page)) {
			if (!locked)
				goto next_pageblock;
			low_pfn += (1 << compound_order(page)) - 1;
			continue;
		}

		/* Check if it is ok to still hold the lock */
		locked = compact_checklock_irqsave(&zone->lru_lock, &flags,
								locked, cc);
		if (!locked || fatal_signal_pending(current))
			break;

		/* Recheck PageLRU and PageTransHuge under lock */
		if (!PageLRU(page))
			continue;
		if (PageTransHuge(page)) {
			low_pfn += (1 << compound_order(page)) - 1;
			continue;
		}

		if (!cc->sync)
			mode |= ISOLATE_ASYNC_MIGRATE;

		if (unevictable)
			mode |= ISOLATE_UNEVICTABLE;

		lruvec = mem_cgroup_page_lruvec(page, zone);

		/* Try isolate the page */
		if (__isolate_lru_page(page, mode) != 0)
			continue;

		VM_BUG_ON(PageTransCompound(page));

		/* Successfully isolated */
		cc->finished_update_migrate = true;
		del_page_from_lru_list(page, lruvec, page_lru(page));
		list_add(&page->lru, migratelist);
		cc->nr_migratepages++;
		nr_isolated++;

check_compact_cluster:
		/* Avoid isolating too much */
		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
			++low_pfn;
			break;
		}

		continue;

next_pageblock:
		low_pfn += pageblock_nr_pages;
		low_pfn = ALIGN(low_pfn, pageblock_nr_pages) - 1;
		last_pageblock_nr = pageblock_nr;
	}

	acct_isolated(zone, locked, cc);

	if (locked)
		spin_unlock_irqrestore(&zone->lru_lock, flags);

	/* Update the pageblock-skip if the whole pageblock was scanned */
	if (low_pfn == end_pfn)
		update_pageblock_skip(cc, valid_page, nr_isolated, true);

	trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);

	count_compact_events(COMPACTMIGRATE_SCANNED, nr_scanned);
	if (nr_isolated)
		count_compact_events(COMPACTISOLATED, nr_isolated);

	return low_pfn;
}
#endif /* CONFIG_COMPACTION || CONFIG_CMA */

#ifdef CONFIG_COMPACTION
/*
 * Based on information in the current compact_control, find blocks
 * suitable for isolating free pages from and then isolate them.
 */
static void isolate_freepages(struct zone *zone,
				struct compact_control *cc)
{
	struct page *page;
	unsigned long high_pfn, low_pfn, pfn, zone_end_pfn, end_pfn;
	int nr_freepages = cc->nr_freepages;
	struct list_head *freelist = &cc->freepages;

	/*
	 * Initialise the free scanner. The starting point is where we last
	 * scanned from (or the end of the zone if starting). The low point
	 * is the end of the pageblock the migration scanner is using.
	 */
	pfn = cc->free_pfn;
	low_pfn = cc->migrate_pfn + pageblock_nr_pages;

	/*
	 * Take care that if the migration scanner is at the end of the zone
	 * that the free scanner does not accidentally move to the next zone
	 * in the next isolation cycle.
	 */
	high_pfn = min(low_pfn, pfn);

	zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;

	/*
	 * Isolate free pages until enough are available to migrate the
	 * pages on cc->migratepages. We stop searching if the migrate
	 * and free page scanners meet or enough free pages are isolated.
	 */
	for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages;
					pfn -= pageblock_nr_pages) {
		unsigned long isolated;

		if (!pfn_valid(pfn))
			continue;

		/*
		 * Check for overlapping nodes/zones. It's possible on some
		 * configurations to have a setup like
		 * node0 node1 node0
		 * i.e. it's possible that all pages within a zone's range of
		 * pages do not belong to a single zone.
		 */
		page = pfn_to_page(pfn);
		if (page_zone(page) != zone)
			continue;

		/* Check the block is suitable for migration */
		if (!suitable_migration_target(page))
			continue;

		/* If isolation recently failed, do not retry */
		if (!isolation_suitable(cc, page))
			continue;

		/* Found a block suitable for isolating free pages from */
		isolated = 0;

		/*
		 * As pfn may not start aligned, pfn + pageblock_nr_pages
		 * may cross a MAX_ORDER_NR_PAGES boundary and miss
		 * a pfn_valid check. Ensure isolate_freepages_block()
		 * only scans within a pageblock
		 */
		end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
		end_pfn = min(end_pfn, zone_end_pfn);
		isolated = isolate_freepages_block(cc, pfn, end_pfn,
						   freelist, false);
		nr_freepages += isolated;

		/*
		 * Record the highest PFN we isolated pages from. When next
		 * looking for free pages, the search will restart here as
		 * page migration may have returned some pages to the allocator
		 */
		if (isolated) {
			cc->finished_update_free = true;
			high_pfn = max(high_pfn, pfn);
		}
	}

	/* split_free_page does not map the pages */
	map_pages(freelist);

	cc->free_pfn = high_pfn;
	cc->nr_freepages = nr_freepages;
}
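
/*
 * Illustration (added commentary, not part of the original source): the two
 * scanners work towards each other, and compaction finishes when they meet:
 *
 *	zone_start_pfn                                     zone end
 *	| migrate scanner -->              <-- free scanner |
 *	| (isolates movable pages)    (isolates free pages) |
 *
 * isolate_freepages() above implements the right-hand scanner, walking down
 * one pageblock at a time until cc->nr_freepages covers cc->nr_migratepages.
 */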
/*
 * This is a migrate-callback that "allocates" freepages by taking pages
 * from the isolated freelists in the block we are migrating to.
 */
static struct page *compaction_alloc(struct page *migratepage,
					unsigned long data,
					int **result)
{
	struct compact_control *cc = (struct compact_control *)data;
	struct page *freepage;

	/* Isolate free pages if necessary */
	if (list_empty(&cc->freepages)) {
		isolate_freepages(cc->zone, cc);

		if (list_empty(&cc->freepages))
			return NULL;
	}

	freepage = list_entry(cc->freepages.next, struct page, lru);
	list_del(&freepage->lru);
	cc->nr_freepages--;

	return freepage;
}
/*
 * We cannot control nr_migratepages and nr_freepages fully when migration is
 * running as migrate_pages() has no knowledge of compact_control. When
 * migration is complete, we count the number of pages on the lists by hand.
 */
static void update_nr_listpages(struct compact_control *cc)
{
	int nr_migratepages = 0;
	int nr_freepages = 0;
	struct page *page;

	list_for_each_entry(page, &cc->migratepages, lru)
		nr_migratepages++;
	list_for_each_entry(page, &cc->freepages, lru)
		nr_freepages++;

	cc->nr_migratepages = nr_migratepages;
	cc->nr_freepages = nr_freepages;
}
/* possible outcome of isolate_migratepages */
typedef enum {
	ISOLATE_ABORT,		/* Abort compaction now */
	ISOLATE_NONE,		/* No pages isolated, continue scanning */
	ISOLATE_SUCCESS,	/* Pages isolated, migrate */
} isolate_migrate_t;
/*
 * Isolate all pages that can be migrated from the block pointed to by
 * the migrate scanner within compact_control.
 */
static isolate_migrate_t isolate_migratepages(struct zone *zone,
					struct compact_control *cc)
{
	unsigned long low_pfn, end_pfn;

	/* Do not scan outside zone boundaries */
	low_pfn = max(cc->migrate_pfn, zone->zone_start_pfn);

	/* Only scan within a pageblock boundary */
	end_pfn = ALIGN(low_pfn + pageblock_nr_pages, pageblock_nr_pages);

	/* Do not cross the free scanner or scan within a memory hole */
	if (end_pfn > cc->free_pfn || !pfn_valid(low_pfn)) {
		cc->migrate_pfn = end_pfn;
		return ISOLATE_NONE;
	}

	/* Perform the isolation */
	low_pfn = isolate_migratepages_range(zone, cc, low_pfn, end_pfn, false);
	if (!low_pfn || cc->contended)
		return ISOLATE_ABORT;

	cc->migrate_pfn = low_pfn;

	return ISOLATE_SUCCESS;
}
static int compact_finished(struct zone *zone,
			    struct compact_control *cc)
{
	unsigned long watermark;

	if (fatal_signal_pending(current))
		return COMPACT_PARTIAL;

	/* Compaction run completes if the migrate and free scanner meet */
	if (cc->free_pfn <= cc->migrate_pfn) {
		/*
		 * Mark that the PG_migrate_skip information should be cleared
		 * by kswapd when it goes to sleep. kswapd does not set the
		 * flag itself as the decision to be clear should be directly
		 * based on an allocation request.
		 */
		if (!current_is_kswapd())
			zone->compact_blockskip_flush = true;

		return COMPACT_COMPLETE;
	}

	/*
	 * order == -1 is expected when compacting via
	 * /proc/sys/vm/compact_memory
	 */
	if (cc->order == -1)
		return COMPACT_CONTINUE;

	/* Compaction run is not finished if the watermark is not met */
	watermark = low_wmark_pages(zone);
	watermark += (1 << cc->order);

	if (!zone_watermark_ok(zone, cc->order, watermark, 0, 0))
		return COMPACT_CONTINUE;

	/* Direct compactor: Is a suitable page free? */
	if (cc->page) {
		/* Was a suitable page captured? */
		if (*cc->page)
			return COMPACT_PARTIAL;
	} else {
		unsigned int order;

		for (order = cc->order; order < MAX_ORDER; order++) {
			struct free_area *area = &zone->free_area[order];

			/* Job done if page is free of the right migratetype */
			if (!list_empty(&area->free_list[cc->migratetype]))
				return COMPACT_PARTIAL;

			/* Job done if allocation would set block type */
			if (cc->order >= pageblock_order && area->nr_free)
				return COMPACT_PARTIAL;
		}
	}

	return COMPACT_CONTINUE;
}
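
/*
 * Worked example (added commentary, not part of the original source): for
 * an order-3 request against a zone whose low watermark is 1024 pages, the
 * watermark checked above becomes 1024 + (1 << 3) = 1032 pages. Only once
 * an order-3 allocation would pass that watermark does compact_finished()
 * go on to inspect the free lists for a page of the right migratetype.
 * The numbers are hypothetical.
 */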
/*
 * compaction_suitable: Is this suitable to run compaction on this zone now?
 * Returns
 *   COMPACT_SKIPPED  - If there are too few free pages for compaction
 *   COMPACT_PARTIAL  - If the allocation would succeed without compaction
 *   COMPACT_CONTINUE - If compaction should run now
 */
unsigned long compaction_suitable(struct zone *zone, int order)
{
	int fragindex;
	unsigned long watermark;

	/*
	 * order == -1 is expected when compacting via
	 * /proc/sys/vm/compact_memory
	 */
	if (order == -1)
		return COMPACT_CONTINUE;

	/*
	 * Watermarks for order-0 must be met for compaction. Note the 2UL.
	 * This is because during migration, copies of pages need to be
	 * allocated and for a short time, the footprint is higher
	 */
	watermark = low_wmark_pages(zone) + (2UL << order);
	if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
		return COMPACT_SKIPPED;

	/*
	 * fragmentation index determines if allocation failures are due to
	 * low memory or external fragmentation
	 *
	 * index of -1000 implies allocations might succeed depending on
	 * watermarks
	 * index towards 0 implies failure is due to lack of memory
	 * index towards 1000 implies failure is due to fragmentation
	 *
	 * Only compact if a failure would be due to fragmentation.
	 */
	fragindex = fragmentation_index(zone, order);
	if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
		return COMPACT_SKIPPED;

	if (fragindex == -1000 && zone_watermark_ok(zone, order, watermark,
	    0, 0))
		return COMPACT_PARTIAL;

	return COMPACT_CONTINUE;
}
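
/*
 * Worked example (added commentary, not part of the original source): for
 * an order-9 request (2MB with 4K pages) against a zone with a low
 * watermark of 1024 pages, the order-0 check needs 1024 + (2UL << 9) =
 * 2048 free pages, i.e. twice the allocation size over the watermark, to
 * cover the transient copies made during migration. With the default
 * sysctl_extfrag_threshold of 500 (set below), a fragmentation index of,
 * say, 400 means the failure looks like a genuine memory shortage and
 * compaction is skipped; an index of 700 attributes the failure to
 * fragmentation and compaction continues. Values are hypothetical.
 */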
static void compact_capture_page(struct compact_control *cc)
{
	unsigned long flags;
	int mtype, mtype_low, mtype_high;

	if (!cc->page || *cc->page)
		return;

	/*
	 * For MIGRATE_MOVABLE allocations we capture a suitable page ASAP
	 * regardless of the migratetype of the freelist it is captured from.
	 * This is fine because the order for a high-order MIGRATE_MOVABLE
	 * allocation is typically at least a pageblock size and overall
	 * fragmentation is not impaired. Other allocation types must
	 * capture pages from free lists of their own migratetype because
	 * otherwise they could pollute other pageblocks like MIGRATE_MOVABLE
	 * with difficult-to-move pages, making fragmentation worse overall.
	 */
	if (cc->migratetype == MIGRATE_MOVABLE) {
		mtype_low = 0;
		mtype_high = MIGRATE_PCPTYPES;
	} else {
		mtype_low = cc->migratetype;
		mtype_high = cc->migratetype + 1;
	}

	/* Speculatively examine the free lists without zone lock */
	for (mtype = mtype_low; mtype < mtype_high; mtype++) {
		int order;

		for (order = cc->order; order < MAX_ORDER; order++) {
			struct page *page;
			struct free_area *area;

			area = &(cc->zone->free_area[order]);
			if (list_empty(&area->free_list[mtype]))
				continue;

			/* Take the lock and attempt capture of the page */
			if (!compact_trylock_irqsave(&cc->zone->lock, &flags, cc))
				return;
			if (!list_empty(&area->free_list[mtype])) {
				page = list_entry(area->free_list[mtype].next,
							struct page, lru);
				if (capture_free_page(page, cc->order, mtype)) {
					spin_unlock_irqrestore(&cc->zone->lock,
									flags);
					*cc->page = page;
					return;
				}
			}
			spin_unlock_irqrestore(&cc->zone->lock, flags);
		}
	}
}
static int compact_zone(struct zone *zone, struct compact_control *cc)
{
	int ret;
	unsigned long start_pfn = zone->zone_start_pfn;
	unsigned long end_pfn = zone->zone_start_pfn + zone->spanned_pages;

	ret = compaction_suitable(zone, cc->order);
	switch (ret) {
	case COMPACT_PARTIAL:
	case COMPACT_SKIPPED:
		/* Compaction is likely to fail */
		return ret;
	case COMPACT_CONTINUE:
		/* Fall through to compaction */
		;
	}

	/*
	 * Setup to move all movable pages to the end of the zone. Use cached
	 * information on where the scanners should start, but check that it
	 * is initialised by ensuring the values are within zone boundaries.
	 */
	cc->migrate_pfn = zone->compact_cached_migrate_pfn;
	cc->free_pfn = zone->compact_cached_free_pfn;
	if (cc->free_pfn < start_pfn || cc->free_pfn > end_pfn) {
		cc->free_pfn = end_pfn & ~(pageblock_nr_pages-1);
		zone->compact_cached_free_pfn = cc->free_pfn;
	}
	if (cc->migrate_pfn < start_pfn || cc->migrate_pfn > end_pfn) {
		cc->migrate_pfn = start_pfn;
		zone->compact_cached_migrate_pfn = cc->migrate_pfn;
	}

	/*
	 * Clear pageblock skip if there were failures recently and compaction
	 * is about to be retried after being deferred. kswapd does not do
	 * this reset as it'll reset the cached information when going to sleep.
	 */
	if (compaction_restarting(zone, cc->order) && !current_is_kswapd())
		__reset_isolation_suitable(zone);

	migrate_prep_local();

	while ((ret = compact_finished(zone, cc)) == COMPACT_CONTINUE) {
		unsigned long nr_migrate, nr_remaining;
		int err;

		switch (isolate_migratepages(zone, cc)) {
		case ISOLATE_ABORT:
			ret = COMPACT_PARTIAL;
			putback_movable_pages(&cc->migratepages);
			cc->nr_migratepages = 0;
			goto out;
		case ISOLATE_NONE:
			continue;
		case ISOLATE_SUCCESS:
			;
		}

		nr_migrate = cc->nr_migratepages;
		err = migrate_pages(&cc->migratepages, compaction_alloc,
				(unsigned long)cc, false,
				cc->sync ? MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC,
				MR_COMPACTION);
		update_nr_listpages(cc);
		nr_remaining = cc->nr_migratepages;

		trace_mm_compaction_migratepages(nr_migrate - nr_remaining,
						nr_remaining);

		/* Release isolated pages not migrated */
		if (err) {
			putback_movable_pages(&cc->migratepages);
			cc->nr_migratepages = 0;
			if (err == -ENOMEM) {
				ret = COMPACT_PARTIAL;
				goto out;
			}
		}

		/* Capture a page now if it is a suitable size */
		compact_capture_page(cc);
	}

out:
	/* Release free pages and check accounting */
	cc->nr_freepages -= release_freepages(&cc->freepages);
	VM_BUG_ON(cc->nr_freepages != 0);

	return ret;
}
static unsigned long compact_zone_order(struct zone *zone,
				 int order, gfp_t gfp_mask,
				 bool sync, bool *contended,
				 struct page **page)
{
	unsigned long ret;
	struct compact_control cc = {
		.nr_freepages = 0,
		.nr_migratepages = 0,
		.order = order,
		.migratetype = allocflags_to_migratetype(gfp_mask),
		.zone = zone,
		.sync = sync,
		.page = page,
	};
	INIT_LIST_HEAD(&cc.freepages);
	INIT_LIST_HEAD(&cc.migratepages);

	ret = compact_zone(zone, &cc);

	VM_BUG_ON(!list_empty(&cc.freepages));
	VM_BUG_ON(!list_empty(&cc.migratepages));

	*contended = cc.contended;
	return ret;
}
int sysctl_extfrag_threshold = 500;
/**
 * try_to_compact_pages - Direct compact to satisfy a high-order allocation
 * @zonelist: The zonelist used for the current allocation
 * @order: The order of the current allocation
 * @gfp_mask: The GFP mask of the current allocation
 * @nodemask: The allowed nodes to allocate from
 * @sync: Whether migration is synchronous or not
 * @contended: Return value that is true if compaction was aborted due to lock contention
 * @page: Optionally capture a free page of the requested order during compaction
 *
 * This is the main entry point for direct page compaction.
 */
unsigned long try_to_compact_pages(struct zonelist *zonelist,
			int order, gfp_t gfp_mask, nodemask_t *nodemask,
			bool sync, bool *contended, struct page **page)
{
	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
	int may_enter_fs = gfp_mask & __GFP_FS;
	int may_perform_io = gfp_mask & __GFP_IO;
	struct zoneref *z;
	struct zone *zone;
	int rc = COMPACT_SKIPPED;
	int alloc_flags = 0;

	/* Check if the GFP flags allow compaction */
	if (!order || !may_enter_fs || !may_perform_io)
		return rc;

	count_compact_event(COMPACTSTALL);

#ifdef CONFIG_CMA
	if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
		alloc_flags |= ALLOC_CMA;
#endif
	/* Compact each zone in the list */
	for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
								nodemask) {
		int status;

		status = compact_zone_order(zone, order, gfp_mask, sync,
						contended, page);
		rc = max(status, rc);

		/* If a normal allocation would succeed, stop compacting */
		if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0,
				      alloc_flags))
			break;
	}

	return rc;
}
/* Compact all zones within a node */
static int __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
{
	int zoneid;
	struct zone *zone;

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {

		zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		cc->nr_freepages = 0;
		cc->nr_migratepages = 0;
		cc->zone = zone;
		INIT_LIST_HEAD(&cc->freepages);
		INIT_LIST_HEAD(&cc->migratepages);

		if (cc->order == -1 || !compaction_deferred(zone, cc->order))
			compact_zone(zone, cc);

		if (cc->order > 0) {
			int ok = zone_watermark_ok(zone, cc->order,
						low_wmark_pages(zone), 0, 0);
			if (ok && cc->order >= zone->compact_order_failed)
				zone->compact_order_failed = cc->order + 1;
			/* Currently async compaction is never deferred. */
			else if (!ok && cc->sync)
				defer_compaction(zone, cc->order);
		}

		VM_BUG_ON(!list_empty(&cc->freepages));
		VM_BUG_ON(!list_empty(&cc->migratepages));
	}

	return 0;
}
int compact_pgdat(pg_data_t *pgdat, int order)
{
	struct compact_control cc = {
		.order = order,
		.sync = false,
		.page = NULL,
	};

	return __compact_pgdat(pgdat, &cc);
}
static int compact_node(int nid)
{
	struct compact_control cc = {
		.order = -1,
		.sync = true,
		.page = NULL,
	};

	return __compact_pgdat(NODE_DATA(nid), &cc);
}
/* Compact all nodes in the system */
static int compact_nodes(void)
{
	int nid;

	/* Flush pending updates to the LRU lists */
	lru_add_drain_all();

	for_each_online_node(nid)
		compact_node(nid);

	return COMPACT_COMPLETE;
}
/* The written value is actually unused, all memory is compacted */
int sysctl_compact_memory;

/* This is the entry point for compacting all nodes via /proc/sys/vm */
int sysctl_compaction_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	if (write)
		return compact_nodes();

	return 0;
}
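
/*
 * Usage (added commentary, not part of the original source): writing any
 * value triggers a full, synchronous compaction of every online node:
 *
 *	echo 1 > /proc/sys/vm/compact_memory
 *
 * The written value itself is ignored, as noted above.
 */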
int sysctl_extfrag_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec_minmax(table, write, buffer, length, ppos);

	return 0;
}
#if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
ssize_t sysfs_compact_node(struct device *dev,
			struct device_attribute *attr,
			const char *buf, size_t count)
{
	int nid = dev->id;

	if (nid >= 0 && nid < nr_node_ids && node_online(nid)) {
		/* Flush pending updates to the LRU lists */
		lru_add_drain_all();

		compact_node(nid);
	}

	return count;
}
static DEVICE_ATTR(compact, S_IWUSR, NULL, sysfs_compact_node);
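
/*
 * Usage (added commentary, not part of the original source): the write-only
 * attribute defined above compacts a single node, e.g.:
 *
 *	echo 1 > /sys/devices/system/node/node0/compact
 */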
int compaction_register_node(struct node *node)
{
	return device_create_file(&node->dev, &dev_attr_compact);
}

void compaction_unregister_node(struct node *node)
{
	return device_remove_file(&node->dev, &dev_attr_compact);
}
#endif /* CONFIG_SYSFS && CONFIG_NUMA */
#endif /* CONFIG_COMPACTION */