// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/page_isolation.c
 */

#include <linux/mm.h>
#include <linux/page-isolation.h>
#include <linux/pageblock-flags.h>
#include <linux/memory.h>
#include <linux/hugetlb.h>
#include <linux/page_owner.h>
#include <linux/migrate.h>

#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/page_isolation.h>
/*
 * This function checks whether the range [start_pfn, end_pfn) includes
 * unmovable pages or not. The range must fall into a single pageblock and
 * consequently belong to a single zone.
 *
 * A PageLRU check without isolation or lru_lock could race, so a
 * MIGRATE_MOVABLE block might include unmovable pages. A __PageMovable
 * check without lock_page may likewise miss some movable non-LRU pages in
 * such a race, so this function cannot be expected to be exact.
 *
 * Returns a page without holding a reference. If the caller wants to
 * dereference that page (e.g., dumping), it has to make sure that it
 * cannot get removed (e.g., via memory unplug) concurrently.
 */
static struct page *has_unmovable_pages(unsigned long start_pfn, unsigned long end_pfn,
				int migratetype, int flags)
{
	struct page *page = pfn_to_page(start_pfn);
	struct zone *zone = page_zone(page);
	unsigned long pfn;

	VM_BUG_ON(pageblock_start_pfn(start_pfn) !=
		  pageblock_start_pfn(end_pfn - 1));

	if (is_migrate_cma_page(page)) {
		/*
		 * CMA allocations (alloc_contig_range) really need to mark
		 * CMA pageblocks isolated even when they are not in fact
		 * movable, so consider them movable here.
		 */
		if (is_migrate_cma(migratetype))
			return NULL;

		return page;
	}

	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
		page = pfn_to_page(pfn);

		/*
		 * Both bootmem allocations and memory holes are marked
		 * PG_reserved and are unmovable. We can even have unmovable
		 * allocations inside ZONE_MOVABLE, for example when
		 * specifying "movablecore".
		 */
		if (PageReserved(page))
			return page;

		/*
		 * If the zone is movable and we have ruled out all reserved
		 * pages then it should be reasonably safe to assume the rest
		 * is movable.
		 */
		if (zone_idx(zone) == ZONE_MOVABLE)
			continue;

		/*
		 * Hugepages are not in LRU lists, but they're movable.
		 * THPs are on the LRU, but need to be counted as #small pages.
		 * We need not scan over tail pages because we don't
		 * handle each tail page individually in migration.
		 */
		if (PageHuge(page) || PageTransCompound(page)) {
			struct folio *folio = page_folio(page);
			unsigned int skip_pages;

			if (PageHuge(page)) {
				if (!hugepage_migration_supported(folio_hstate(folio)))
					return page;
			} else if (!folio_test_lru(folio) && !__folio_test_movable(folio)) {
				return page;
			}

			skip_pages = folio_nr_pages(folio) - folio_page_idx(folio, page);
			pfn += skip_pages - 1;
			continue;
		}

		/*
		 * We can't use page_count without pinning the page
		 * because another CPU can free the compound page.
		 * This check already skips compound tails of THP
		 * because their page->_refcount is zero at all times.
		 */
		if (!page_ref_count(page)) {
			if (PageBuddy(page))
				pfn += (1 << buddy_order(page)) - 1;
			continue;
		}

		/*
		 * A HWPoisoned page may not be in the buddy system, and
		 * page_count() is not 0.
		 */
		if ((flags & MEMORY_OFFLINE) && PageHWPoison(page))
			continue;

		/*
		 * We treat all PageOffline() pages as movable when offlining
		 * to give drivers a chance to decrement their reference count
		 * in MEM_GOING_OFFLINE in order to indicate that these pages
		 * can be offlined as there are no direct references anymore.
		 * For actually unmovable PageOffline() pages where the driver
		 * does not support this, we will fail later when trying to
		 * actually move these pages that still have a reference
		 * count > 0. (false negatives in this function only)
		 */
		if ((flags & MEMORY_OFFLINE) && PageOffline(page))
			continue;

		if (__PageMovable(page) || PageLRU(page))
			continue;

		/*
		 * If there are RECLAIMABLE pages, we need to check them.
		 * But for now, memory offlining itself doesn't call
		 * shrink_node_slabs() and that still needs to be fixed.
		 */
		return page;
	}
	return NULL;
}
/*
 * This function sets the pageblock migratetype to MIGRATE_ISOLATE if no
 * unmovable page is present in [start_pfn, end_pfn). The pageblock must
 * intersect with [start_pfn, end_pfn).
 */
static int set_migratetype_isolate(struct page *page, int migratetype, int isol_flags,
			unsigned long start_pfn, unsigned long end_pfn)
{
	struct zone *zone = page_zone(page);
	struct page *unmovable;
	unsigned long flags;
	unsigned long check_unmovable_start, check_unmovable_end;
	if (PageUnaccepted(page))
		accept_page(page);

	spin_lock_irqsave(&zone->lock, flags);

	/*
	 * We assume the caller intended to SET the migratetype to isolate.
	 * If it is already set, then someone else must have raced and
	 * set it before us.
	 */
	if (is_migrate_isolate_page(page)) {
		spin_unlock_irqrestore(&zone->lock, flags);
		return -EBUSY;
	}

	/*
	 * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
	 * We just check MOVABLE pages.
	 *
	 * Pass the intersection of [start_pfn, end_pfn) and the page's pageblock
	 * to avoid redundant checks.
	 */
	check_unmovable_start = max(page_to_pfn(page), start_pfn);
	check_unmovable_end = min(pageblock_end_pfn(page_to_pfn(page)),
				  end_pfn);

	unmovable = has_unmovable_pages(check_unmovable_start, check_unmovable_end,
			migratetype, isol_flags);
	if (!unmovable) {
		if (!move_freepages_block_isolate(zone, page, MIGRATE_ISOLATE)) {
			spin_unlock_irqrestore(&zone->lock, flags);
			return -EBUSY;
		}
		zone->nr_isolate_pageblock++;
		spin_unlock_irqrestore(&zone->lock, flags);
		return 0;
	}

	spin_unlock_irqrestore(&zone->lock, flags);
	if (isol_flags & REPORT_FAILURE) {
		/*
		 * printk() with zone->lock held will likely trigger a
		 * lockdep splat, so defer it here.
		 */
		dump_page(unmovable, "unmovable page");
	}

	return -EBUSY;
}
static void unset_migratetype_isolate(struct page *page, int migratetype)
{
	struct zone *zone;
	unsigned long flags;
	bool isolated_page = false;
	unsigned int order;
	struct page *buddy;

	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	if (!is_migrate_isolate_page(page))
		goto out;

	/*
	 * Because a free page with more than pageblock_order on an isolated
	 * pageblock is restricted from merging due to the freepage counting
	 * problem, it is possible that there is a free buddy page.
	 * move_freepages_block() doesn't care about merging, so we need
	 * another approach to merge them. Isolating and then freeing will
	 * make these pages be merged.
	 */
	if (PageBuddy(page)) {
		order = buddy_order(page);
		if (order >= pageblock_order && order < MAX_PAGE_ORDER) {
			buddy = find_buddy_page_pfn(page, page_to_pfn(page),
						    order, NULL);
			if (buddy && !is_migrate_isolate_page(buddy)) {
				isolated_page = !!__isolate_free_page(page, order);
				/*
				 * Isolating a free page in an isolated pageblock
				 * is expected to always work as watermarks don't
				 * apply here.
				 */
				VM_WARN_ON(!isolated_page);
			}
		}
	}

	/*
	 * If we isolate a free page with more than pageblock_order, there
	 * should be no free page left in the range, so we can avoid costly
	 * pageblock scanning for freepage moving.
	 *
	 * We didn't actually touch any of the isolated pages, so place them
	 * at the tail of the freelist. This is an optimization for memory
	 * onlining - just onlined memory won't immediately be considered for
	 * allocation.
	 */
	if (!isolated_page) {
		/*
		 * Isolating this block already succeeded, so this
		 * should not fail on zone boundaries.
		 */
		WARN_ON_ONCE(!move_freepages_block_isolate(zone, page, migratetype));
	} else {
		set_pageblock_migratetype(page, migratetype);
		__putback_isolated_page(page, order, migratetype);
	}
	zone->nr_isolate_pageblock--;
out:
	spin_unlock_irqrestore(&zone->lock, flags);
}
static inline struct page *
__first_valid_page(unsigned long pfn, unsigned long nr_pages)
{
	int i;

	for (i = 0; i < nr_pages; i++) {
		struct page *page;

		page = pfn_to_online_page(pfn + i);
		if (!page)
			continue;
		return page;
	}
	return NULL;
}
/**
 * isolate_single_pageblock() -- tries to isolate a pageblock that might be
 * within a free or in-use page.
 * @boundary_pfn:	pageblock-aligned pfn that a page might cross
 * @flags:		isolation flags
 * @gfp_flags:		GFP flags used for migrating pages
 * @isolate_before:	isolate the pageblock before the boundary_pfn
 * @skip_isolation:	the flag to skip the pageblock isolation in second
 *			isolate_single_pageblock()
 * @migratetype:	migrate type to set in error recovery.
 *
 * Free and in-use pages can be as big as MAX_PAGE_ORDER and contain more than one
 * pageblock. When not all pageblocks within a page are isolated at the same
 * time, free page accounting can go wrong. For example, in the case of
 * MAX_PAGE_ORDER = pageblock_order + 1, a MAX_PAGE_ORDER page has two
 * pageblocks:
 * [      MAX_PAGE_ORDER page       ]
 * [  pageblock0  |   pageblock1    ]
 * When either pageblock is isolated, if it is a free page, the page is not
 * split into separate migratetype lists, as it is supposed to be; if it is an
 * in-use page and freed later, __free_one_page() does not split the free page
 * either. The function handles this by splitting the free page or migrating
 * the in-use page then splitting the free page.
 */
static int isolate_single_pageblock(unsigned long boundary_pfn, int flags,
			gfp_t gfp_flags, bool isolate_before, bool skip_isolation,
			int migratetype)
{
	unsigned long start_pfn;
	unsigned long isolate_pageblock;
	unsigned long pfn;
	struct zone *zone;
	int ret;

	VM_BUG_ON(!pageblock_aligned(boundary_pfn));

	if (isolate_before)
		isolate_pageblock = boundary_pfn - pageblock_nr_pages;
	else
		isolate_pageblock = boundary_pfn;

	/*
	 * scan at the beginning of MAX_ORDER_NR_PAGES aligned range to avoid
	 * only isolating a subset of pageblocks from a bigger than pageblock
	 * free or in-use page. Also make sure all to-be-isolated pageblocks
	 * are within the same zone.
	 */
	zone = page_zone(pfn_to_page(isolate_pageblock));
	start_pfn = max(ALIGN_DOWN(isolate_pageblock, MAX_ORDER_NR_PAGES),
			zone->zone_start_pfn);

	if (skip_isolation) {
		int mt __maybe_unused = get_pageblock_migratetype(pfn_to_page(isolate_pageblock));

		VM_BUG_ON(!is_migrate_isolate(mt));
	} else {
		ret = set_migratetype_isolate(pfn_to_page(isolate_pageblock), migratetype,
				flags, isolate_pageblock, isolate_pageblock + pageblock_nr_pages);

		if (ret)
			return ret;
	}

	/*
	 * Bail out early when the to-be-isolated pageblock does not form
	 * a free or in-use page across boundary_pfn:
	 *
	 * 1. isolate before boundary_pfn: the page after is not online
	 * 2. isolate after boundary_pfn: the page before is not online
	 *
	 * This also ensures correctness. Without it, when isolate after
	 * boundary_pfn and [start_pfn, boundary_pfn) are not online,
	 * __first_valid_page() will return unexpected NULL in the for loop
	 * below.
	 */
	if (isolate_before) {
		if (!pfn_to_online_page(boundary_pfn))
			return 0;
	} else {
		if (!pfn_to_online_page(boundary_pfn - 1))
			return 0;
	}

	for (pfn = start_pfn; pfn < boundary_pfn;) {
		struct page *page = __first_valid_page(pfn, boundary_pfn - pfn);

		VM_BUG_ON(!page);
		pfn = page_to_pfn(page);

		if (PageUnaccepted(page)) {
			pfn += MAX_ORDER_NR_PAGES;
			continue;
		}

		if (PageBuddy(page)) {
			int order = buddy_order(page);

			/* move_freepages_block_isolate() handled this */
			VM_WARN_ON_ONCE(pfn + (1 << order) > boundary_pfn);

			pfn += 1UL << order;
			continue;
		}

		/*
		 * If a compound page is straddling our block, attempt
		 * to migrate it out of the way.
		 *
		 * We don't have to worry about this creating a large
		 * free page that straddles into our block: gigantic
		 * pages are freed as order-0 chunks, and LRU pages
		 * (currently) do not exceed pageblock_order.
		 *
		 * The block of interest has already been marked
		 * MIGRATE_ISOLATE above, so when migration is done it
		 * will free its pages onto the correct freelists.
		 */
		if (PageCompound(page)) {
			struct page *head = compound_head(page);
			unsigned long head_pfn = page_to_pfn(head);
			unsigned long nr_pages = compound_nr(head);

			if (head_pfn + nr_pages <= boundary_pfn ||
			    PageHuge(page)) {
				pfn = head_pfn + nr_pages;
				continue;
			}

			/*
			 * These pages are movable too, but they're
			 * not expected to exceed pageblock_order.
			 *
			 * Let us know when they do, so we can add
			 * proper free and split handling for them.
			 */
			VM_WARN_ON_ONCE_PAGE(PageLRU(page), page);
			VM_WARN_ON_ONCE_PAGE(__PageMovable(page), page);

			goto failed;
		}

		pfn++;
	}
	return 0;
failed:
	/* restore the original migratetype */
	if (!skip_isolation)
		unset_migratetype_isolate(pfn_to_page(isolate_pageblock), migratetype);
	return -EBUSY;
}
/**
 * start_isolate_page_range() - mark page range MIGRATE_ISOLATE
 * @start_pfn:		The first PFN of the range to be isolated.
 * @end_pfn:		The first PFN *after* the range to be isolated.
 * @migratetype:	Migrate type to set in error recovery.
 * @flags:		The following flags are allowed (they can be combined in
 *			a bit mask):
 *			MEMORY_OFFLINE - isolate to offline (!allocate) memory
 *					 e.g., skip over PageHWPoison() pages
 *					 and PageOffline() pages.
 *			REPORT_FAILURE - report details about the failure to
 *			isolate the range
 * @gfp_flags:		GFP flags used for migrating pages that sit across the
 *			range boundaries.
 *
 * Setting the page-allocation type to MIGRATE_ISOLATE means free pages in
 * the range will never be allocated. Any free pages and pages freed in the
 * future will not be allocated again. If the specified range includes migrate
 * types other than MOVABLE or CMA, this will fail with -EBUSY. To finally
 * isolate all pages in the range, the caller has to free all pages in the
 * range. test_pages_isolated() can be used to test this.
 *
 * The function first tries to isolate the pageblocks at the beginning and end
 * of the range, since there might be pages across the range boundaries.
 * Afterwards, it isolates the rest of the range.
 *
 * There is no high level synchronization mechanism that prevents two threads
 * from trying to isolate overlapping ranges. If this happens, one thread
 * will notice pageblocks in the overlapping range already set to isolate.
 * This happens in set_migratetype_isolate, and set_migratetype_isolate
 * returns an error. We then clean up by restoring the migration type on
 * pageblocks we may have modified and return -EBUSY to the caller. This
 * prevents two threads from simultaneously working on overlapping ranges.
 *
 * Please note that there is no strong synchronization with the page allocator
 * either. Pages might be freed while their page blocks are marked ISOLATED.
 * A call to drain_all_pages() after isolation can flush most of them. However
 * in some cases pages might still end up on pcp lists and that would allow
 * for their allocation even when they are in fact isolated already. Depending
 * on how strong of a guarantee the caller needs, zone_pcp_disable/enable()
 * might be used to flush and disable the pcplists before isolation and to
 * enable them again after unisolation.
 *
 * Return: 0 on success and -EBUSY if any part of range cannot be isolated.
 */
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			     int migratetype, int flags, gfp_t gfp_flags)
{
	unsigned long pfn;
	struct page *page;
	/* isolation is done at page block granularity */
	unsigned long isolate_start = pageblock_start_pfn(start_pfn);
	unsigned long isolate_end = pageblock_align(end_pfn);
	int ret;
	bool skip_isolation = false;

	/* isolate [isolate_start, isolate_start + pageblock_nr_pages) pageblock */
	ret = isolate_single_pageblock(isolate_start, flags, gfp_flags, false,
			skip_isolation, migratetype);
	if (ret)
		return ret;

	if (isolate_start == isolate_end - pageblock_nr_pages)
		skip_isolation = true;

	/* isolate [isolate_end - pageblock_nr_pages, isolate_end) pageblock */
	ret = isolate_single_pageblock(isolate_end, flags, gfp_flags, true,
			skip_isolation, migratetype);
	if (ret) {
		unset_migratetype_isolate(pfn_to_page(isolate_start), migratetype);
		return ret;
	}

	/* skip isolated pageblocks at the beginning and end */
	for (pfn = isolate_start + pageblock_nr_pages;
	     pfn < isolate_end - pageblock_nr_pages;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page && set_migratetype_isolate(page, migratetype, flags,
					start_pfn, end_pfn)) {
			undo_isolate_page_range(isolate_start, pfn, migratetype);
			unset_migratetype_isolate(
				pfn_to_page(isolate_end - pageblock_nr_pages),
				migratetype);
			return -EBUSY;
		}
	}
	return 0;
}
/**
 * undo_isolate_page_range - undo the effects of start_isolate_page_range()
 * @start_pfn:		The first PFN of the isolated range
 * @end_pfn:		The first PFN *after* the isolated range
 * @migratetype:	New migrate type to set on the range
 *
 * This finds every MIGRATE_ISOLATE page block in the given range
 * and switches it to @migratetype.
 */
void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			    int migratetype)
{
	unsigned long pfn;
	struct page *page;
	unsigned long isolate_start = pageblock_start_pfn(start_pfn);
	unsigned long isolate_end = pageblock_align(end_pfn);

	for (pfn = isolate_start;
	     pfn < isolate_end;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (!page || !is_migrate_isolate_page(page))
			continue;
		unset_migratetype_isolate(page, migratetype);
	}
}
/*
 * Test whether all pages in the range are free (i.e. isolated) or not.
 * All pages in [start_pfn...end_pfn) must be in the same zone.
 * zone->lock must be held before calling this.
 *
 * Returns the last tested pfn.
 */
static unsigned long
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
				  int flags)
{
	struct page *page;

	while (pfn < end_pfn) {
		page = pfn_to_page(pfn);
		if (PageBuddy(page))
			/*
			 * If the page is on a free list, it has to be on
			 * the correct MIGRATE_ISOLATE freelist. There is no
			 * simple way to verify that as VM_BUG_ON(), though.
			 */
			pfn += 1 << buddy_order(page);
		else if ((flags & MEMORY_OFFLINE) && PageHWPoison(page))
			/* A HWPoisoned page cannot also be PageBuddy */
			pfn++;
		else if ((flags & MEMORY_OFFLINE) && PageOffline(page) &&
			 !page_count(page))
			/*
			 * The responsible driver agreed to skip PageOffline()
			 * pages when offlining memory by dropping its
			 * reference in MEM_GOING_OFFLINE.
			 */
			pfn++;
		else
			break;
	}

	return pfn;
}
/**
 * test_pages_isolated - check if pageblocks in range are isolated
 * @start_pfn:		The first PFN of the isolated range
 * @end_pfn:		The first PFN *after* the isolated range
 * @isol_flags:		Testing mode flags
 *
 * This tests whether all pages in the specified range are free.
 *
 * If %MEMORY_OFFLINE is specified in @isol_flags, it will consider
 * poisoned and offlined pages free as well.
 *
 * Caller must ensure the requested range doesn't span zones.
 *
 * Returns 0 if true, -EBUSY if one or more pages are in use.
 */
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
			int isol_flags)
{
	unsigned long pfn, flags;
	struct page *page;
	struct zone *zone;
	int ret;

	/*
	 * Note: pageblock_nr_pages != MAX_ORDER_NR_PAGES, so chunks of free
	 * pages are not necessarily aligned to pageblock_nr_pages.
	 * Check the migratetype of each pageblock first.
	 */
	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page && !is_migrate_isolate_page(page))
			break;
	}
	page = __first_valid_page(start_pfn, end_pfn - start_pfn);
	if ((pfn < end_pfn) || !page) {
		ret = -EBUSY;
		goto out;
	}

	/* Check all pages are free or marked as ISOLATED */
	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	pfn = __test_page_isolated_in_pageblock(start_pfn, end_pfn, isol_flags);
	spin_unlock_irqrestore(&zone->lock, flags);

	ret = pfn < end_pfn ? -EBUSY : 0;

out:
	trace_test_pages_isolated(start_pfn, end_pfn, pfn);

	return ret;
}