// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/page_isolation.c
 */

#include <linux/mm.h>
#include <linux/page-isolation.h>
#include <linux/pageblock-flags.h>
#include <linux/memory.h>
#include <linux/hugetlb.h>
#include <linux/page_owner.h>
#include <linux/migrate.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/page_isolation.h>
static int set_migratetype_isolate(struct page *page, int migratetype, int isol_flags)
{
	struct page *unmovable = NULL;
	struct zone *zone;
	unsigned long flags;
	int ret = -EBUSY;

	zone = page_zone(page);

	spin_lock_irqsave(&zone->lock, flags);

	/*
	 * We assume the caller intended to SET migrate type to isolate.
	 * If it is already set, then someone else must have raced and
	 * set it before us. Return -EBUSY
	 */
	if (is_migrate_isolate_page(page))
		goto out;

	/*
	 * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
	 * We just check MOVABLE pages.
	 */
	unmovable = has_unmovable_pages(zone, page, migratetype, isol_flags);
	if (!unmovable) {
		unsigned long nr_pages;
		int mt = get_pageblock_migratetype(page);

		set_pageblock_migratetype(page, MIGRATE_ISOLATE);
		zone->nr_isolate_pageblock++;
		nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE,
						NULL);

		__mod_zone_freepage_state(zone, -nr_pages, mt);
		ret = 0;
	}

out:
	spin_unlock_irqrestore(&zone->lock, flags);
	if (!ret)
		drain_all_pages(zone);
	else {
		WARN_ON_ONCE(zone_idx(zone) == ZONE_MOVABLE);

		if ((isol_flags & REPORT_FAILURE) && unmovable)
			/*
			 * printk() with zone->lock held will likely trigger a
			 * lockdep splat, so defer it here.
			 */
			dump_page(unmovable, "unmovable page");
	}

	return ret;
}
static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
{
	struct zone *zone;
	unsigned long flags, nr_pages;
	bool isolated_page = false;
	unsigned int order;
	unsigned long pfn, buddy_pfn;
	struct page *buddy;

	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	if (!is_migrate_isolate_page(page))
		goto out;

	/*
	 * Because freepage with more than pageblock_order on isolated
	 * pageblock is restricted to merge due to freepage counting problem,
	 * it is possible that there is free buddy page.
	 * move_freepages_block() doesn't care of merge so we need other
	 * approach in order to merge them. Isolation and free will make
	 * these pages to be merged.
	 */
	if (PageBuddy(page)) {
		order = page_order(page);
		if (order >= pageblock_order) {
			pfn = page_to_pfn(page);
			buddy_pfn = __find_buddy_pfn(pfn, order);
			buddy = page + (buddy_pfn - pfn);

			if (pfn_valid_within(buddy_pfn) &&
			    !is_migrate_isolate_page(buddy)) {
				__isolate_free_page(page, order);
				isolated_page = true;
			}
		}
	}

	/*
	 * If we isolate freepage with more than pageblock_order, there
	 * should be no freepage in the range, so we could avoid costly
	 * pageblock scanning for freepage moving.
	 */
	if (!isolated_page) {
		nr_pages = move_freepages_block(zone, page, migratetype, NULL);
		__mod_zone_freepage_state(zone, nr_pages, migratetype);
	}
	set_pageblock_migratetype(page, migratetype);
	if (isolated_page)
		__putback_isolated_page(page, order, migratetype);
	zone->nr_isolate_pageblock--;
out:
	spin_unlock_irqrestore(&zone->lock, flags);
}
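/*
 * Worked example for the buddy lookup above (illustrative numbers, not from
 * this file): with pageblock_order == 9, a free page at pfn 0x10000 with
 * order == 9 has __find_buddy_pfn(0x10000, 9) == 0x10000 ^ (1 << 9) ==
 * 0x10200. If that buddy pageblock is not isolated, the page is pulled off
 * the freelist with __isolate_free_page() and handed back via
 * __putback_isolated_page(), so the two halves can merge normally.
 */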
static inline struct page *
__first_valid_page(unsigned long pfn, unsigned long nr_pages)
{
	int i;

	for (i = 0; i < nr_pages; i++) {
		struct page *page;

		page = pfn_to_online_page(pfn + i);
		if (!page)
			continue;
		return page;
	}
	return NULL;
}
/**
 * start_isolate_page_range() - make page-allocation-type of range of pages to
 * be MIGRATE_ISOLATE.
 * @start_pfn:		The lower PFN of the range to be isolated.
 * @end_pfn:		The upper PFN of the range to be isolated.
 *			start_pfn/end_pfn must be aligned to pageblock_order.
 * @migratetype:	Migrate type to set in error recovery.
 * @flags:		The following flags are allowed (they can be combined in
 *			a bit mask):
 *			MEMORY_OFFLINE - isolate to offline (!allocate) memory
 *					 e.g., skip over PageHWPoison() pages
 *			REPORT_FAILURE - report details about the failure to
 *					 isolate the range
 *
 * Making page-allocation-type to be MIGRATE_ISOLATE means free pages in
 * the range will never be allocated. Any free pages and pages freed in the
 * future will not be allocated again. If the specified range includes migrate
 * types other than MOVABLE or CMA, this will fail with -EBUSY. To finally
 * isolate all pages in the range, the caller has to free all pages in the
 * range; test_pages_isolated() can be used to test for that.
 *
 * There is no high level synchronization mechanism that prevents two threads
 * from trying to isolate overlapping ranges. If this happens, one thread
 * will notice pageblocks in the overlapping range already set to isolate.
 * This happens in set_migratetype_isolate(), and set_migratetype_isolate()
 * returns an error. We then clean up by restoring the migration type on
 * pageblocks we may have modified and return -EBUSY to the caller. This
 * prevents two threads from simultaneously working on overlapping ranges.
 *
 * Return: the number of isolated pageblocks on success and -EBUSY if any part
 * of the range cannot be isolated.
 */
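/*
 * Illustrative usage sketch (not part of this file; the helper name and the
 * migration step are hypothetical): callers such as alloc_contig_range()
 * pair this with test_pages_isolated() and undo_isolate_page_range():
 *
 *	static int example_isolate(unsigned long start_pfn, unsigned long end_pfn)
 *	{
 *		int ret;
 *
 *		// Both PFNs must be pageblock-aligned.
 *		ret = start_isolate_page_range(start_pfn, end_pfn,
 *					       MIGRATE_MOVABLE, 0);
 *		if (ret < 0)
 *			return ret;
 *
 *		// ... migrate or free every page in the range here ...
 *
 *		ret = test_pages_isolated(start_pfn, end_pfn, 0);
 *		undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
 *		return ret;
 *	}
 */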
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			     unsigned migratetype, int flags)
{
	unsigned long pfn;
	unsigned long undo_pfn;
	struct page *page;
	int nr_isolate_pageblock = 0;

	BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
	BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));

	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page) {
			if (set_migratetype_isolate(page, migratetype, flags)) {
				undo_pfn = pfn;
				goto undo;
			}
			nr_isolate_pageblock++;
		}
	}
	return nr_isolate_pageblock;
undo:
	for (pfn = start_pfn;
	     pfn < undo_pfn;
	     pfn += pageblock_nr_pages) {
		struct page *page = pfn_to_online_page(pfn);
		if (!page)
			continue;
		unset_migratetype_isolate(page, migratetype);
	}

	return -EBUSY;
}
/*
 * Make isolated pages available again.
 */
void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			     unsigned migratetype)
{
	unsigned long pfn;
	struct page *page;

	BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
	BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));

	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (!page || !is_migrate_isolate_page(page))
			continue;
		unset_migratetype_isolate(page, migratetype);
	}
}
/*
 * Test whether all pages in the range are free (which means isolated).
 * All pages in [start_pfn...end_pfn) must be in the same zone.
 * zone->lock must be held before calling this.
 *
 * Returns the last tested pfn.
 */
static unsigned long
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
				  int flags)
{
	struct page *page;

	while (pfn < end_pfn) {
		if (!pfn_valid_within(pfn)) {
			pfn++;
			continue;
		}
		page = pfn_to_page(pfn);
		if (PageBuddy(page))
			/*
			 * If the page is on a free list, it has to be on
			 * the correct MIGRATE_ISOLATE freelist. There is no
			 * simple way to verify that as VM_BUG_ON(), though.
			 */
			pfn += 1 << page_order(page);
		else if ((flags & MEMORY_OFFLINE) && PageHWPoison(page))
			/* A HWPoisoned page cannot be also PageBuddy */
			pfn++;
		else
			break;
	}

	return pfn;
}
/* Caller should ensure that requested range is in a single zone */
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
			int isol_flags)
{
	unsigned long pfn, flags;
	struct page *page;
	struct zone *zone;

	/*
	 * Note: pageblock_nr_pages != MAX_ORDER, so chunks of free pages are
	 * not necessarily aligned to pageblock_nr_pages. Just check the
	 * migratetype first.
	 */
	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page && !is_migrate_isolate_page(page))
			break;
	}
	page = __first_valid_page(start_pfn, end_pfn - start_pfn);
	if ((pfn < end_pfn) || !page)
		return -EBUSY;

	/* Check all pages are free or marked as ISOLATED */
	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	pfn = __test_page_isolated_in_pageblock(start_pfn, end_pfn,
						isol_flags);
	spin_unlock_irqrestore(&zone->lock, flags);

	trace_test_pages_isolated(start_pfn, end_pfn, pfn);

	return pfn < end_pfn ? -EBUSY : 0;
}
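/*
 * Illustrative sketch (not part of this file): memory offlining keeps
 * retrying until the range is fully isolated, along these hypothetical
 * lines:
 *
 *	do {
 *		ret = test_pages_isolated(start_pfn, end_pfn, MEMORY_OFFLINE);
 *	} while (ret == -EBUSY && !fatal_signal_pending(current));
 */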
struct page *alloc_migrate_target(struct page *page, unsigned long private)
{
	return new_page_nodemask(page, numa_node_id(), &node_states[N_MEMORY]);
}
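/*
 * Illustrative sketch (not part of this file): alloc_migrate_target() has
 * the new_page_t callback shape, so a hypothetical caller with a list of
 * isolated pages (source_pages below is assumed) could hand it straight to
 * migrate_pages():
 *
 *	ret = migrate_pages(&source_pages, alloc_migrate_target, NULL,
 *			    0, MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
 */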