// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/page_isolation.c
 */

#include <linux/mm.h>
#include <linux/page-isolation.h>
#include <linux/pageblock-flags.h>
#include <linux/memory.h>
#include <linux/hugetlb.h>
#include <linux/page_owner.h>
#include <linux/migrate.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/page_isolation.h>
static int set_migratetype_isolate(struct page *page, int migratetype, int isol_flags)
{
	struct page *unmovable = NULL;
	struct zone *zone;
	unsigned long flags;
	int ret = -EBUSY;

	zone = page_zone(page);

	spin_lock_irqsave(&zone->lock, flags);

	/*
	 * We assume the caller intended to SET the migratetype to isolate.
	 * If it is already set, then someone else must have raced and
	 * set it before us. Return -EBUSY.
	 */
	if (is_migrate_isolate_page(page))
		goto out;

	/*
	 * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
	 * We just check MOVABLE pages.
	 */
	unmovable = has_unmovable_pages(zone, page, migratetype, isol_flags);
	if (!unmovable) {
		unsigned long nr_pages;
		int mt = get_pageblock_migratetype(page);

		set_pageblock_migratetype(page, MIGRATE_ISOLATE);
		zone->nr_isolate_pageblock++;
		nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE,
						NULL);

		__mod_zone_freepage_state(zone, -nr_pages, mt);
		ret = 0;
	}

out:
	spin_unlock_irqrestore(&zone->lock, flags);
	if (!ret) {
		drain_all_pages(zone);
	} else {
		WARN_ON_ONCE(zone_idx(zone) == ZONE_MOVABLE);

		if ((isol_flags & REPORT_FAILURE) && unmovable)
			/*
			 * printk() with zone->lock held will likely trigger a
			 * lockdep splat, so defer it here.
			 */
			dump_page(unmovable, "unmovable page");
	}

	return ret;
}
static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
{
	struct zone *zone;
	unsigned long flags, nr_pages;
	bool isolated_page = false;
	unsigned int order;
	unsigned long pfn, buddy_pfn;
	struct page *buddy;

	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	if (!is_migrate_isolate_page(page))
		goto out;

	/*
	 * Because a free page with an order >= pageblock_order on an
	 * isolated pageblock is restricted from merging due to the freepage
	 * counting problem, it is possible that a free buddy page exists.
	 * move_freepages_block() does not handle merging, so we need another
	 * approach: isolating the page and then freeing it lets the buddy
	 * allocator merge the pair.
	 */
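	/*
	 * Illustrative note (not in the original source): e.g. an isolated,
	 * free pageblock-order page whose equally sized buddy sits free in a
	 * neighbouring, non-isolated pageblock. Pulling the page off the
	 * freelist here and re-freeing it at the end of this function lets
	 * __free_one_page() merge the pair into one higher-order free page.
	 */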
	if (PageBuddy(page)) {
		order = page_order(page);
		if (order >= pageblock_order) {
			pfn = page_to_pfn(page);
			buddy_pfn = __find_buddy_pfn(pfn, order);
			buddy = page + (buddy_pfn - pfn);

			if (pfn_valid_within(buddy_pfn) &&
			    !is_migrate_isolate_page(buddy)) {
				__isolate_free_page(page, order);
				isolated_page = true;
			}
		}
	}

	/*
	 * If we isolate a free page with an order >= pageblock_order, there
	 * should be no other free pages in the range, so we can avoid the
	 * costly pageblock scan for moving free pages.
	 */
	if (!isolated_page) {
		nr_pages = move_freepages_block(zone, page, migratetype, NULL);
		__mod_zone_freepage_state(zone, nr_pages, migratetype);
	}
	set_pageblock_migratetype(page, migratetype);
	zone->nr_isolate_pageblock--;
out:
	spin_unlock_irqrestore(&zone->lock, flags);
	if (isolated_page) {
		post_alloc_hook(page, order, __GFP_MOVABLE);
		__free_pages(page, order);
	}
}
/* Return the first online page in [pfn, pfn + nr_pages), or NULL if none. */
static inline struct page *
__first_valid_page(unsigned long pfn, unsigned long nr_pages)
{
	int i;

	for (i = 0; i < nr_pages; i++) {
		struct page *page;

		page = pfn_to_online_page(pfn + i);
		if (!page)
			continue;
		return page;
	}
	return NULL;
}
/**
 * start_isolate_page_range() - make the page-allocation-type of a range of
 * pages MIGRATE_ISOLATE.
 * @start_pfn:		The lower PFN of the range to be isolated.
 * @end_pfn:		The upper PFN of the range to be isolated.
 *			start_pfn/end_pfn must be aligned to pageblock_order.
 * @migratetype:	Migrate type to set in error recovery.
 * @flags:		The following flags are allowed (they can be combined in
 *			a bit mask):
 *			MEMORY_OFFLINE - isolate to offline (!allocate) memory
 *					 e.g., skip over PageHWPoison() pages
 *			REPORT_FAILURE - report details about the failure to
 *					 isolate the range
 *
 * Making the page-allocation-type MIGRATE_ISOLATE means free pages in
 * the range will never be allocated. Any free pages and pages freed in the
 * future will not be allocated again. If the specified range includes
 * migrate types other than MOVABLE or CMA, this will fail with -EBUSY. To
 * finally isolate all pages in the range, the caller has to free all pages
 * in the range. test_pages_isolated() can be used to test for that.
 *
 * There is no high-level synchronization mechanism that prevents two threads
 * from trying to isolate overlapping ranges. If this happens, one thread
 * will notice pageblocks in the overlapping range already set to isolate.
 * This happens in set_migratetype_isolate(), which then returns an error. We
 * clean up by restoring the migration type on any pageblocks we may have
 * modified and return -EBUSY to the caller. This prevents two threads from
 * simultaneously working on overlapping ranges.
 *
 * Return: the number of isolated pageblocks on success, or -EBUSY if any
 * part of the range cannot be isolated.
 */
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			     unsigned migratetype, int flags)
{
	unsigned long pfn;
	unsigned long undo_pfn;
	struct page *page;
	int nr_isolate_pageblock = 0;

	BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
	BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));

	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page) {
			if (set_migratetype_isolate(page, migratetype, flags)) {
				undo_pfn = pfn;
				goto undo;
			}
			nr_isolate_pageblock++;
		}
	}
	return nr_isolate_pageblock;
undo:
	for (pfn = start_pfn;
	     pfn < undo_pfn;
	     pfn += pageblock_nr_pages) {
		struct page *page = pfn_to_online_page(pfn);
		if (!page)
			continue;
		unset_migratetype_isolate(page, migratetype);
	}

	return -EBUSY;
}
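/*
 * Usage sketch (illustrative only, not part of the original file): callers
 * such as alloc_contig_range() pair this API with page migration and
 * test_pages_isolated(). The helper name below is hypothetical and the
 * migration step is elided; it only shows the isolate/verify/undo shape.
 */
#if 0
static int isolate_and_verify_range(unsigned long start_pfn,
				    unsigned long end_pfn)
{
	int ret;

	/* Both PFNs are assumed to be pageblock-aligned. */
	ret = start_isolate_page_range(start_pfn, end_pfn,
				       MIGRATE_MOVABLE, 0);
	if (ret < 0)
		return ret;	/* -EBUSY: some pageblock was not isolatable */

	/* ... migrate any still-allocated pages out of the range ... */

	if (test_pages_isolated(start_pfn, end_pfn, 0)) {
		/* Range is still populated: give the pageblocks back. */
		undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
		return -EBUSY;
	}

	/* The range is now free and cannot be allocated from. */
	return 0;
}
#endif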
/*
 * Make isolated pages available again.
 */
void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			     unsigned migratetype)
{
	unsigned long pfn;
	struct page *page;

	BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
	BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));

	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (!page || !is_migrate_isolate_page(page))
			continue;
		unset_migratetype_isolate(page, migratetype);
	}
}
/*
 * Test whether all pages in the range are free (i.e. isolated).
 * All pages in [start_pfn...end_pfn) must be in the same zone.
 * zone->lock must be held before calling this.
 *
 * Returns the last tested pfn.
 */
static unsigned long
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
				  int flags)
{
	struct page *page;

	while (pfn < end_pfn) {
		if (!pfn_valid_within(pfn)) {
			pfn++;
			continue;
		}
		page = pfn_to_page(pfn);
		if (PageBuddy(page))
			/*
			 * If the page is on a free list, it has to be on
			 * the correct MIGRATE_ISOLATE freelist. There is no
			 * simple way to verify that as VM_BUG_ON(), though.
			 */
			pfn += 1 << page_order(page);
		else if ((flags & MEMORY_OFFLINE) && PageHWPoison(page))
			/* A HWPoisoned page cannot also be PageBuddy */
			pfn++;
		else
			break;
	}

	return pfn;
}
/* The caller should ensure that the requested range is in a single zone. */
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
			int isol_flags)
{
	unsigned long pfn, flags;
	struct page *page;
	struct zone *zone;

	/*
	 * Note: pageblock_nr_pages != MAX_ORDER, so chunks of free pages
	 * are not necessarily aligned to pageblock_nr_pages. Because of
	 * that, walking the free chunks alone is not enough; check the
	 * migratetype of each pageblock first.
	 */
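	/*
	 * Illustrative example (not in the original source): with
	 * pageblock_order == 9 and MAX_ORDER == 11, a single free chunk can
	 * be order-10 and span two pageblocks, so a walk over free chunks
	 * alone could silently cross from an isolated pageblock into a
	 * non-isolated one. Verifying every pageblock's migratetype first
	 * rules that out.
	 */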
	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page && !is_migrate_isolate_page(page))
			break;
	}
	page = __first_valid_page(start_pfn, end_pfn - start_pfn);
	if ((pfn < end_pfn) || !page)
		return -EBUSY;

	/* Check that all pages are free or marked as ISOLATED. */
	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	pfn = __test_page_isolated_in_pageblock(start_pfn, end_pfn, isol_flags);
	spin_unlock_irqrestore(&zone->lock, flags);

	trace_test_pages_isolated(start_pfn, end_pfn, pfn);

	return pfn < end_pfn ? -EBUSY : 0;
}
struct page *alloc_migrate_target(struct page *page, unsigned long private)
{
	return new_page_nodemask(page, numa_node_id(), &node_states[N_MEMORY]);
}
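/*
 * Usage sketch (illustrative only, not part of the original file):
 * alloc_migrate_target() has the new_page_t callback shape, so a caller
 * draining an isolated range can pass it straight to migrate_pages(), as
 * __alloc_contig_migrate_range() does. The page list here is a placeholder.
 */
#if 0
	LIST_HEAD(pages);	/* pages gathered from the isolated range */

	/* ... isolate in-use pages from the range onto @pages ... */
	migrate_pages(&pages, alloc_migrate_target, NULL, 0,
		      MIGRATE_SYNC, MR_CONTIG_RANGE);
#endif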