// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/page_isolation.c
 */

#include <linux/mm.h>
#include <linux/page-isolation.h>
#include <linux/pageblock-flags.h>
#include <linux/memory.h>
#include <linux/hugetlb.h>
#include <linux/page_owner.h>
#include <linux/migrate.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/page_isolation.h>

static int set_migratetype_isolate(struct page *page,
				bool skip_hwpoisoned_pages)
{
	struct zone *zone;
	unsigned long flags, pfn;
	struct memory_isolate_notify arg;
	int notifier_ret;
	int ret = -EBUSY;

	zone = page_zone(page);

	spin_lock_irqsave(&zone->lock, flags);

	pfn = page_to_pfn(page);
	arg.start_pfn = pfn;
	arg.nr_pages = pageblock_nr_pages;
	arg.pages_found = 0;

	/*
	 * It may be possible to isolate a pageblock even if the
	 * migratetype is not MIGRATE_MOVABLE. The memory isolation
	 * notifier chain is used by balloon drivers to return the
	 * number of pages in a range that are held by the balloon
	 * driver to shrink memory. If all the pages are accounted for
	 * by balloons, are free, or on the LRU, isolation can continue.
	 * Later, for example, when the memory hotplug notifier runs, these
	 * pages reported as "can be isolated" should be isolated (freed)
	 * by the balloon driver through the memory notifier chain.
	 */
	notifier_ret = memory_isolate_notify(MEM_ISOLATE_COUNT, &arg);
	notifier_ret = notifier_to_errno(notifier_ret);
	if (notifier_ret)
		goto out;
	/*
	 * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
	 * We just check MOVABLE pages.
	 */
	if (!has_unmovable_pages(zone, page, arg.pages_found,
				 skip_hwpoisoned_pages))
		ret = 0;

	/*
	 * "Immobile" means not-on-LRU pages. If there are more immobile
	 * pages than removable-by-driver pages reported by the notifier,
	 * we'll fail.
	 */

out:
	if (!ret) {
		unsigned long nr_pages;
		int migratetype = get_pageblock_migratetype(page);

		set_pageblock_migratetype(page, MIGRATE_ISOLATE);
		zone->nr_isolate_pageblock++;
		nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE,
						NULL);

		__mod_zone_freepage_state(zone, -nr_pages, migratetype);
	}

	spin_unlock_irqrestore(&zone->lock, flags);
	if (!ret)
		drain_all_pages(zone);
	return ret;
}
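
/*
 * Illustrative sketch of how a driver answers the MEM_ISOLATE_COUNT query
 * above: register a memory isolate notifier and report how many pages in
 * the queried range the driver holds. The callback name and
 * my_balloon_pages_in_range() are hypothetical; see
 * arch/powerpc/platforms/pseries/cmm.c for a real user of this chain.
 *
 *	static int my_isolate_cb(struct notifier_block *self,
 *				 unsigned long action, void *arg)
 *	{
 *		struct memory_isolate_notify *marg = arg;
 *
 *		if (action == MEM_ISOLATE_COUNT)
 *			marg->pages_found +=
 *				my_balloon_pages_in_range(marg->start_pfn,
 *							  marg->nr_pages);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_isolate_nb = {
 *		.notifier_call = my_isolate_cb,
 *	};
 *	...
 *	register_memory_isolate_notifier(&my_isolate_nb);
 */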

static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
{
	struct zone *zone;
	unsigned long flags, nr_pages;
	bool isolated_page = false;
	unsigned int order;
	unsigned long pfn, buddy_pfn;
	struct page *buddy;

	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	if (!is_migrate_isolate_page(page))
		goto out;

	/*
	 * Because a free page with more than pageblock_order on an isolated
	 * pageblock is restricted from merging due to the freepage counting
	 * problem, it is possible that a free buddy page exists.
	 * move_freepages_block() does not handle merging, so we need another
	 * approach: isolating and then freeing the page will make these
	 * pages merge.
	 */
	if (PageBuddy(page)) {
		order = page_order(page);
		if (order >= pageblock_order) {
			pfn = page_to_pfn(page);
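			/*
			 * __find_buddy_pfn() locates the buddy by flipping
			 * bit @order of the pfn, i.e. it returns
			 * pfn ^ (1 << order).
			 */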
			buddy_pfn = __find_buddy_pfn(pfn, order);
			buddy = page + (buddy_pfn - pfn);

			if (pfn_valid_within(buddy_pfn) &&
			    !is_migrate_isolate_page(buddy)) {
				__isolate_free_page(page, order);
				isolated_page = true;
			}
		}
	}

	/*
	 * If we isolated a free page with more than pageblock_order, there
	 * should be no free page left in the range, so we can avoid a costly
	 * pageblock scan when moving free pages.
	 */
	if (!isolated_page) {
		nr_pages = move_freepages_block(zone, page, migratetype, NULL);
		__mod_zone_freepage_state(zone, nr_pages, migratetype);
	}
	set_pageblock_migratetype(page, migratetype);
	zone->nr_isolate_pageblock--;
out:
	spin_unlock_irqrestore(&zone->lock, flags);
	if (isolated_page) {
		post_alloc_hook(page, order, __GFP_MOVABLE);
		__free_pages(page, order);
	}
}

static inline struct page *
__first_valid_page(unsigned long pfn, unsigned long nr_pages)
{
	int i;

	for (i = 0; i < nr_pages; i++) {
		struct page *page;

		if (!pfn_valid_within(pfn + i))
			continue;
		page = pfn_to_online_page(pfn + i);
		if (!page)
			continue;
		return page;
	}
	return NULL;
}

/*
 * start_isolate_page_range() -- make the page allocation type of a range
 * of pages MIGRATE_ISOLATE.
 * @start_pfn:	The lower PFN of the range to be isolated.
 * @end_pfn:	The upper PFN of the range to be isolated.
 * @migratetype: migrate type to set in error recovery.
 *
 * Making the page allocation type MIGRATE_ISOLATE means free pages in
 * the range will never be allocated. Any free pages and pages freed in the
 * future will not be allocated again.
 *
 * start_pfn/end_pfn must be aligned to pageblock_order.
 * Returns 0 on success and -EBUSY if any part of the range cannot be isolated.
 */
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			     unsigned migratetype, bool skip_hwpoisoned_pages)
{
	unsigned long pfn;
	unsigned long undo_pfn;
	struct page *page;

	BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
	BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));

	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page &&
		    set_migratetype_isolate(page, skip_hwpoisoned_pages)) {
			undo_pfn = pfn;
			goto undo;
		}
	}
	return 0;
undo:
	for (pfn = start_pfn;
	     pfn < undo_pfn;
	     pfn += pageblock_nr_pages) {
		struct page *page = pfn_to_online_page(pfn);
		if (!page)
			continue;
		unset_migratetype_isolate(page, migratetype);
	}

	return -EBUSY;
}
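
/*
 * Illustrative calling sequence (a sketch; my_claim_range() is a
 * hypothetical helper, but alloc_contig_range() and memory offlining
 * follow variants of this isolate -> migrate -> test -> undo pattern):
 *
 *	static int my_claim_range(unsigned long start, unsigned long end)
 *	{
 *		int ret;
 *
 *		ret = start_isolate_page_range(start, end,
 *					       MIGRATE_MOVABLE, false);
 *		if (ret)
 *			return ret;
 *
 *		(callers normally migrate busy pages out of the range here)
 *
 *		ret = test_pages_isolated(start, end, false);
 *		undo_isolate_page_range(start, end, MIGRATE_MOVABLE);
 *		return ret;
 *	}
 */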

/*
 * Make isolated pages available again.
 */
int undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			    unsigned migratetype)
{
	unsigned long pfn;
	struct page *page;

	BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
	BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));

	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (!page || !is_migrate_isolate_page(page))
			continue;
		unset_migratetype_isolate(page, migratetype);
	}
	return 0;
}

/*
 * Test whether all pages in the range are free (i.e. isolated) or not.
 * All pages in [start_pfn...end_pfn) must be in the same zone.
 * zone->lock must be held before calling this.
 *
 * Returns the last tested pfn.
 */
static unsigned long
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
				  bool skip_hwpoisoned_pages)
{
	struct page *page;

	while (pfn < end_pfn) {
		if (!pfn_valid_within(pfn)) {
			pfn++;
			continue;
		}
		page = pfn_to_page(pfn);
		if (PageBuddy(page))
			/*
			 * If the page is on a free list, it has to be on
			 * the correct MIGRATE_ISOLATE freelist. There is no
			 * simple way to verify that as VM_BUG_ON(), though.
			 */
			pfn += 1 << page_order(page);
		else if (skip_hwpoisoned_pages && PageHWPoison(page))
			/* A HWPoisoned page cannot also be PageBuddy */
			pfn++;
		else
			break;
	}

	return pfn;
}

/* Caller should ensure that the requested range is in a single zone */
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
			bool skip_hwpoisoned_pages)
{
	unsigned long pfn, flags;
	struct page *page;
	struct zone *zone;

	/*
	 * Note: pageblock_nr_pages != MAX_ORDER, so chunks of free pages
	 * are not necessarily aligned to pageblock_nr_pages boundaries.
	 * Hence we just check the migratetype first.
	 */
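	/*
	 * Concrete illustration (assuming the common x86 configuration,
	 * pageblock_order = 9 and MAX_ORDER = 11): a single free chunk of
	 * order 10 covers two order-9 pageblocks, so free-chunk boundaries
	 * need not coincide with pageblock boundaries.
	 */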
	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page && !is_migrate_isolate_page(page))
			break;
	}
	page = __first_valid_page(start_pfn, end_pfn - start_pfn);
	if ((pfn < end_pfn) || !page)
		return -EBUSY;
	/* Check that all pages are free or marked as ISOLATED */
	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	pfn = __test_page_isolated_in_pageblock(start_pfn, end_pfn,
						skip_hwpoisoned_pages);
	spin_unlock_irqrestore(&zone->lock, flags);

	trace_test_pages_isolated(start_pfn, end_pfn, pfn);

	return pfn < end_pfn ? -EBUSY : 0;
}
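
/*
 * Sketch of intended use (the page list and reason code below are
 * assumptions that depend on the caller): alloc_migrate_target() is passed
 * to migrate_pages() as its new_page_t callback when draining pages out of
 * an isolated range, e.g.
 *
 *	migrate_pages(&pagelist, alloc_migrate_target, NULL, 0,
 *		      MIGRATE_SYNC, MR_CMA);
 */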
struct page *alloc_migrate_target(struct page *page, unsigned long private,
				  int **resultp)
{
	return new_page_nodemask(page, numa_node_id(), &node_states[N_MEMORY]);
}