/*
 * linux/mm/page_isolation.c
 */

#include <linux/mm.h>
#include <linux/page-isolation.h>
#include <linux/pageblock-flags.h>
#include <linux/memory.h>
#include <linux/hugetlb.h>
#include "internal.h"
static int set_migratetype_isolate(struct page *page,
				bool skip_hwpoisoned_pages)
{
	struct zone *zone;
	unsigned long flags, pfn;
	struct memory_isolate_notify arg;
	int notifier_ret;
	int ret = -EBUSY;

	zone = page_zone(page);

	spin_lock_irqsave(&zone->lock, flags);

	pfn = page_to_pfn(page);
	arg.start_pfn = pfn;
	arg.nr_pages = pageblock_nr_pages;
	arg.pages_found = 0;

	/*
	 * It may be possible to isolate a pageblock even if the
	 * migratetype is not MIGRATE_MOVABLE. The memory isolation
	 * notifier chain is used by balloon drivers to return the
	 * number of pages in a range that are held by the balloon
	 * driver to shrink memory. If all the pages are accounted for
	 * by balloons, are free, or on the LRU, isolation can continue.
	 * Later, for example, when the memory hotplug notifier runs, these
	 * pages reported as "can be isolated" should be isolated (freed)
	 * by the balloon driver through the memory notifier chain.
	 */
	notifier_ret = memory_isolate_notify(MEM_ISOLATE_COUNT, &arg);
	notifier_ret = notifier_to_errno(notifier_ret);
	if (notifier_ret)
		goto out;
	/*
	 * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
	 * We just check MOVABLE pages.
	 */
	if (!has_unmovable_pages(zone, page, arg.pages_found,
				 skip_hwpoisoned_pages))
		ret = 0;

	/*
	 * immobile means "not-on-LRU" pages. If there are more immobile
	 * pages than removable-by-driver pages reported by the notifier,
	 * we'll fail.
	 */

out:
	if (!ret) {
		unsigned long nr_pages;
		int migratetype = get_pageblock_migratetype(page);

		set_pageblock_migratetype(page, MIGRATE_ISOLATE);
		zone->nr_isolate_pageblock++;
		nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE);

		__mod_zone_freepage_state(zone, -nr_pages, migratetype);
	}

	spin_unlock_irqrestore(&zone->lock, flags);
	if (!ret)
		drain_all_pages(zone);
	return ret;
}
static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
{
	struct zone *zone;
	unsigned long flags, nr_pages;
	struct page *isolated_page = NULL;
	unsigned int order;
	unsigned long page_idx, buddy_idx;
	struct page *buddy;

	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
		goto out;

	/*
	 * Because a freepage of more than pageblock_order on an isolated
	 * pageblock is restricted from merging due to the freepage counting
	 * problem, it is possible that there is a free buddy page.
	 * move_freepages_block() does not handle merging, so we need another
	 * approach to merge them: isolating and then freeing the page will
	 * make these pages merge.
	 */
	if (PageBuddy(page)) {
		order = page_order(page);
		if (order >= pageblock_order) {
			page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
			buddy_idx = __find_buddy_index(page_idx, order);
			buddy = page + (buddy_idx - page_idx);

			if (pfn_valid_within(page_to_pfn(buddy)) &&
			    !is_migrate_isolate_page(buddy)) {
				__isolate_free_page(page, order);
				kernel_map_pages(page, (1 << order), 1);
				set_page_refcounted(page);
				isolated_page = page;
			}
		}
	}

	/*
	 * If we isolate a freepage of more than pageblock_order, there
	 * should be no freepage left in the range, so we can avoid the
	 * costly pageblock scan for freepage moving.
	 */
	if (!isolated_page) {
		nr_pages = move_freepages_block(zone, page, migratetype);
		__mod_zone_freepage_state(zone, nr_pages, migratetype);
	}
	set_pageblock_migratetype(page, migratetype);
	zone->nr_isolate_pageblock--;
out:
	spin_unlock_irqrestore(&zone->lock, flags);
	if (isolated_page)
		__free_pages(isolated_page, order);
}
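/*
 * Note on the buddy arithmetic above: within a MAX_ORDER-aligned block,
 * __find_buddy_index() simply flips the order bit of the index, i.e.
 * buddy_idx = page_idx ^ (1 << order). For example, a free page at
 * index 0 with order 10 has its buddy at index 0 ^ (1 << 10) = 1024,
 * so buddy = page + 1024.
 */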
static inline struct page *
__first_valid_page(unsigned long pfn, unsigned long nr_pages)
{
	int i;

	for (i = 0; i < nr_pages; i++)
		if (pfn_valid_within(pfn + i))
			break;
	if (unlikely(i == nr_pages))
		return NULL;
	return pfn_to_page(pfn + i);
}
/*
 * start_isolate_page_range() -- make the page-allocation-type of a range
 * of pages MIGRATE_ISOLATE.
 * @start_pfn: The lower PFN of the range to be isolated.
 * @end_pfn: The upper PFN of the range to be isolated.
 * @migratetype: migrate type to set in error recovery.
 *
 * Making the page-allocation-type MIGRATE_ISOLATE means free pages in
 * the range will never be allocated. Any free pages and pages freed in the
 * future will not be allocated again.
 *
 * start_pfn/end_pfn must be aligned to pageblock_order.
 * Returns 0 on success and -EBUSY if any part of the range cannot be
 * isolated.
 */
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			     unsigned migratetype, bool skip_hwpoisoned_pages)
{
	unsigned long pfn;
	unsigned long undo_pfn;
	struct page *page;

	BUG_ON((start_pfn) & (pageblock_nr_pages - 1));
	BUG_ON((end_pfn) & (pageblock_nr_pages - 1));

	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page &&
		    set_migratetype_isolate(page, skip_hwpoisoned_pages)) {
			undo_pfn = pfn;
			goto undo;
		}
	}
	return 0;
undo:
	for (pfn = start_pfn;
	     pfn < undo_pfn;
	     pfn += pageblock_nr_pages)
		unset_migratetype_isolate(pfn_to_page(pfn), migratetype);

	return -EBUSY;
}
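/*
 * Illustrative sketch, not part of the original file: the typical caller
 * pattern for the API above, loosely modelled on alloc_contig_range() in
 * mm/page_alloc.c. example_claim_range() is hypothetical; start_pfn and
 * end_pfn are assumed pageblock-aligned and the migration of in-use pages
 * is elided. Kept under #if 0 so the file still compiles.
 */
#if 0
static int example_claim_range(unsigned long start_pfn, unsigned long end_pfn)
{
	int ret;

	/* Mark every pageblock in the range MIGRATE_ISOLATE. */
	ret = start_isolate_page_range(start_pfn, end_pfn,
				       MIGRATE_MOVABLE, false);
	if (ret)
		return ret;

	/*
	 * Migrate away any pages still in use (elided), then check that
	 * everything in the range is now free and isolated.
	 */
	if (test_pages_isolated(start_pfn, end_pfn, false))
		ret = -EBUSY;

	/*
	 * On success the caller would take the free pages off the isolate
	 * free lists here (elided). Either way, restore the original
	 * migratetype afterwards.
	 */
	undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
	return ret;
}
#endif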
/*
 * Make isolated pages available again.
 */
int undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			    unsigned migratetype)
{
	unsigned long pfn;
	struct page *page;

	BUG_ON((start_pfn) & (pageblock_nr_pages - 1));
	BUG_ON((end_pfn) & (pageblock_nr_pages - 1));

	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (!page || get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
			continue;
		unset_migratetype_isolate(page, migratetype);
	}
	return 0;
}
/*
 * Test whether all pages in the range are free (i.e. isolated) or not.
 * All pages in [start_pfn...end_pfn) must be in the same zone.
 * zone->lock must be held before calling this.
 *
 * Returns 1 if all pages in the range are isolated.
 */
static int
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
				  bool skip_hwpoisoned_pages)
{
	struct page *page;

	while (pfn < end_pfn) {
		if (!pfn_valid_within(pfn)) {
			pfn++;
			continue;
		}
		page = pfn_to_page(pfn);
		if (PageBuddy(page))
			/*
			 * If the page is on a free list, it has to be on
			 * the correct MIGRATE_ISOLATE freelist. There is no
			 * simple way to verify that as VM_BUG_ON(), though.
			 */
			pfn += 1 << page_order(page);
		else if (skip_hwpoisoned_pages && PageHWPoison(page))
			/* A HWPoisoned page cannot also be PageBuddy */
			pfn++;
		else
			break;
	}
	if (pfn < end_pfn)
		return 0;
	return 1;
}
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
			bool skip_hwpoisoned_pages)
{
	unsigned long pfn, flags;
	struct page *page;
	struct zone *zone;
	int ret;

	/*
	 * Note: pageblock_nr_pages != MAX_ORDER, so chunks of free pages
	 * are not necessarily aligned to pageblock_nr_pages. Just check
	 * the migratetype of each pageblock first.
	 */
	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page && get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
			break;
	}
	page = __first_valid_page(start_pfn, end_pfn - start_pfn);
	if ((pfn < end_pfn) || !page)
		return -EBUSY;
	/* Check that all pages are free or marked as ISOLATED */
	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	ret = __test_page_isolated_in_pageblock(start_pfn, end_pfn,
						skip_hwpoisoned_pages);
	spin_unlock_irqrestore(&zone->lock, flags);
	return ret ? 0 : -EBUSY;
}
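/*
 * Note on the return convention above: __test_page_isolated_in_pageblock()
 * returns 1 only when the scan reaches end_pfn without finding a page that
 * is neither free nor hwpoisoned, so test_pages_isolated() maps 1 to 0
 * ("fully isolated") and 0 to -EBUSY.
 */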
struct page *alloc_migrate_target(struct page *page, unsigned long private,
				  int **resultp)
{
	gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE;

	/*
	 * TODO: allocate a destination hugepage from the nearest neighbor
	 * node, in accordance with the memory policy of the user process
	 * if possible. For now, as a simple work-around, we use the next
	 * node for the destination.
	 */
	if (PageHuge(page)) {
		nodemask_t src = nodemask_of_node(page_to_nid(page));
		nodemask_t dst;

		nodes_complement(dst, src);
		return alloc_huge_page_node(page_hstate(compound_head(page)),
					    next_node(page_to_nid(page), dst));
	}

	if (PageHighMem(page))
		gfp_mask |= __GFP_HIGHMEM;

	return alloc_page(gfp_mask);
}
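/*
 * Illustrative sketch, not part of the original file: alloc_migrate_target()
 * matches the new_page_t callback type, so a caller that has gathered the
 * in-use pages of an isolated range onto a list can hand it straight to
 * migrate_pages(), as memory hotplug's do_migrate_range() does. This
 * assumes <linux/migrate.h> is included; "pagelist" is the caller's list
 * of pages to move. Kept under #if 0 so the file still compiles.
 */
#if 0
static int example_drain_range(struct list_head *pagelist)
{
	return migrate_pages(pagelist, alloc_migrate_target, NULL,
			     0, MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
}
#endif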