/*
 * linux/mm/page_isolation.c
 */

#include <linux/mm.h>
#include <linux/page-isolation.h>
#include <linux/pageblock-flags.h>
#include <linux/memory.h>
#include "internal.h"
int set_migratetype_isolate(struct page *page, bool skip_hwpoisoned_pages)
{
	struct zone *zone;
	unsigned long flags, pfn;
	struct memory_isolate_notify arg;
	int notifier_ret;
	int ret = -EBUSY;

	zone = page_zone(page);

	spin_lock_irqsave(&zone->lock, flags);

	pfn = page_to_pfn(page);
	arg.start_pfn = pfn;
	arg.nr_pages = pageblock_nr_pages;
	arg.pages_found = 0;

	/*
	 * It may be possible to isolate a pageblock even if the
	 * migratetype is not MIGRATE_MOVABLE. The memory isolation
	 * notifier chain is used by balloon drivers to return the
	 * number of pages in a range that are held by the balloon
	 * driver to shrink memory. If all the pages are accounted for
	 * by balloons, are free, or on the LRU, isolation can continue.
	 * Later, for example, when the memory hotplug notifier runs, these
	 * pages reported as "can be isolated" should be isolated (freed)
	 * by the balloon driver through the memory notifier chain.
	 */
	notifier_ret = memory_isolate_notify(MEM_ISOLATE_COUNT, &arg);
	notifier_ret = notifier_to_errno(notifier_ret);
	if (notifier_ret)
		goto out;
	/*
	 * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
	 * We just check MOVABLE pages.
	 */
	if (!has_unmovable_pages(zone, page, arg.pages_found,
				 skip_hwpoisoned_pages))
		ret = 0;

	/*
	 * immobile means "not-on-LRU" pages. If immobile is larger than
	 * removable-by-driver pages reported by notifier, we'll fail.
	 */

out:
	if (!ret) {
		unsigned long nr_pages;
		int migratetype = get_pageblock_migratetype(page);

		set_pageblock_migratetype(page, MIGRATE_ISOLATE);
		nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE);

		__mod_zone_freepage_state(zone, -nr_pages, migratetype);
	}

	spin_unlock_irqrestore(&zone->lock, flags);
	if (!ret)
		drain_all_pages();
	return ret;
}
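/*
 * Illustrative sketch (not part of this file): how a balloon driver might
 * take part in the memory isolation notifier chain described above. On
 * MEM_ISOLATE_COUNT it counts the pages it owns in the queried range and
 * reports them via pages_found so isolation can proceed. The helper
 * example_balloon_page_owned() and the example_* names are hypothetical;
 * register_memory_isolate_notifier() and struct memory_isolate_notify come
 * from <linux/memory.h>.
 */
#if 0	/* example only, not compiled */
static int example_isolate_cb(struct notifier_block *self,
			      unsigned long action, void *arg)
{
	struct memory_isolate_notify *mem = arg;
	unsigned long pfn;

	if (action != MEM_ISOLATE_COUNT)
		return NOTIFY_OK;

	for (pfn = mem->start_pfn; pfn < mem->start_pfn + mem->nr_pages; pfn++)
		if (example_balloon_page_owned(pfn))	/* hypothetical helper */
			mem->pages_found++;

	return NOTIFY_OK;
}

static struct notifier_block example_isolate_nb = {
	.notifier_call = example_isolate_cb,
};

/* Typically registered from the driver's init path: */
/* register_memory_isolate_notifier(&example_isolate_nb); */
#endif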
void unset_migratetype_isolate(struct page *page, unsigned migratetype)
{
	struct zone *zone;
	unsigned long flags, nr_pages;

	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
		goto out;
	nr_pages = move_freepages_block(zone, page, migratetype);
	__mod_zone_freepage_state(zone, nr_pages, migratetype);
	set_pageblock_migratetype(page, migratetype);
out:
	spin_unlock_irqrestore(&zone->lock, flags);
}
static inline struct page *
__first_valid_page(unsigned long pfn, unsigned long nr_pages)
{
	int i;
	for (i = 0; i < nr_pages; i++)
		if (pfn_valid_within(pfn + i))
			break;
	if (unlikely(i == nr_pages))
		return NULL;
	return pfn_to_page(pfn + i);
}
/*
 * start_isolate_page_range() -- make the page allocation type of a range of
 * pages MIGRATE_ISOLATE.
 * @start_pfn: The lower PFN of the range to be isolated.
 * @end_pfn: The upper PFN of the range to be isolated.
 * @migratetype: migrate type to set in error recovery.
 *
 * Making the page allocation type MIGRATE_ISOLATE means free pages in
 * the range will never be allocated. Any free pages and pages freed in the
 * future will not be allocated again.
 *
 * start_pfn/end_pfn must be aligned to pageblock_order.
 * Returns 0 on success and -EBUSY if any part of the range cannot be isolated.
 */
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			     unsigned migratetype, bool skip_hwpoisoned_pages)
{
	unsigned long pfn;
	unsigned long undo_pfn;
	struct page *page;

	BUG_ON((start_pfn) & (pageblock_nr_pages - 1));
	BUG_ON((end_pfn) & (pageblock_nr_pages - 1));

	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page &&
		    set_migratetype_isolate(page, skip_hwpoisoned_pages)) {
			undo_pfn = pfn;
			goto undo;
		}
	}
	return 0;
undo:
	for (pfn = start_pfn;
	     pfn < undo_pfn;
	     pfn += pageblock_nr_pages)
		unset_migratetype_isolate(pfn_to_page(pfn), migratetype);

	return -EBUSY;
}
/*
 * Make isolated pages available again.
 */
int undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			    unsigned migratetype)
{
	unsigned long pfn;
	struct page *page;

	BUG_ON((start_pfn) & (pageblock_nr_pages - 1));
	BUG_ON((end_pfn) & (pageblock_nr_pages - 1));

	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (!page || get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
			continue;
		unset_migratetype_isolate(page, migratetype);
	}
	return 0;
}
/*
 * Test whether all pages in the range are free (i.e. isolated) or not.
 * All pages in [start_pfn...end_pfn) must be in the same zone.
 * zone->lock must be held before calling this.
 *
 * Returns 1 if all pages in the range are isolated.
 */
static int
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
				  bool skip_hwpoisoned_pages)
{
	struct page *page;

	while (pfn < end_pfn) {
		if (!pfn_valid_within(pfn)) {
			pfn++;
			continue;
		}
		page = pfn_to_page(pfn);
		if (PageBuddy(page)) {
			/*
			 * If a race between isolation and allocation happens,
			 * some free pages could be on the MIGRATE_MOVABLE list
			 * although the pageblock's migration type is
			 * MIGRATE_ISOLATE. Catch it and move the page onto the
			 * MIGRATE_ISOLATE list.
			 */
			if (get_freepage_migratetype(page) != MIGRATE_ISOLATE) {
				struct page *end_page;

				end_page = page + (1 << page_order(page)) - 1;
				move_freepages(page_zone(page), page, end_page,
					       MIGRATE_ISOLATE);
			}
			pfn += 1 << page_order(page);
		} else if (page_count(page) == 0 &&
			   get_freepage_migratetype(page) == MIGRATE_ISOLATE)
			pfn += 1;
		else if (skip_hwpoisoned_pages && PageHWPoison(page)) {
			/*
			 * The HWPoisoned page may not be in the buddy
			 * system, and page_count() is not 0.
			 */
			pfn++;
			continue;
		} else
			break;
	}
	if (pfn < end_pfn)
		return 0;
	return 1;
}
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
			bool skip_hwpoisoned_pages)
{
	unsigned long pfn, flags;
	struct page *page;
	struct zone *zone;
	int ret;

	/*
	 * Note: pageblock_nr_pages != MAX_ORDER_NR_PAGES, so chunks of free
	 * pages are not necessarily aligned to pageblock_nr_pages.
	 * Check the pageblock migratetype first.
	 */
	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page && get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
			break;
	}
	page = __first_valid_page(start_pfn, end_pfn - start_pfn);
	if ((pfn < end_pfn) || !page)
		return -EBUSY;
	/* Check all pages are free or marked as ISOLATED */
	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	ret = __test_page_isolated_in_pageblock(start_pfn, end_pfn,
						skip_hwpoisoned_pages);
	spin_unlock_irqrestore(&zone->lock, flags);
	return ret ? 0 : -EBUSY;
}
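/*
 * Illustrative sketch (not part of this file): a typical caller isolates a
 * pageblock-aligned range, migrates the pages still in use out of it,
 * verifies the result with test_pages_isolated(), and undoes the isolation
 * if anything fails. The function name and the migration step are
 * hypothetical placeholders.
 */
#if 0	/* example only, not compiled */
static int example_claim_range(unsigned long start_pfn, unsigned long end_pfn)
{
	int ret;

	ret = start_isolate_page_range(start_pfn, end_pfn,
				       MIGRATE_MOVABLE, true);
	if (ret)
		return ret;

	/* ... migrate or reclaim in-use pages in [start_pfn, end_pfn) ... */

	if (test_pages_isolated(start_pfn, end_pfn, true)) {
		undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
		return -EBUSY;
	}

	/* The range now contains only free, MIGRATE_ISOLATE pages. */
	return 0;
}
#endif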
struct page *alloc_migrate_target(struct page *page, unsigned long private,
				  int **resultp)
{
	gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE;

	if (PageHighMem(page))
		gfp_mask |= __GFP_HIGHMEM;

	return alloc_page(gfp_mask);
}
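/*
 * Illustrative sketch (not part of this file): alloc_migrate_target() is
 * intended as the page-allocation callback used when migrating pages out of
 * an isolated range. The exact migrate_pages() signature differs between
 * kernel versions, so the call below only approximates the usual pattern and
 * the wrapper name is hypothetical.
 */
#if 0	/* example only, not compiled */
static int example_migrate_away(struct list_head *isolated_pages)
{
	/*
	 * Migrate every page on the list, allocating destination pages via
	 * alloc_migrate_target(); on failure the caller must put back any
	 * pages that remain on the list.
	 */
	return migrate_pages(isolated_pages, alloc_migrate_target, 0,
			     MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
}
#endif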