/* mm/page_ext.c */
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/page_ext.h>
#include <linux/memory.h>
#include <linux/vmalloc.h>
#include <linux/kmemleak.h>
#include <linux/page_owner.h>
#include <linux/page_idle.h>
/*
 * struct page extension
 *
 * This is the feature to manage memory for extended data per page.
 *
 * Until now, we had to modify struct page itself to store extra data per
 * page, which requires rebuilding the kernel, a really time-consuming
 * process. And sometimes a rebuild is impossible due to third-party module
 * dependencies. Finally, enlarging struct page could cause unwanted changes
 * in system behaviour.
 *
 * This feature is intended to overcome the problems mentioned above. It
 * allocates memory for extended data per page somewhere other than struct
 * page itself, and that memory is accessed through the accessor functions
 * provided by this code. During the boot process, we check whether a huge
 * chunk of memory actually needs to be allocated; if not, we avoid
 * allocating it at all. Thanks to this, the feature can be included in the
 * kernel by default, avoiding rebuilds and the problems described above.
 *
 * To make this work well, there are two callbacks for clients. One is the
 * need callback, which is mandatory if the client wants to avoid useless
 * memory allocation at boot time. The other is the optional init callback,
 * which is used to do proper initialization after memory is allocated.
 *
 * The need callback decides whether the extended memory allocation is
 * needed or not. Sometimes users want certain features deactivated for a
 * given boot, making the extra memory unnecessary. To avoid allocating a
 * huge chunk of memory in that case, each client expresses its need for
 * extra memory through the need callback. If any of the need callbacks
 * returns true, someone needs extra memory and the page extension core
 * allocates it. If none of them returns true, no memory is needed for this
 * boot and the page extension core skips the allocation entirely. As a
 * result, no memory is wasted.
 *
 * The init callback is used to do proper initialization after page
 * extension is completely initialized. On sparse memory systems, this
 * extra memory is allocated some time after the memmap, so the lifetime of
 * the page extension memory is not the same as that of the memmap for
 * struct page. Clients therefore can't store extra data until page
 * extension is initialized, even though pages are already allocated and in
 * free use. This could leave the per-page extra data in an inadequate
 * state, so a client can use this callback to initialize that state
 * correctly.
 */
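
/*
 * A minimal sketch of what a hypothetical client looks like (illustration
 * only; page_foo and the names below are made up and not part of the
 * kernel). The need callback reports whether the feature was enabled for
 * this boot; the init callback runs once the page_ext arrays are fully
 * allocated. Such an ops struct would then be listed in the page_ext_ops[]
 * array below:
 */
#if 0	/* example client, not built */
static bool page_foo_enabled __initdata = true;	/* e.g. set by a boot param */

static bool __init need_page_foo(void)
{
	return page_foo_enabled;
}

static void __init init_page_foo(void)
{
	/* page_ext memory now exists; set up per-page state here */
}

struct page_ext_operations page_foo_ops = {
	.need = need_page_foo,
	.init = init_page_foo,
};
#endif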
static struct page_ext_operations *page_ext_ops[] = {
	&debug_guardpage_ops,
#ifdef CONFIG_PAGE_POISONING
	&page_poisoning_ops,
#endif
#ifdef CONFIG_PAGE_OWNER
	&page_owner_ops,
#endif
#if defined(CONFIG_IDLE_PAGE_TRACKING) && !defined(CONFIG_64BIT)
	&page_idle_ops,
#endif
};

static unsigned long total_usage;
static bool __init invoke_need_callbacks(void)
{
	int i;
	int entries = ARRAY_SIZE(page_ext_ops);

	for (i = 0; i < entries; i++) {
		if (page_ext_ops[i]->need && page_ext_ops[i]->need())
			return true;
	}

	return false;
}
static void __init invoke_init_callbacks(void)
{
	int i;
	int entries = ARRAY_SIZE(page_ext_ops);

	for (i = 0; i < entries; i++) {
		if (page_ext_ops[i]->init)
			page_ext_ops[i]->init();
	}
}
#if !defined(CONFIG_SPARSEMEM)

void __meminit pgdat_page_ext_init(struct pglist_data *pgdat)
{
	pgdat->node_page_ext = NULL;
}
struct page_ext *lookup_page_ext(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	unsigned long offset;
	struct page_ext *base;

	base = NODE_DATA(page_to_nid(page))->node_page_ext;
#ifdef CONFIG_DEBUG_VM
	/*
	 * The sanity checks the page allocator does upon freeing a
	 * page can reach here before the page_ext arrays are
	 * allocated when feeding a range of pages to the allocator
	 * for the first time during bootup or memory hotplug.
	 */
	if (unlikely(!base))
		return NULL;
#endif
	offset = pfn - round_down(node_start_pfn(page_to_nid(page)),
					MAX_ORDER_NR_PAGES);
	return base + offset;
}
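
/*
 * A sketch of the typical caller pattern (illustration only; page_foo_mark
 * is a made-up name). With CONFIG_DEBUG_VM, lookup_page_ext() can return
 * NULL during early boot or memory hotplug, so callers should tolerate it:
 */
#if 0	/* example usage, not built */
static void page_foo_mark(struct page *page)
{
	struct page_ext *page_ext = lookup_page_ext(page);

	if (!page_ext)
		return;
	/* per-page state lives in page_ext, e.g. the flags word */
	__set_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
}
#endif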
static int __init alloc_node_page_ext(int nid)
{
	struct page_ext *base;
	unsigned long table_size;
	unsigned long nr_pages;

	nr_pages = NODE_DATA(nid)->node_spanned_pages;
	if (!nr_pages)
		return 0;

	/*
	 * Need extra space if the node range is not aligned with
	 * MAX_ORDER_NR_PAGES. When the page allocator's buddy algorithm
	 * checks a buddy's status, the range could fall outside the exact
	 * node range.
	 */
	if (!IS_ALIGNED(node_start_pfn(nid), MAX_ORDER_NR_PAGES) ||
		!IS_ALIGNED(node_end_pfn(nid), MAX_ORDER_NR_PAGES))
		nr_pages += MAX_ORDER_NR_PAGES;

	table_size = sizeof(struct page_ext) * nr_pages;

	base = memblock_virt_alloc_try_nid_nopanic(
			table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
			BOOTMEM_ALLOC_ACCESSIBLE, nid);
	if (!base)
		return -ENOMEM;
	NODE_DATA(nid)->node_page_ext = base;
	total_usage += table_size;
	return 0;
}
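
/*
 * Worked example of the padding above (illustrative numbers, assuming
 * MAX_ORDER_NR_PAGES == 1024): a node spanning pfns [512, 8704) is
 * unaligned at both ends, and lookup_page_ext() indexes the table from
 * round_down(512, 1024) == 0. The extra MAX_ORDER_NR_PAGES entries grow
 * the table from 8192 to 9216 entries, covering both the head [0, 512)
 * and the tail [8704, 9216) that buddy checks may touch.
 */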
void __init page_ext_init_flatmem(void)
{
	int nid, fail;

	if (!invoke_need_callbacks())
		return;

	for_each_online_node(nid) {
		fail = alloc_node_page_ext(nid);
		if (fail)
			goto fail;
	}
	pr_info("allocated %ld bytes of page_ext\n", total_usage);
	invoke_init_callbacks();
	return;

fail:
	pr_crit("allocation of page_ext failed.\n");
	panic("Out of memory");
}

#else /* CONFIG_FLAT_NODE_MEM_MAP */
struct page_ext *lookup_page_ext(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	struct mem_section *section = __pfn_to_section(pfn);
#ifdef CONFIG_DEBUG_VM
	/*
	 * The sanity checks the page allocator does upon freeing a
	 * page can reach here before the page_ext arrays are
	 * allocated when feeding a range of pages to the allocator
	 * for the first time during bootup or memory hotplug.
	 */
	if (!section->page_ext)
		return NULL;
#endif
	return section->page_ext + pfn;
}
static void *__meminit alloc_page_ext(size_t size, int nid)
{
	gfp_t flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN;
	void *addr = NULL;

	addr = alloc_pages_exact_nid(nid, size, flags);
	if (addr) {
		kmemleak_alloc(addr, size, 1, flags);
		return addr;
	}

	/* Fall back to vmalloc space when contiguous pages are unavailable. */
	if (node_state(nid, N_HIGH_MEMORY))
		addr = vzalloc_node(size, nid);
	else
		addr = vzalloc(size);

	return addr;
}
static int __meminit init_section_page_ext(unsigned long pfn, int nid)
{
	struct mem_section *section;
	struct page_ext *base;
	unsigned long table_size;

	section = __pfn_to_section(pfn);

	if (section->page_ext)
		return 0;

	table_size = sizeof(struct page_ext) * PAGES_PER_SECTION;
	base = alloc_page_ext(table_size, nid);

	/*
	 * The value stored in section->page_ext is (base - pfn)
	 * and it does not point to the memory block allocated above,
	 * causing kmemleak false positives.
	 */
	kmemleak_not_leak(base);

	if (!base) {
		pr_err("page ext allocation failure\n");
		return -ENOMEM;
	}

	/*
	 * The passed "pfn" may not be aligned to SECTION. For the calculation
	 * we need to apply a mask.
	 */
	pfn &= PAGE_SECTION_MASK;
	section->page_ext = base - pfn;
	total_usage += table_size;
	return 0;
}
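
/*
 * Worked example of the (base - pfn) bias (illustrative numbers, assuming
 * PAGES_PER_SECTION == 0x8000): for the section covering pfns
 * [0x10000, 0x18000), section->page_ext is set to base - 0x10000.
 * lookup_page_ext() can then compute section->page_ext + pfn, which lands
 * on base + (pfn - 0x10000), the table entry for that pfn, with no extra
 * subtraction at lookup time.
 */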
#ifdef CONFIG_MEMORY_HOTPLUG
static void free_page_ext(void *addr)
{
	if (is_vmalloc_addr(addr)) {
		vfree(addr);
	} else {
		struct page *page = virt_to_page(addr);
		size_t table_size;

		table_size = sizeof(struct page_ext) * PAGES_PER_SECTION;

		BUG_ON(PageReserved(page));
		free_pages_exact(addr, table_size);
	}
}
static void __free_page_ext(unsigned long pfn)
{
	struct mem_section *ms;
	struct page_ext *base;

	ms = __pfn_to_section(pfn);
	if (!ms || !ms->page_ext)
		return;
	base = ms->page_ext + pfn;
	free_page_ext(base);
	ms->page_ext = NULL;
}
static int __meminit online_page_ext(unsigned long start_pfn,
				unsigned long nr_pages,
				int nid)
{
	unsigned long start, end, pfn;
	int fail = 0;

	start = SECTION_ALIGN_DOWN(start_pfn);
	end = SECTION_ALIGN_UP(start_pfn + nr_pages);

	if (nid == -1) {
		/*
		 * In this case, "nid" already exists and contains valid memory.
		 * "start_pfn" passed to us is a pfn which is an arg for
		 * online_pages(), and start_pfn should exist.
		 */
		nid = pfn_to_nid(start_pfn);
		VM_BUG_ON(!node_state(nid, N_ONLINE));
	}

	for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION) {
		if (!pfn_present(pfn))
			continue;
		fail = init_section_page_ext(pfn, nid);
	}
	if (!fail)
		return 0;

	/* rollback */
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__free_page_ext(pfn);

	return -ENOMEM;
}
static int __meminit offline_page_ext(unsigned long start_pfn,
				unsigned long nr_pages, int nid)
{
	unsigned long start, end, pfn;

	start = SECTION_ALIGN_DOWN(start_pfn);
	end = SECTION_ALIGN_UP(start_pfn + nr_pages);

	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__free_page_ext(pfn);
	return 0;
}
static int __meminit page_ext_callback(struct notifier_block *self,
			       unsigned long action, void *arg)
{
	struct memory_notify *mn = arg;
	int ret = 0;

	switch (action) {
	case MEM_GOING_ONLINE:
		ret = online_page_ext(mn->start_pfn,
				   mn->nr_pages, mn->status_change_nid);
		break;
	case MEM_OFFLINE:
		offline_page_ext(mn->start_pfn,
				mn->nr_pages, mn->status_change_nid);
		break;
	case MEM_CANCEL_ONLINE:
		offline_page_ext(mn->start_pfn,
				mn->nr_pages, mn->status_change_nid);
		break;
	case MEM_GOING_OFFLINE:
		break;
	case MEM_ONLINE:
	case MEM_CANCEL_OFFLINE:
		break;
	}

	return notifier_from_errno(ret);
}

#endif
void __init page_ext_init(void)
{
	unsigned long pfn;
	int nid;

	if (!invoke_need_callbacks())
		return;

	for_each_node_state(nid, N_MEMORY) {
		unsigned long start_pfn, end_pfn;

		start_pfn = node_start_pfn(nid);
		end_pfn = node_end_pfn(nid);
		/*
		 * start_pfn and end_pfn may not be aligned to SECTION and the
		 * page->flags of out-of-node pages are not initialized. So we
		 * scan [start_pfn, the biggest section's pfn < end_pfn) here.
		 */
		for (pfn = start_pfn; pfn < end_pfn;
			pfn = ALIGN(pfn + 1, PAGES_PER_SECTION)) {

			if (!pfn_valid(pfn))
				continue;
			/*
			 * Nodes' pfn ranges can overlap. We know some
			 * architectures can have a node layout such as
			 * -------------pfn-------------->
			 * N0 | N1 | N2 | N0 | N1 | N2 | ...
			 */
			if (pfn_to_nid(pfn) != nid)
				continue;
			if (init_section_page_ext(pfn, nid))
				goto oom;
		}
	}
	hotplug_memory_notifier(page_ext_callback, 0);
	pr_info("allocated %ld bytes of page_ext\n", total_usage);
	invoke_init_callbacks();
	return;

oom:
	panic("Out of memory");
}
void __meminit pgdat_page_ext_init(struct pglist_data *pgdat)
{
}

#endif