mm/page_ext.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/memblock.h>
#include <linux/page_ext.h>
#include <linux/memory.h>
#include <linux/vmalloc.h>
#include <linux/kmemleak.h>
#include <linux/page_owner.h>
#include <linux/page_idle.h>

/*
 * struct page extension
 *
 * This feature manages memory for extended data per page.
 *
 * Until now, extra data per page had to be stored in struct page itself.
 * That requires rebuilding the kernel, which is a time-consuming process,
 * and sometimes a rebuild is impossible due to third-party module
 * dependencies. On top of that, enlarging struct page can cause unwanted
 * changes in system behaviour.
 *
 * This feature is intended to overcome the problems mentioned above. It
 * allocates memory for extended data per page in a separate place rather
 * than in struct page itself, and this memory is accessed through the
 * accessor functions provided by this code. During the boot process, it
 * checks whether allocation of a huge chunk of memory is needed or not;
 * if not, it avoids allocating memory at all. With this advantage, we can
 * include this feature in the kernel by default and still avoid rebuilds
 * and the problems related to them.
 *
 * To make this work well, there are two callbacks for clients. One is the
 * need callback, which is mandatory if the client wants to avoid useless
 * memory allocation at boot time. The other, the init callback, is
 * optional and is used to do proper initialization after memory is
 * allocated.
 *
 * The need callback is used to decide whether extended memory allocation
 * is needed or not. Sometimes users want to deactivate certain features
 * for a given boot, making the extra memory unnecessary. In that case, to
 * avoid allocating a huge chunk of memory, each client expresses its need
 * for extra memory through the need callback. If any of the need
 * callbacks returns true, someone needs extra memory and the page
 * extension core must allocate memory for page extension. If none of them
 * returns true, no memory is needed for this boot and the page extension
 * core can skip the allocation entirely. As a result, no memory is
 * wasted.
 *
 * When a need callback returns true, page_ext checks whether the client
 * requests extra memory through the size field in struct
 * page_ext_operations. If it is non-zero, extra space is allocated for
 * each page_ext entry and the location of that space is returned to the
 * client through the offset field in struct page_ext_operations.
 *
 * The init callback is used to do proper initialization after page
 * extension is completely initialized. In sparse memory systems, this
 * extra memory is allocated some time later than the memmap, so the
 * lifetime of the page extension memory is not the same as that of the
 * memmap for struct page. Therefore, clients cannot store extra data
 * until page extension is initialized, even if pages are already
 * allocated and freely used. This could leave the per-page extra data in
 * an inadequate state, so, to prevent that, clients can use this callback
 * to initialize that state correctly.
 */
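
/*
 * Purely illustrative sketch (not part of this file): a hypothetical
 * client, "page_foo", wanting a few extra bytes per page would register
 * itself roughly like this. All page_foo identifiers are made up for
 * illustration; the real clients wired up below are page_owner and
 * page_idle.
 *
 *	static bool need_page_foo(void)
 *	{
 *		return page_foo_enabled;	// skip allocation when disabled
 *	}
 *
 *	static void init_page_foo(void)
 *	{
 *		// runs once the page_ext memory is fully set up
 *	}
 *
 *	struct page_ext_operations page_foo_ops = {
 *		.size	= sizeof(struct page_foo_data),
 *		.need	= need_page_foo,
 *		.init	= init_page_foo,
 *	};
 *
 * After boot, the client's per-page data would then live at
 * (void *)lookup_page_ext(page) + page_foo_ops.offset.
 */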

static struct page_ext_operations *page_ext_ops[] = {
#ifdef CONFIG_PAGE_OWNER
	&page_owner_ops,
#endif
#if defined(CONFIG_IDLE_PAGE_TRACKING) && !defined(CONFIG_64BIT)
	&page_idle_ops,
#endif
};

static unsigned long total_usage;
static unsigned long extra_mem;

static bool __init invoke_need_callbacks(void)
{
	int i;
	int entries = ARRAY_SIZE(page_ext_ops);
	bool need = false;

	for (i = 0; i < entries; i++) {
		if (page_ext_ops[i]->need && page_ext_ops[i]->need()) {
			page_ext_ops[i]->offset = sizeof(struct page_ext) +
						extra_mem;
			extra_mem += page_ext_ops[i]->size;
			need = true;
		}
	}

	return need;
}

static void __init invoke_init_callbacks(void)
{
	int i;
	int entries = ARRAY_SIZE(page_ext_ops);

	for (i = 0; i < entries; i++) {
		if (page_ext_ops[i]->init)
			page_ext_ops[i]->init();
	}
}
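
/*
 * Entry layout, as implied by the helpers below: entries are packed back
 * to back, each holding struct page_ext followed by the extra_mem bytes
 * claimed by the clients through ->size, so the entry for a given index
 * lives at base + index * get_entry_size().
 */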
static unsigned long get_entry_size(void)
{
	return sizeof(struct page_ext) + extra_mem;
}

static inline struct page_ext *get_entry(void *base, unsigned long index)
{
	return base + get_entry_size() * index;
}

#if !defined(CONFIG_SPARSEMEM)

void __meminit pgdat_page_ext_init(struct pglist_data *pgdat)
{
	pgdat->node_page_ext = NULL;
}

struct page_ext *lookup_page_ext(const struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	unsigned long index;
	struct page_ext *base;

	base = NODE_DATA(page_to_nid(page))->node_page_ext;
	/*
	 * The sanity checks the page allocator does upon freeing a
	 * page can reach here before the page_ext arrays are
	 * allocated when feeding a range of pages to the allocator
	 * for the first time during bootup or memory hotplug.
	 */
	if (unlikely(!base))
		return NULL;
	index = pfn - round_down(node_start_pfn(page_to_nid(page)),
					MAX_ORDER_NR_PAGES);
	return get_entry(base, index);
}
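
/*
 * Note on the index above: it is taken relative to the node's start pfn
 * rounded down to MAX_ORDER_NR_PAGES, which matches the extra
 * MAX_ORDER_NR_PAGES worth of entries that alloc_node_page_ext() below
 * reserves when the node range is not MAX_ORDER aligned.
 */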
static int __init alloc_node_page_ext(int nid)
{
	struct page_ext *base;
	unsigned long table_size;
	unsigned long nr_pages;

	nr_pages = NODE_DATA(nid)->node_spanned_pages;
	if (!nr_pages)
		return 0;

	/*
	 * Need extra space if the node range is not aligned with
	 * MAX_ORDER_NR_PAGES. When the page allocator's buddy algorithm
	 * checks a buddy's status, the range it looks at can fall outside
	 * the exact node range.
	 */
	if (!IS_ALIGNED(node_start_pfn(nid), MAX_ORDER_NR_PAGES) ||
		!IS_ALIGNED(node_end_pfn(nid), MAX_ORDER_NR_PAGES))
		nr_pages += MAX_ORDER_NR_PAGES;

	table_size = get_entry_size() * nr_pages;

	base = memblock_alloc_try_nid(
			table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
			MEMBLOCK_ALLOC_ACCESSIBLE, nid);
	if (!base)
		return -ENOMEM;
	NODE_DATA(nid)->node_page_ext = base;
	total_usage += table_size;
	return 0;
}

void __init page_ext_init_flatmem(void)
{
	int nid, fail;

	if (!invoke_need_callbacks())
		return;

	for_each_online_node(nid) {
		fail = alloc_node_page_ext(nid);
		if (fail)
			goto fail;
	}
	pr_info("allocated %ld bytes of page_ext\n", total_usage);
	invoke_init_callbacks();
	return;

fail:
	pr_crit("allocation of page_ext failed.\n");
	panic("Out of memory");
}

#else /* CONFIG_FLAT_NODE_MEM_MAP */

struct page_ext *lookup_page_ext(const struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	struct mem_section *section = __pfn_to_section(pfn);
	/*
	 * The sanity checks the page allocator does upon freeing a
	 * page can reach here before the page_ext arrays are
	 * allocated when feeding a range of pages to the allocator
	 * for the first time during bootup or memory hotplug.
	 */
	if (!section->page_ext)
		return NULL;
	return get_entry(section->page_ext, pfn);
}
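
/*
 * Note: section->page_ext does not point at the start of the allocated
 * table. init_section_page_ext() below stores base minus the entries that
 * would cover pfns 0 .. section_start_pfn - 1, so indexing with the
 * absolute pfn, as done above, lands on the right entry.
 */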
static void *__meminit alloc_page_ext(size_t size, int nid)
{
	gfp_t flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN;
	void *addr = NULL;

	addr = alloc_pages_exact_nid(nid, size, flags);
	if (addr) {
		kmemleak_alloc(addr, size, 1, flags);
		return addr;
	}

	addr = vzalloc_node(size, nid);

	return addr;
}

static int __meminit init_section_page_ext(unsigned long pfn, int nid)
{
	struct mem_section *section;
	struct page_ext *base;
	unsigned long table_size;

	section = __pfn_to_section(pfn);

	if (section->page_ext)
		return 0;

	table_size = get_entry_size() * PAGES_PER_SECTION;
	base = alloc_page_ext(table_size, nid);

	/*
	 * The value stored in section->page_ext is (base - pfn), which does
	 * not point to the memory block allocated above and would otherwise
	 * cause kmemleak false positives.
	 */
	kmemleak_not_leak(base);

	if (!base) {
		pr_err("page ext allocation failure\n");
		return -ENOMEM;
	}

	/*
	 * The passed "pfn" may not be aligned to SECTION. For the calculation
	 * we need to apply a mask.
	 */
	pfn &= PAGE_SECTION_MASK;
	section->page_ext = (void *)base - get_entry_size() * pfn;
	total_usage += table_size;
	return 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
static void free_page_ext(void *addr)
{
	if (is_vmalloc_addr(addr)) {
		vfree(addr);
	} else {
		struct page *page = virt_to_page(addr);
		size_t table_size;

		table_size = get_entry_size() * PAGES_PER_SECTION;

		BUG_ON(PageReserved(page));
		kmemleak_free(addr);
		free_pages_exact(addr, table_size);
	}
}

static void __free_page_ext(unsigned long pfn)
{
	struct mem_section *ms;
	struct page_ext *base;

	ms = __pfn_to_section(pfn);
	if (!ms || !ms->page_ext)
		return;
	base = get_entry(ms->page_ext, pfn);
	free_page_ext(base);
	ms->page_ext = NULL;
}

static int __meminit online_page_ext(unsigned long start_pfn,
				unsigned long nr_pages,
				int nid)
{
	unsigned long start, end, pfn;
	int fail = 0;

	start = SECTION_ALIGN_DOWN(start_pfn);
	end = SECTION_ALIGN_UP(start_pfn + nr_pages);

	if (nid == NUMA_NO_NODE) {
		/*
		 * In this case, "nid" already exists and contains valid memory.
		 * "start_pfn" passed to us is a pfn which is an arg for
		 * online_pages(), and start_pfn should exist.
		 */
		nid = pfn_to_nid(start_pfn);
		VM_BUG_ON(!node_state(nid, N_ONLINE));
	}

	for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION) {
		if (!pfn_present(pfn))
			continue;
		fail = init_section_page_ext(pfn, nid);
	}
	if (!fail)
		return 0;

	/* rollback */
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__free_page_ext(pfn);

	return -ENOMEM;
}

static int __meminit offline_page_ext(unsigned long start_pfn,
				unsigned long nr_pages, int nid)
{
	unsigned long start, end, pfn;

	start = SECTION_ALIGN_DOWN(start_pfn);
	end = SECTION_ALIGN_UP(start_pfn + nr_pages);

	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__free_page_ext(pfn);
	return 0;
}

static int __meminit page_ext_callback(struct notifier_block *self,
				unsigned long action, void *arg)
{
	struct memory_notify *mn = arg;
	int ret = 0;

	switch (action) {
	case MEM_GOING_ONLINE:
		ret = online_page_ext(mn->start_pfn,
				mn->nr_pages, mn->status_change_nid);
		break;
	case MEM_OFFLINE:
		offline_page_ext(mn->start_pfn,
				mn->nr_pages, mn->status_change_nid);
		break;
	case MEM_CANCEL_ONLINE:
		offline_page_ext(mn->start_pfn,
				mn->nr_pages, mn->status_change_nid);
		break;
	case MEM_GOING_OFFLINE:
		break;
	case MEM_ONLINE:
	case MEM_CANCEL_OFFLINE:
		break;
	}

	return notifier_from_errno(ret);
}

#endif

void __init page_ext_init(void)
{
	unsigned long pfn;
	int nid;

	if (!invoke_need_callbacks())
		return;

	for_each_node_state(nid, N_MEMORY) {
		unsigned long start_pfn, end_pfn;

		start_pfn = node_start_pfn(nid);
		end_pfn = node_end_pfn(nid);
		/*
		 * start_pfn and end_pfn may not be aligned to SECTION and the
		 * page->flags of pages outside the node are not initialized.
		 * So we scan [start_pfn, the biggest section's pfn < end_pfn)
		 * here.
		 */
		for (pfn = start_pfn; pfn < end_pfn;
			pfn = ALIGN(pfn + 1, PAGES_PER_SECTION)) {

			if (!pfn_valid(pfn))
				continue;
			/*
			 * Nodes' pfns can overlap.
			 * We know some archs can have a node layout such as
			 * -------------pfn-------------->
			 * N0 | N1 | N2 | N0 | N1 | N2|....
			 */
			if (pfn_to_nid(pfn) != nid)
				continue;
			if (init_section_page_ext(pfn, nid))
				goto oom;
			cond_resched();
		}
	}
	hotplug_memory_notifier(page_ext_callback, 0);
	pr_info("allocated %ld bytes of page_ext\n", total_usage);
	invoke_init_callbacks();
	return;

oom:
	panic("Out of memory");
}

void __meminit pgdat_page_ext_init(struct pglist_data *pgdat)
{
}

#endif