// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/memblock.h>
#include <linux/page_ext.h>
#include <linux/memory.h>
#include <linux/vmalloc.h>
#include <linux/kmemleak.h>
#include <linux/page_owner.h>
#include <linux/page_idle.h>
#include <linux/page_table_check.h>
#include <linux/rcupdate.h>
#include <linux/pgalloc_tag.h>
/*
 * struct page extension
 *
 * This is the feature to manage memory for extended data per page.
 *
 * Until now, we had to modify struct page itself to store extra data per
 * page, which requires rebuilding the kernel, a really time-consuming
 * process. Sometimes a rebuild is impossible due to third-party module
 * dependencies. And, finally, enlarging struct page can change system
 * behaviour in unwanted ways.
 *
 * This feature is intended to overcome the problems mentioned above. It
 * allocates memory for extended data per page in a separate place rather
 * than in struct page itself. This memory can be accessed by the accessor
 * functions provided by this code. During the boot process, it checks
 * whether allocation of a huge chunk of memory is needed or not. If not,
 * it avoids allocating memory at all. With this advantage, we can include
 * this feature in the kernel by default and can avoid rebuilds and the
 * related problems.
 *
 * To help these things work well, there are two callbacks for clients. One
 * is the need callback, which is mandatory if the user wants to avoid
 * useless memory allocation at boot time. The other is the optional init
 * callback, which is used to do proper initialization after memory is
 * allocated.
 *
 * The need callback is used to decide whether extended memory allocation
 * is needed or not. Sometimes users want to deactivate some features on a
 * given boot and the extra memory would be unnecessary. In this case, to
 * avoid allocating a huge chunk of memory, each client reports its need
 * for extra memory through the need callback. If one of the need callbacks
 * returns true, someone needs extra memory, so the page extension core
 * should allocate memory for page extension. If none of the need callbacks
 * returns true, no memory is needed on this boot and the page extension
 * core can skip the allocation entirely. As a result, no memory is wasted.
 *
 * When a need callback returns true, page_ext checks whether extra memory
 * is requested through the size field in struct page_ext_operations. If it
 * is non-zero, extra space is allocated for each page_ext entry and the
 * offset of that space is returned to the client through the offset field
 * in struct page_ext_operations.
 *
 * The init callback is used to do proper initialization after page
 * extension is completely initialized. On sparse memory systems, the extra
 * memory is allocated some time later than the memmap. In other words, the
 * lifetime of the page extension memory is not the same as that of the
 * memmap for struct page. Therefore, clients can't store extra data until
 * page extension is initialized, even if pages are already allocated and
 * used freely. This could leave the per-page extra data in an inadequate
 * state, so, to prevent that, clients can use this callback to initialize
 * the state correctly.
 */
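/*
 * For illustration only, a minimal sketch of a hypothetical client (the
 * "foo" names and CONFIG_FOO are made up; the .need/.init/.size/.offset
 * fields are the real struct page_ext_operations interface). &foo_ops
 * would also have to be listed in the page_ext_ops[] array below:
 *
 *	struct foo_data {
 *		unsigned long count;
 *	};
 *
 *	static bool need_foo(void)
 *	{
 *		// request per-page space only when the feature is active
 *		return IS_ENABLED(CONFIG_FOO);
 *	}
 *
 *	struct page_ext_operations foo_ops = {
 *		.size = sizeof(struct foo_data),
 *		.need = need_foo,
 *	};
 *
 * After invoke_need_callbacks() runs, foo's per-page data lives at
 * foo_ops.offset bytes inside each page_ext entry:
 *
 *	static struct foo_data *get_foo_data(struct page_ext *page_ext)
 *	{
 *		return (void *)page_ext + foo_ops.offset;
 *	}
 */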
#ifdef CONFIG_SPARSEMEM
#define PAGE_EXT_INVALID	(0x1)
#endif
#if defined(CONFIG_PAGE_IDLE_FLAG) && !defined(CONFIG_64BIT)
static bool need_page_idle(void)
{
	return true;
}

static struct page_ext_operations page_idle_ops __initdata = {
	.need = need_page_idle,
	.need_shared_flags = true,
};
#endif

static struct page_ext_operations *page_ext_ops[] __initdata = {
#ifdef CONFIG_PAGE_OWNER
	&page_owner_ops,
#endif
#if defined(CONFIG_PAGE_IDLE_FLAG) && !defined(CONFIG_64BIT)
	&page_idle_ops,
#endif
#ifdef CONFIG_MEM_ALLOC_PROFILING
	&page_alloc_tagging_ops,
#endif
#ifdef CONFIG_PAGE_TABLE_CHECK
	&page_table_check_ops,
#endif
};
unsigned long page_ext_size;

static unsigned long total_usage;
#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
/*
 * To ensure correct allocation tagging for pages, page_ext should be available
 * before the first page allocation. Otherwise early task stacks will be
 * allocated before page_ext initialization and missing tags will be flagged.
 */
bool early_page_ext __meminitdata = true;
#else
bool early_page_ext __meminitdata;
#endif

static int __init setup_early_page_ext(char *str)
{
	early_page_ext = true;

	return 0;
}
early_param("early_page_ext", setup_early_page_ext);
static bool __init invoke_need_callbacks(void)
{
	int i;
	int entries = ARRAY_SIZE(page_ext_ops);
	bool need = false;

	for (i = 0; i < entries; i++) {
		if (page_ext_ops[i]->need()) {
			if (page_ext_ops[i]->need_shared_flags) {
				page_ext_size = sizeof(struct page_ext);
				break;
			}
		}
	}

	for (i = 0; i < entries; i++) {
		if (page_ext_ops[i]->need()) {
			page_ext_ops[i]->offset = page_ext_size;
			page_ext_size += page_ext_ops[i]->size;
			need = true;
		}
	}

	return need;
}
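/*
 * Worked example with assumed numbers: if two clients return true from
 * ->need(), one of them sets need_shared_flags and asks for 8 bytes of
 * extra data while the other asks for 16, the loops above lay out each
 * entry as
 *
 *	[0, sizeof(struct page_ext))	shared flags
 *	[A, A + 8)			first client, A = sizeof(struct page_ext)
 *	[A + 8, A + 24)			second client
 *
 * leaving page_ext_size == sizeof(struct page_ext) + 24.
 */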
static void __init invoke_init_callbacks(void)
{
	int i;
	int entries = ARRAY_SIZE(page_ext_ops);

	for (i = 0; i < entries; i++) {
		if (page_ext_ops[i]->init)
			page_ext_ops[i]->init();
	}
}
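/*
 * Entries are page_ext_size bytes apart, not sizeof(struct page_ext):
 * each entry carries the clients' extra data after the struct itself,
 * so plain array indexing can't be used here.
 */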
static inline struct page_ext *get_entry(void *base, unsigned long index)
{
	return base + page_ext_size * index;
}
#ifndef CONFIG_SPARSEMEM
void __init page_ext_init_flatmem_late(void)
{
	invoke_init_callbacks();
}

void __meminit pgdat_page_ext_init(struct pglist_data *pgdat)
{
	pgdat->node_page_ext = NULL;
}
static struct page_ext *lookup_page_ext(const struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	unsigned long index;
	struct page_ext *base;

	WARN_ON_ONCE(!rcu_read_lock_held());
	base = NODE_DATA(page_to_nid(page))->node_page_ext;
	/*
	 * The sanity checks the page allocator does upon freeing a
	 * page can reach here before the page_ext arrays are
	 * allocated when feeding a range of pages to the allocator
	 * for the first time during bootup or memory hotplug.
	 */
	if (unlikely(!base))
		return NULL;

	index = pfn - round_down(node_start_pfn(page_to_nid(page)),
					MAX_ORDER_NR_PAGES);
	return get_entry(base, index);
}
static int __init alloc_node_page_ext(int nid)
{
	struct page_ext *base;
	unsigned long table_size;
	unsigned long nr_pages;

	nr_pages = NODE_DATA(nid)->node_spanned_pages;
	if (!nr_pages)
		return 0;

	/*
	 * Need extra space if the node range is not aligned to
	 * MAX_ORDER_NR_PAGES: when the page allocator's buddy algorithm
	 * checks a buddy's status, the range can fall outside the exact
	 * node range.
	 */
	if (!IS_ALIGNED(node_start_pfn(nid), MAX_ORDER_NR_PAGES) ||
		!IS_ALIGNED(node_end_pfn(nid), MAX_ORDER_NR_PAGES))
		nr_pages += MAX_ORDER_NR_PAGES;

	table_size = page_ext_size * nr_pages;

	base = memblock_alloc_try_nid(
			table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
			MEMBLOCK_ALLOC_ACCESSIBLE, nid);
	if (!base)
		return -ENOMEM;
	NODE_DATA(nid)->node_page_ext = base;
	total_usage += table_size;
	memmap_boot_pages_add(DIV_ROUND_UP(table_size, PAGE_SIZE));
	return 0;
}
void __init page_ext_init_flatmem(void)
{
	int nid, fail;

	if (!invoke_need_callbacks())
		return;

	for_each_online_node(nid) {
		fail = alloc_node_page_ext(nid);
		if (fail)
			goto fail;
	}
	pr_info("allocated %ld bytes of page_ext\n", total_usage);
	return;

fail:
	pr_crit("allocation of page_ext failed.\n");
	panic("Out of memory");
}
#else /* CONFIG_SPARSEMEM */

static bool page_ext_invalid(struct page_ext *page_ext)
{
	return !page_ext ||
		(((unsigned long)page_ext & PAGE_EXT_INVALID) == PAGE_EXT_INVALID);
}
static struct page_ext *lookup_page_ext(const struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	struct mem_section *section = __pfn_to_section(pfn);
	struct page_ext *page_ext = READ_ONCE(section->page_ext);

	WARN_ON_ONCE(!rcu_read_lock_held());
	/*
	 * The sanity checks the page allocator does upon freeing a
	 * page can reach here before the page_ext arrays are
	 * allocated when feeding a range of pages to the allocator
	 * for the first time during bootup or memory hotplug.
	 */
	if (page_ext_invalid(page_ext))
		return NULL;
	return get_entry(page_ext, pfn);
}
static void *__meminit alloc_page_ext(size_t size, int nid)
{
	gfp_t flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN;
	void *addr = NULL;

	addr = alloc_pages_exact_nid(nid, size, flags);
	if (addr)
		kmemleak_alloc(addr, size, 1, flags);
	else
		addr = vzalloc_node(size, nid);

	if (addr)
		memmap_pages_add(DIV_ROUND_UP(size, PAGE_SIZE));

	return addr;
}
static int __meminit init_section_page_ext(unsigned long pfn, int nid)
{
	struct mem_section *section;
	struct page_ext *base;
	unsigned long table_size;

	section = __pfn_to_section(pfn);

	if (section->page_ext)
		return 0;

	table_size = page_ext_size * PAGES_PER_SECTION;
	base = alloc_page_ext(table_size, nid);

	/*
	 * The value stored in section->page_ext is (base - page_ext_size * pfn)
	 * and it does not point to the memory block allocated above,
	 * causing kmemleak false positives.
	 */
	kmemleak_not_leak(base);

	if (!base) {
		pr_err("page ext allocation failure\n");
		return -ENOMEM;
	}

	/*
	 * The passed "pfn" may not be aligned to SECTION. For the calculation
	 * we need to apply a mask.
	 */
	pfn &= PAGE_SECTION_MASK;
	section->page_ext = (void *)base - page_ext_size * pfn;
	total_usage += table_size;
	return 0;
}
static void free_page_ext(void *addr)
{
	size_t table_size;
	struct page *page;

	table_size = page_ext_size * PAGES_PER_SECTION;
	memmap_pages_add(-1L * (DIV_ROUND_UP(table_size, PAGE_SIZE)));

	if (is_vmalloc_addr(addr)) {
		vfree(addr);
	} else {
		page = virt_to_page(addr);
		BUG_ON(PageReserved(page));
		kmemleak_free(addr);
		free_pages_exact(addr, table_size);
	}
}
static void __free_page_ext(unsigned long pfn)
{
	struct mem_section *ms;
	struct page_ext *base;

	ms = __pfn_to_section(pfn);
	if (!ms || !ms->page_ext)
		return;

	base = READ_ONCE(ms->page_ext);
	/*
	 * page_ext here can be valid while doing the roll back
	 * operation in online_page_ext().
	 */
	if (page_ext_invalid(base))
		base = (void *)base - PAGE_EXT_INVALID;
	WRITE_ONCE(ms->page_ext, NULL);

	base = get_entry(base, pfn);
	free_page_ext(base);
}
static void __invalidate_page_ext(unsigned long pfn)
{
	struct mem_section *ms;
	void *val;

	ms = __pfn_to_section(pfn);
	if (!ms || !ms->page_ext)
		return;
	val = (void *)ms->page_ext + PAGE_EXT_INVALID;
	WRITE_ONCE(ms->page_ext, val);
}
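/*
 * Concrete example of the PAGE_EXT_INVALID tagging (illustrative
 * addresses): a section whose ms->page_ext is 0x...e000 stores 0x...e001
 * after invalidation. page_ext_invalid() sees bit 0 set, so
 * lookup_page_ext() returns NULL for the whole section, while
 * __free_page_ext() strips the bit again before freeing the table.
 */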
static int __meminit online_page_ext(unsigned long start_pfn,
				unsigned long nr_pages,
				int nid)
{
	unsigned long start, end, pfn;
	int fail = 0;

	start = SECTION_ALIGN_DOWN(start_pfn);
	end = SECTION_ALIGN_UP(start_pfn + nr_pages);

	if (nid == NUMA_NO_NODE) {
		/*
		 * In this case, "nid" already exists and contains valid memory.
		 * "start_pfn" passed to us is a pfn which is an arg for
		 * online_pages(), and start_pfn should exist.
		 */
		nid = pfn_to_nid(start_pfn);
		VM_BUG_ON(!node_online(nid));
	}

	for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION)
		fail = init_section_page_ext(pfn, nid);
	if (!fail)
		return 0;

	/* rollback */
	end = pfn - PAGES_PER_SECTION;
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__free_page_ext(pfn);

	return -ENOMEM;
}
static void __meminit offline_page_ext(unsigned long start_pfn,
				unsigned long nr_pages)
{
	unsigned long start, end, pfn;

	start = SECTION_ALIGN_DOWN(start_pfn);
	end = SECTION_ALIGN_UP(start_pfn + nr_pages);

	/*
	 * Freeing of page_ext is done in 3 steps to avoid
	 * use-after-free of it:
	 * 1) Traverse all the sections and mark their page_ext
	 *    as invalid.
	 * 2) Wait for all the existing users of page_ext who
	 *    started before invalidation to finish.
	 * 3) Free the page_ext.
	 */
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__invalidate_page_ext(pfn);

	synchronize_rcu();

	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__free_page_ext(pfn);
}
static int __meminit page_ext_callback(struct notifier_block *self,
			       unsigned long action, void *arg)
{
	struct memory_notify *mn = arg;
	int ret = 0;

	switch (action) {
	case MEM_GOING_ONLINE:
		ret = online_page_ext(mn->start_pfn,
				   mn->nr_pages, mn->status_change_nid);
		break;
	case MEM_OFFLINE:
		offline_page_ext(mn->start_pfn,
				mn->nr_pages);
		break;
	case MEM_CANCEL_ONLINE:
		offline_page_ext(mn->start_pfn,
				mn->nr_pages);
		break;
	case MEM_GOING_OFFLINE:
		break;
	case MEM_ONLINE:
	case MEM_CANCEL_OFFLINE:
		break;
	}

	return notifier_from_errno(ret);
}
void __init page_ext_init(void)
{
	unsigned long pfn;
	int nid;

	if (!invoke_need_callbacks())
		return;

	for_each_node_state(nid, N_MEMORY) {
		unsigned long start_pfn, end_pfn;

		start_pfn = node_start_pfn(nid);
		end_pfn = node_end_pfn(nid);
		/*
		 * start_pfn and end_pfn may not be aligned to SECTION, and the
		 * page->flags of pages outside the node are not initialized.
		 * So we scan [start_pfn, the biggest section's pfn < end_pfn)
		 * here.
		 */
		for (pfn = start_pfn; pfn < end_pfn;
			pfn = ALIGN(pfn + 1, PAGES_PER_SECTION)) {

			if (!pfn_valid(pfn))
				continue;
			/*
			 * Nodes' pfns can overlap.
			 * We know some arch can have a nodes layout such as
			 * -------------pfn-------------->
			 * N0 | N1 | N2 | N0 | N1 | N2|....
			 */
			if (pfn_to_nid(pfn) != nid)
				continue;
			if (init_section_page_ext(pfn, nid))
				goto oom;
			cond_resched();
		}
	}
	hotplug_memory_notifier(page_ext_callback, DEFAULT_CALLBACK_PRI);
	pr_info("allocated %ld bytes of page_ext\n", total_usage);
	invoke_init_callbacks();
	return;

oom:
	panic("Out of memory");
}
void __meminit pgdat_page_ext_init(struct pglist_data *pgdat)
{
}

#endif
/**
 * page_ext_get() - Get the extended information for a page.
 * @page: The page we're interested in.
 *
 * Ensures that the page_ext will remain valid until page_ext_put()
 * is called.
 *
 * Return: NULL if no page_ext exists for this page.
 * Context: Any context. Caller may not sleep until they have called
 *          page_ext_put().
 */
struct page_ext *page_ext_get(const struct page *page)
{
	struct page_ext *page_ext;

	rcu_read_lock();
	page_ext = lookup_page_ext(page);
	if (!page_ext) {
		rcu_read_unlock();
		return NULL;
	}

	return page_ext;
}
/**
 * page_ext_put() - Working with page extended information is done.
 * @page_ext: Page extended information received from page_ext_get().
 *
 * The page extended information of the page may not be valid after this
 * function is called.
 *
 * Return: None.
 * Context: Any context in which the corresponding page_ext_get() was called.
 */
void page_ext_put(struct page_ext *page_ext)
{
	if (unlikely(!page_ext))
		return;

	rcu_read_unlock();
}