#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/page_ext.h>
#include <linux/memory.h>
#include <linux/vmalloc.h>
#include <linux/kmemleak.h>
#include <linux/page_owner.h>
#include <linux/page_idle.h>

/*
 * struct page extension
 *
 * This is the feature to manage memory for extended data per page.
 *
 * Until now, we had to modify struct page itself to store extra data per
 * page. That requires rebuilding the kernel, which is a really time-consuming
 * process, and sometimes a rebuild is impossible due to third-party module
 * dependencies. Finally, enlarging struct page could cause unwanted changes
 * in system behaviour.
 *
 * This feature is intended to overcome the problems above. It allocates
 * memory for extended data per page in a separate place rather than in
 * struct page itself. This memory can be accessed through the accessor
 * functions provided by this code. During the boot process, it checks whether
 * allocation of a huge chunk of memory is needed or not. If not, it avoids
 * allocating memory at all. With this advantage, we can include this feature
 * in the kernel by default and avoid the rebuild and the related problems.
 *
 * To make this work well, there are two callbacks for clients. One is the
 * need callback, which is mandatory if the user wants to avoid useless
 * memory allocation at boot time. The other is the optional init callback,
 * which is used to do proper initialization after memory is allocated.
 *
 * The need callback is used to decide whether extended memory allocation is
 * needed or not. Sometimes users want to deactivate some features for a
 * given boot, making the extra memory unnecessary. In this case, to avoid
 * allocating a huge chunk of memory, each client expresses its need for
 * extra memory through the need callback. If one of the need callbacks
 * returns true, it means that someone needs extra memory, so the
 * page extension core should allocate memory for page extension. If
 * none of the need callbacks returns true, memory isn't needed at all for
 * this boot and the page extension core can skip the allocation. As a
 * result, no memory is wasted.
 *
 * When a need callback returns true, page_ext checks whether there is a
 * request for extra memory through size in struct page_ext_operations. If it
 * is non-zero, extra space is allocated for each page_ext entry and the
 * offset is returned to the user through offset in struct
 * page_ext_operations.
 *
 * The init callback is used to do proper initialization after page extension
 * is completely initialized. In a sparse memory system, the extra memory is
 * allocated some time later than the memmap. In other words, the lifetime of
 * the memory for page extension isn't the same as that of the memmap for
 * struct page. Therefore, clients can't store extra data until page
 * extension is initialized, even if pages are already allocated and freely
 * used. This could leave the extra data per page in an inadequate state, so,
 * to prevent that, clients can use this callback to initialize that state
 * correctly. A sketch of a hypothetical client follows this comment.
 */

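/*
 * A minimal sketch of what a page_ext client looks like.  The names
 * my_feature_* and struct my_feature_data below are hypothetical and only
 * illustrate the callback contract described above; the real clients are
 * the entries in page_ext_ops[] right after this block.  A real client's
 * ops struct is declared in a header so page_ext_ops[] can reference it.
 */
#if 0	/* illustrative sketch only, not compiled */
struct my_feature_data {
	unsigned long last_alloc_pid;	/* hypothetical per-page payload */
};

static bool my_feature_enabled;	/* e.g. set from an early_param() */

static bool need_my_feature(void)
{
	/* Returning false keeps page_ext from allocating anything for us. */
	return my_feature_enabled;
}

static void init_my_feature(void)
{
	/* Called once page_ext itself is fully set up (the init callback). */
}

struct page_ext_operations my_feature_ops = {
	.size	= sizeof(struct my_feature_data),	/* extra bytes per page */
	.need	= need_my_feature,
	.init	= init_my_feature,
};
#endif
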
static struct page_ext_operations *page_ext_ops[] = {
	&debug_guardpage_ops,
#ifdef CONFIG_PAGE_POISONING
	&page_poisoning_ops,
#endif
#ifdef CONFIG_PAGE_OWNER
	&page_owner_ops,
#endif
#if defined(CONFIG_IDLE_PAGE_TRACKING) && !defined(CONFIG_64BIT)
	&page_idle_ops,
#endif
};

static unsigned long total_usage;
static unsigned long extra_mem;

static bool __init invoke_need_callbacks(void)
{
	int i;
	int entries = ARRAY_SIZE(page_ext_ops);
	bool need = false;

	for (i = 0; i < entries; i++) {
		if (page_ext_ops[i]->need && page_ext_ops[i]->need()) {
			page_ext_ops[i]->offset = sizeof(struct page_ext) +
						extra_mem;
			extra_mem += page_ext_ops[i]->size;
			need = true;
		}
	}

	return need;
}

static void __init invoke_init_callbacks(void)
{
	int i;
	int entries = ARRAY_SIZE(page_ext_ops);

	for (i = 0; i < entries; i++) {
		if (page_ext_ops[i]->init)
			page_ext_ops[i]->init();
	}
}

static unsigned long get_entry_size(void)
{
	return sizeof(struct page_ext) + extra_mem;
}

static inline struct page_ext *get_entry(void *base, unsigned long index)
{
	return base + get_entry_size() * index;
}

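/*
 * Sketch of how a client reaches its per-page data once its ->offset has
 * been filled in by invoke_need_callbacks(): look up the page_ext entry and
 * add the client's own offset.  "my_feature_ops" and "struct my_feature_data"
 * are the hypothetical names from the sketch above.
 */
#if 0	/* illustrative sketch only, not compiled */
static struct my_feature_data *get_my_feature_data(struct page *page)
{
	struct page_ext *page_ext = lookup_page_ext(page);

	if (unlikely(!page_ext))
		return NULL;
	return (void *)page_ext + my_feature_ops.offset;
}
#endif
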
#if !defined(CONFIG_SPARSEMEM)

void __meminit pgdat_page_ext_init(struct pglist_data *pgdat)
{
	pgdat->node_page_ext = NULL;
}

struct page_ext *lookup_page_ext(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	unsigned long index;
	struct page_ext *base;

	base = NODE_DATA(page_to_nid(page))->node_page_ext;
#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAGE_POISONING)
	/*
	 * The sanity checks the page allocator does upon freeing a
	 * page can reach here before the page_ext arrays are
	 * allocated when feeding a range of pages to the allocator
	 * for the first time during bootup or memory hotplug.
	 *
	 * This check is also necessary for ensuring page poisoning
	 * works as expected when enabled.
	 */
	if (unlikely(!base))
		return NULL;
#endif
	index = pfn - round_down(node_start_pfn(page_to_nid(page)),
					MAX_ORDER_NR_PAGES);
	return get_entry(base, index);
}

static int __init alloc_node_page_ext(int nid)
{
	struct page_ext *base;
	unsigned long table_size;
	unsigned long nr_pages;

	nr_pages = NODE_DATA(nid)->node_spanned_pages;
	if (!nr_pages)
		return 0;

	/*
	 * Need extra space if the node range is not aligned with
	 * MAX_ORDER_NR_PAGES. When the page allocator's buddy algorithm
	 * checks a buddy's status, the range could fall outside the exact
	 * node range.
	 */
	if (!IS_ALIGNED(node_start_pfn(nid), MAX_ORDER_NR_PAGES) ||
		!IS_ALIGNED(node_end_pfn(nid), MAX_ORDER_NR_PAGES))
		nr_pages += MAX_ORDER_NR_PAGES;

	table_size = get_entry_size() * nr_pages;

	base = memblock_virt_alloc_try_nid_nopanic(
			table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
			BOOTMEM_ALLOC_ACCESSIBLE, nid);
	if (!base)
		return -ENOMEM;
	NODE_DATA(nid)->node_page_ext = base;
	total_usage += table_size;
	return 0;
}

void __init page_ext_init_flatmem(void)
{
	int nid, fail;

	if (!invoke_need_callbacks())
		return;

	for_each_online_node(nid) {
		fail = alloc_node_page_ext(nid);
		if (fail)
			goto fail;
	}
	pr_info("allocated %ld bytes of page_ext\n", total_usage);
	invoke_init_callbacks();
	return;

fail:
	pr_crit("allocation of page_ext failed.\n");
	panic("Out of memory");
}

#else /* CONFIG_FLAT_NODE_MEM_MAP */

struct page_ext *lookup_page_ext(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	struct mem_section *section = __pfn_to_section(pfn);
#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAGE_POISONING)
	/*
	 * The sanity checks the page allocator does upon freeing a
	 * page can reach here before the page_ext arrays are
	 * allocated when feeding a range of pages to the allocator
	 * for the first time during bootup or memory hotplug.
	 *
	 * This check is also necessary for ensuring page poisoning
	 * works as expected when enabled.
	 */
	if (!section->page_ext)
		return NULL;
#endif
	return get_entry(section->page_ext, pfn);
}

static void *__meminit alloc_page_ext(size_t size, int nid)
{
	gfp_t flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN;
	void *addr = NULL;

	addr = alloc_pages_exact_nid(nid, size, flags);
	if (addr) {
		kmemleak_alloc(addr, size, 1, flags);
		return addr;
	}

	if (node_state(nid, N_HIGH_MEMORY))
		addr = vzalloc_node(size, nid);
	else
		addr = vzalloc(size);

	return addr;
}

static int __meminit init_section_page_ext(unsigned long pfn, int nid)
{
	struct mem_section *section;
	struct page_ext *base;
	unsigned long table_size;

	section = __pfn_to_section(pfn);

	if (section->page_ext)
		return 0;

	table_size = get_entry_size() * PAGES_PER_SECTION;
	base = alloc_page_ext(table_size, nid);

	/*
	 * The value stored in section->page_ext is (base - pfn)
	 * and it does not point to the memory block allocated above,
	 * causing kmemleak false positives.
	 */
	kmemleak_not_leak(base);

	if (!base) {
		pr_err("page ext allocation failure\n");
		return -ENOMEM;
	}

	/*
	 * The passed "pfn" may not be aligned to SECTION. For the calculation
	 * we need to apply a mask.
	 */
	pfn &= PAGE_SECTION_MASK;
	section->page_ext = (void *)base - get_entry_size() * pfn;
	total_usage += table_size;
	return 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
static void free_page_ext(void *addr)
{
	if (is_vmalloc_addr(addr)) {
		vfree(addr);
	} else {
		struct page *page = virt_to_page(addr);
		size_t table_size;

		table_size = get_entry_size() * PAGES_PER_SECTION;

		BUG_ON(PageReserved(page));
		kmemleak_free(addr);
		free_pages_exact(addr, table_size);
	}
}

static void __free_page_ext(unsigned long pfn)
{
	struct mem_section *ms;
	struct page_ext *base;

	ms = __pfn_to_section(pfn);
	if (!ms || !ms->page_ext)
		return;
	base = get_entry(ms->page_ext, pfn);
	free_page_ext(base);
	ms->page_ext = NULL;
}

static int __meminit online_page_ext(unsigned long start_pfn,
				unsigned long nr_pages,
				int nid)
{
	unsigned long start, end, pfn;
	int fail = 0;

	start = SECTION_ALIGN_DOWN(start_pfn);
	end = SECTION_ALIGN_UP(start_pfn + nr_pages);

	if (nid == -1) {
		/*
		 * In this case, "nid" already exists and contains valid memory.
		 * "start_pfn" passed to us is a pfn which is an argument for
		 * online_pages(), and start_pfn should exist.
		 */
		nid = pfn_to_nid(start_pfn);
		VM_BUG_ON(!node_state(nid, N_ONLINE));
	}

	for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION) {
		if (!pfn_present(pfn))
			continue;
		fail = init_section_page_ext(pfn, nid);
	}
	if (!fail)
		return 0;

	/* rollback */
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__free_page_ext(pfn);

	return -ENOMEM;
}

static int __meminit offline_page_ext(unsigned long start_pfn,
				unsigned long nr_pages, int nid)
{
	unsigned long start, end, pfn;

	start = SECTION_ALIGN_DOWN(start_pfn);
	end = SECTION_ALIGN_UP(start_pfn + nr_pages);

	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__free_page_ext(pfn);
	return 0;
}

static int __meminit page_ext_callback(struct notifier_block *self,
			       unsigned long action, void *arg)
{
	struct memory_notify *mn = arg;
	int ret = 0;

	switch (action) {
	case MEM_GOING_ONLINE:
		ret = online_page_ext(mn->start_pfn,
				   mn->nr_pages, mn->status_change_nid);
		break;
	case MEM_OFFLINE:
		offline_page_ext(mn->start_pfn,
				mn->nr_pages, mn->status_change_nid);
		break;
	case MEM_CANCEL_ONLINE:
		offline_page_ext(mn->start_pfn,
				mn->nr_pages, mn->status_change_nid);
		break;
	case MEM_GOING_OFFLINE:
		break;
	case MEM_ONLINE:
	case MEM_CANCEL_OFFLINE:
		break;
	}

	return notifier_from_errno(ret);
}

#endif

void __init page_ext_init(void)
{
	unsigned long pfn;
	int nid;

	if (!invoke_need_callbacks())
		return;

	for_each_node_state(nid, N_MEMORY) {
		unsigned long start_pfn, end_pfn;

		start_pfn = node_start_pfn(nid);
		end_pfn = node_end_pfn(nid);
		/*
		 * start_pfn and end_pfn may not be aligned to SECTION, and
		 * the page->flags of out-of-node pages are not initialized.
		 * So we scan [start_pfn, the biggest section's pfn < end_pfn)
		 * here.
		 */
		for (pfn = start_pfn; pfn < end_pfn;
			pfn = ALIGN(pfn + 1, PAGES_PER_SECTION)) {

			if (!pfn_valid(pfn))
				continue;
			/*
			 * Nodes' pfns can overlap.
			 * We know some arches can have a node layout such as
			 * -------------pfn-------------->
			 * N0 | N1 | N2 | N0 | N1 | N2|....
			 *
			 * Take into account DEFERRED_STRUCT_PAGE_INIT.
			 */
			if (early_pfn_to_nid(pfn) != nid)
				continue;
			if (init_section_page_ext(pfn, nid))
				goto oom;
		}
	}
	hotplug_memory_notifier(page_ext_callback, 0);
	pr_info("allocated %ld bytes of page_ext\n", total_usage);
	invoke_init_callbacks();
	return;

oom:
	panic("Out of memory");
}

void __meminit pgdat_page_ext_init(struct pglist_data *pgdat)
{
}

#endif