// SPDX-License-Identifier: GPL-2.0
/*
 * Bootmem core functions.
 *
 * Copyright (c) 2020, Bytedance.
 *
 *     Author: Muchun Song <songmuchun@bytedance.com>
 */
#include <linux/mm.h>
#include <linux/compiler.h>
#include <linux/memblock.h>
#include <linux/bootmem_info.h>
#include <linux/memory_hotplug.h>
#include <linux/kmemleak.h>
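
/*
 * Bootmem info is packed into page->private: the low four bits hold the
 * bootmem_type and the remaining bits hold the caller-supplied info (a
 * section number or node id), hence the two range checks below.
 */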
void get_page_bootmem(unsigned long info, struct page *page,
		      enum bootmem_type type)
{
	BUG_ON(type > 0xf);
	BUG_ON(info > (ULONG_MAX >> 4));
	SetPagePrivate(page);
	set_page_private(page, info << 4 | type);
	page_ref_inc(page);
}
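
/*
 * Drop one reference to a bootmem-info page. Once only the initial
 * reference from the early allocator remains, the bootmem state is
 * cleared and the page is handed to the buddy allocator via
 * free_reserved_page().
 */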
void put_page_bootmem(struct page *page)
{
	enum bootmem_type type = bootmem_type(page);

	BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE ||
	       type > MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE);

	if (page_ref_dec_return(page) == 1) {
		ClearPagePrivate(page);
		set_page_private(page, 0);
		INIT_LIST_HEAD(&page->lru);
		kmemleak_free_part_phys(PFN_PHYS(page_to_pfn(page)), PAGE_SIZE);
		free_reserved_page(page);
	}
}
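
/*
 * Without SPARSEMEM_VMEMMAP the section memmap is an ordinary early
 * allocation whose backing pages can be tagged directly. With
 * SPARSEMEM_VMEMMAP (the #else branch) the memmap lives in the vmemmap,
 * so the architecture's register_page_bootmem_memmap() walks it instead.
 */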
#ifndef CONFIG_SPARSEMEM_VMEMMAP
static void __init register_page_bootmem_info_section(unsigned long start_pfn)
{
	unsigned long mapsize, section_nr, i;
	struct mem_section *ms;
	struct page *page, *memmap;
	struct mem_section_usage *usage;

	section_nr = pfn_to_section_nr(start_pfn);
	ms = __nr_to_section(section_nr);

	/* Get section's memmap address */
	memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);

	/*
	 * Get page for the memmap's phys address
	 * XXX: need more consideration for sparse_vmemmap...
	 */
	page = virt_to_page(memmap);
	mapsize = sizeof(struct page) * PAGES_PER_SECTION;
	mapsize = PAGE_ALIGN(mapsize) >> PAGE_SHIFT;

	/* remember memmap's page */
	for (i = 0; i < mapsize; i++, page++)
		get_page_bootmem(section_nr, page, SECTION_INFO);
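
	/*
	 * The section's usage map may share its backing pages with other
	 * data, so those pages are tagged MIX_SECTION_INFO rather than
	 * SECTION_INFO.
	 */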
	usage = ms->usage;
	page = virt_to_page(usage);

	mapsize = PAGE_ALIGN(mem_section_usage_size()) >> PAGE_SHIFT;

	for (i = 0; i < mapsize; i++, page++)
		get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
}

#else /* CONFIG_SPARSEMEM_VMEMMAP */
static void __init register_page_bootmem_info_section(unsigned long start_pfn)
{
	unsigned long mapsize, section_nr, i;
	struct mem_section *ms;
	struct page *page, *memmap;
	struct mem_section_usage *usage;

	section_nr = pfn_to_section_nr(start_pfn);
	ms = __nr_to_section(section_nr);

	memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);
	register_page_bootmem_memmap(section_nr, memmap, PAGES_PER_SECTION);
	usage = ms->usage;
	page = virt_to_page(usage);

	mapsize = PAGE_ALIGN(mem_section_usage_size()) >> PAGE_SHIFT;

	for (i = 0; i < mapsize; i++, page++)
		get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */
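
/*
 * Tag every early-allocated page that backs this node's metadata: the
 * pglist_data itself (NODE_INFO) plus each present section's memmap and
 * usage map, so that memory hot-remove can recognize and free them.
 */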
void __init register_page_bootmem_info_node(struct pglist_data *pgdat)
{
	unsigned long i, pfn, end_pfn, nr_pages;
	int node = pgdat->node_id;
	struct page *page;

	nr_pages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT;
	page = virt_to_page(pgdat);

	for (i = 0; i < nr_pages; i++, page++)
		get_page_bootmem(node, page, NODE_INFO);
	pfn = pgdat->node_start_pfn;
	end_pfn = pgdat_end_pfn(pgdat);

	/* register section info */
	for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		/*
		 * Some platforms can assign the same pfn to multiple nodes - on
		 * node0 as well as nodeN. To avoid registering a pfn against
		 * multiple nodes we check that this pfn does not already
		 * reside in some other node.
		 */
		if (pfn_valid(pfn) && (early_pfn_to_nid(pfn) == node))
			register_page_bootmem_info_section(pfn);
	}
}