// SPDX-License-Identifier: GPL-2.0
/*
 * Virtual Memory Map support
 *
 * (C) 2007 sgi. Christoph Lameter.
 *
 * Virtual memory maps allow VM primitives pfn_to_page, page_to_pfn,
 * virt_to_page, page_address() to be implemented as a base offset
 * calculation without memory access.
 *
 * However, virtual mappings need a page table and TLBs. Many Linux
 * architectures already map their physical space using 1-1 mappings
 * via TLBs. For those arches the virtual memory map is essentially
 * for free if we use the same page size as the 1-1 mappings. In that
 * case the overhead consists of a few additional pages that are
 * allocated to create a view of memory for vmemmap.
 *
 * The architecture is expected to provide a vmemmap_populate() function
 * to instantiate the mapping.
 */
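
/*
 * Illustrative sketch (not part of this file): with a virtually contiguous
 * memmap, the pfn <-> page conversions reduce to pointer arithmetic against
 * the vmemmap base, roughly as the generic SPARSEMEM_VMEMMAP memory model
 * expresses it:
 *
 *	#define __pfn_to_page(pfn)	(vmemmap + (pfn))
 *	#define __page_to_pfn(page)	(unsigned long)((page) - vmemmap)
 *
 * where "vmemmap" is an architecture-provided pointer to the start of the
 * virtual memory map that the code below populates.
 */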

#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/memblock.h>
#include <linux/memremap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>

#include <asm/dma.h>
#include <asm/pgalloc.h>

/*
 * Allocate a block of memory to be used to back the virtual memory map
 * or to back the page tables that are used to create the mapping.
 * Uses the main allocators if they are available, else bootmem.
 */
static void * __ref __earlyonly_bootmem_alloc(int node,
				unsigned long size,
				unsigned long align,
				unsigned long goal)
{
	return memblock_alloc_try_nid_raw(size, align, goal,
					  MEMBLOCK_ALLOC_ACCESSIBLE, node);
}

void * __meminit vmemmap_alloc_block(unsigned long size, int node)
{
	/* If the main allocator is up use that, fallback to bootmem. */
	if (slab_is_available()) {
		gfp_t gfp_mask = GFP_KERNEL|__GFP_RETRY_MAYFAIL|__GFP_NOWARN;
		int order = get_order(size);
		struct page *page;

		page = alloc_pages_node(node, gfp_mask, order);
		if (page)
			return page_address(page);

		warn_alloc(gfp_mask & ~__GFP_NOWARN, NULL,
			   "vmemmap alloc failure: order:%u", order);
		return NULL;
	} else
		return __earlyonly_bootmem_alloc(node, size, size,
				__pa(MAX_DMA_ADDRESS));
}
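
/*
 * Worked example (assuming 4K base pages): a PAGE_SIZE request above maps to
 * get_order(4096) == 0, i.e. a single page from the buddy allocator, while a
 * PMD-sized request of 2M maps to get_order(2 << 20) == 9, i.e. one
 * physically contiguous block of 512 pages.
 */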

static void * __meminit altmap_alloc_block_buf(unsigned long size,
					       struct vmem_altmap *altmap);

/* need to make sure size is all the same during early stage */
void * __meminit vmemmap_alloc_block_buf(unsigned long size, int node,
					 struct vmem_altmap *altmap)
{
	void *ptr;

	if (altmap)
		return altmap_alloc_block_buf(size, altmap);

	ptr = sparse_buffer_alloc(size);
	if (!ptr)
		ptr = vmemmap_alloc_block(size, node);
	return ptr;
}
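
/*
 * In short: vmemmap_alloc_block_buf() tries three sources in order: the
 * device-provided altmap (if any), then the early sparsemem buffer behind
 * sparse_buffer_alloc(), and finally a fresh block from
 * vmemmap_alloc_block().
 */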

static unsigned long __meminit vmem_altmap_next_pfn(struct vmem_altmap *altmap)
{
	return altmap->base_pfn + altmap->reserve + altmap->alloc
		+ altmap->align;
}

static unsigned long __meminit vmem_altmap_nr_free(struct vmem_altmap *altmap)
{
	unsigned long allocated = altmap->alloc + altmap->align;

	if (altmap->free > allocated)
		return altmap->free - allocated;
	return 0;
}
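
/*
 * Added note on the altmap bookkeeping used above and in
 * altmap_alloc_block_buf() below (field semantics as in struct vmem_altmap):
 * the device pfn range starts at base_pfn, the first "reserve" pfns are not
 * handed out at all, and the "free" pfns after that are available for memmap
 * storage. "alloc" counts pfns actually handed out and "align" counts pfns
 * skipped to keep allocations naturally aligned, so the next free pfn is
 * base_pfn + reserve + alloc + align, and nr_free is free - (alloc + align).
 */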

static void * __meminit altmap_alloc_block_buf(unsigned long size,
					       struct vmem_altmap *altmap)
{
	unsigned long pfn, nr_pfns, nr_align;

	if (size & ~PAGE_MASK) {
		pr_warn_once("%s: allocations must be multiple of PAGE_SIZE (%ld)\n",
				__func__, PAGE_SIZE);
		return NULL;
	}
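
	/*
	 * Worked example for the alignment computed below (assuming 4K base
	 * pages): a 2M request gives nr_pfns = 512; find_first_bit() picks
	 * bit 9, so nr_align starts out as 512 and is then converted into
	 * the number of pfns needed to round the next free pfn up to a
	 * 512-pfn boundary. Handing back a naturally aligned block this way
	 * lets architecture code, for example, map a huge-page-sized chunk
	 * with a single PMD entry.
	 */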
	pfn = vmem_altmap_next_pfn(altmap);
	nr_pfns = size >> PAGE_SHIFT;
	nr_align = 1UL << find_first_bit(&nr_pfns, BITS_PER_LONG);
	nr_align = ALIGN(pfn, nr_align) - pfn;

	if (nr_pfns + nr_align > vmem_altmap_nr_free(altmap))
		return NULL;

	altmap->alloc += nr_pfns;
	altmap->align += nr_align;
	pfn += nr_align;

	pr_debug("%s: pfn: %#lx alloc: %ld align: %ld nr: %#lx\n",
			__func__, pfn, altmap->alloc, altmap->align, nr_pfns);
	return __va(__pfn_to_phys(pfn));
}

void __meminit vmemmap_verify(pte_t *pte, int node,
				unsigned long start, unsigned long end)
{
	unsigned long pfn = pte_pfn(*pte);
	int actual_node = early_pfn_to_nid(pfn);

	if (node_distance(actual_node, node) > LOCAL_DISTANCE)
		pr_warn("[%lx-%lx] potential offnode page_structs\n",
			start, end - 1);
}

pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node,
				       struct vmem_altmap *altmap)
{
	pte_t *pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte)) {
		pte_t entry;
		void *p;

		p = vmemmap_alloc_block_buf(PAGE_SIZE, node, altmap);
		if (!p)
			return NULL;
		entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
		set_pte_at(&init_mm, addr, pte, entry);
	}
	return pte;
}

static void * __meminit vmemmap_alloc_block_zero(unsigned long size, int node)
{
	void *p = vmemmap_alloc_block(size, node);

	if (!p)
		return NULL;
	memset(p, 0, size);

	return p;
}

pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pmd_populate_kernel(&init_mm, pmd, p);
	}
	return pmd;
}

pud_t * __meminit vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node)
{
	pud_t *pud = pud_offset(p4d, addr);
	if (pud_none(*pud)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pud_populate(&init_mm, pud, p);
	}
	return pud;
}

p4d_t * __meminit vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node)
{
	p4d_t *p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		p4d_populate(&init_mm, p4d, p);
	}
	return p4d;
}

pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
{
	pgd_t *pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pgd_populate(&init_mm, pgd, p);
	}
	return pgd;
}

int __meminit vmemmap_populate_basepages(unsigned long start, unsigned long end,
					 int node, struct vmem_altmap *altmap)
{
	unsigned long addr = start;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	for (; addr < end; addr += PAGE_SIZE) {
		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;
		p4d = vmemmap_p4d_populate(pgd, addr, node);
		if (!p4d)
			return -ENOMEM;
		pud = vmemmap_pud_populate(p4d, addr, node);
		if (!pud)
			return -ENOMEM;
		pmd = vmemmap_pmd_populate(pud, addr, node);
		if (!pmd)
			return -ENOMEM;
		pte = vmemmap_pte_populate(pmd, addr, node, altmap);
		if (!pte)
			return -ENOMEM;
		vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
	}

	return 0;
}
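
/*
 * A minimal sketch (not from this file), assuming an architecture with no
 * special vmemmap requirements; several architectures' vmemmap_populate()
 * implementations look essentially like this, mapping the whole range with
 * base pages:
 *
 *	int __meminit vmemmap_populate(unsigned long start, unsigned long end,
 *				       int node, struct vmem_altmap *altmap)
 *	{
 *		return vmemmap_populate_basepages(start, end, node, altmap);
 *	}
 */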

struct page * __meminit __populate_section_memmap(unsigned long pfn,
		unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
{
	unsigned long start = (unsigned long) pfn_to_page(pfn);
	unsigned long end = start + nr_pages * sizeof(struct page);

	if (WARN_ON_ONCE(!IS_ALIGNED(pfn, PAGES_PER_SUBSECTION) ||
		!IS_ALIGNED(nr_pages, PAGES_PER_SUBSECTION)))
		return NULL;

	if (vmemmap_populate(start, end, nid, altmap))
		return NULL;

	return pfn_to_page(pfn);
}
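
/*
 * For scale (assuming 4K base pages and a 64-byte struct page):
 * PAGES_PER_SUBSECTION is then 512, so the memmap for one 2M subsection is
 * 512 * 64 = 32K, i.e. eight base pages of struct page backing populated
 * per subsection here.
 */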