// SPDX-License-Identifier: GPL-2.0
/*
 * Virtual Memory Map support
 *
 * (C) 2007 sgi. Christoph Lameter.
 *
 * Virtual memory maps allow VM primitives pfn_to_page, page_to_pfn,
 * virt_to_page, page_address() to be implemented as a base offset
 * calculation without memory access.
 *
 * However, virtual mappings need a page table and TLBs. Many Linux
 * architectures already map their physical space using 1-1 mappings
 * via TLBs. For those arches the virtual memory map is essentially
 * for free if we use the same page size as the 1-1 mappings. In that
 * case the overhead consists of a few additional pages that are
 * allocated to create a view of memory for vmemmap.
 *
 * The architecture is expected to provide a vmemmap_populate() function
 * to instantiate the mapping.
 */
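/*
 * Illustrative sketch only (not part of this file): with the vmemmap memory
 * model the generic pfn/page helpers reduce to pointer arithmetic against
 * the vmemmap base, roughly as in include/asm-generic/memory_model.h:
 *
 *	#define __pfn_to_page(pfn)	(vmemmap + (pfn))
 *	#define __page_to_pfn(page)	(unsigned long)((page) - vmemmap)
 */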
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/memblock.h>
#include <linux/memremap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>

#include <asm/dma.h>
#include <asm/pgalloc.h>
/*
 * Allocate a block of memory to be used to back the virtual memory map
 * or to back the page tables that are used to create the mapping.
 * Uses the main allocators if they are available, else bootmem.
 */
static void * __ref __earlyonly_bootmem_alloc(int node,
				unsigned long size,
				unsigned long align,
				unsigned long goal)
{
	return memblock_alloc_try_nid_raw(size, align, goal,
					  MEMBLOCK_ALLOC_ACCESSIBLE, node);
}
void * __meminit vmemmap_alloc_block(unsigned long size, int node)
{
	/* If the main allocator is up use that, fallback to bootmem. */
	if (slab_is_available()) {
		gfp_t gfp_mask = GFP_KERNEL|__GFP_RETRY_MAYFAIL|__GFP_NOWARN;
		int order = get_order(size);
		struct page *page;

		page = alloc_pages_node(node, gfp_mask, order);
		if (page)
			return page_address(page);

		warn_alloc(gfp_mask & ~__GFP_NOWARN, NULL,
			   "vmemmap alloc failure: order:%u", order);
		return NULL;
	} else
		return __earlyonly_bootmem_alloc(node, size, size,
				__pa(MAX_DMA_ADDRESS));
}
static void * __meminit altmap_alloc_block_buf(unsigned long size,
					       struct vmem_altmap *altmap);
/* need to make sure size is all the same during early stage */
void * __meminit vmemmap_alloc_block_buf(unsigned long size, int node,
					 struct vmem_altmap *altmap)
{
	void *ptr;

	if (altmap)
		return altmap_alloc_block_buf(size, altmap);

	ptr = sparse_buffer_alloc(size);
	if (!ptr)
		ptr = vmemmap_alloc_block(size, node);
	return ptr;
}
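/* First PFN in the altmap reservation that has not yet been handed out. */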
static unsigned long __meminit vmem_altmap_next_pfn(struct vmem_altmap *altmap)
{
	return altmap->base_pfn + altmap->reserve + altmap->alloc
		+ altmap->align;
}
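/* Number of PFNs still available for allocation from the altmap. */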
static unsigned long __meminit vmem_altmap_nr_free(struct vmem_altmap *altmap)
{
	unsigned long allocated = altmap->alloc + altmap->align;

	if (altmap->free > allocated)
		return altmap->free - allocated;
	return 0;
}
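/*
 * Carve a vmemmap block out of the device-provided altmap instead of regular
 * memory. The start PFN is aligned to the largest power-of-two factor of the
 * requested number of pages, and the request fails if the altmap does not
 * have enough free space left.
 */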
static void * __meminit altmap_alloc_block_buf(unsigned long size,
					       struct vmem_altmap *altmap)
{
	unsigned long pfn, nr_pfns, nr_align;

	if (size & ~PAGE_MASK) {
		pr_warn_once("%s: allocations must be multiple of PAGE_SIZE (%ld)\n",
				__func__, PAGE_SIZE);
		return NULL;
	}

	pfn = vmem_altmap_next_pfn(altmap);
	nr_pfns = size >> PAGE_SHIFT;
	nr_align = 1UL << find_first_bit(&nr_pfns, BITS_PER_LONG);
	nr_align = ALIGN(pfn, nr_align) - pfn;
	if (nr_pfns + nr_align > vmem_altmap_nr_free(altmap))
		return NULL;

	altmap->alloc += nr_pfns;
	altmap->align += nr_align;
	pfn += nr_align;

	pr_debug("%s: pfn: %#lx alloc: %ld align: %ld nr: %#lx\n",
			__func__, pfn, altmap->alloc, altmap->align, nr_pfns);
	return __va(__pfn_to_phys(pfn));
}
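/*
 * Warn once if the page backing the vmemmap range [start, end) was allocated
 * on a node distant from the node whose memory it describes.
 */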
void __meminit vmemmap_verify(pte_t *pte, int node,
				unsigned long start, unsigned long end)
{
	unsigned long pfn = pte_pfn(ptep_get(pte));
	int actual_node = early_pfn_to_nid(pfn);

	if (node_distance(actual_node, node) > LOCAL_DISTANCE)
		pr_warn_once("[%lx-%lx] potential offnode page_structs\n",
			start, end - 1);
}
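/*
 * Populate the PTE for one vmemmap page at @addr: either allocate a fresh
 * backing page (possibly from @altmap), or map @reuse once more for the
 * compound-page deduplication case.
 */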
pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node,
				       struct vmem_altmap *altmap,
				       struct page *reuse)
{
	pte_t *pte = pte_offset_kernel(pmd, addr);
	if (pte_none(ptep_get(pte))) {
		pte_t entry;
		void *p;

		if (!reuse) {
			p = vmemmap_alloc_block_buf(PAGE_SIZE, node, altmap);
			if (!p)
				return NULL;
		} else {
			/*
			 * When a PTE/PMD entry is freed from the init_mm
			 * there's a free_pages() call to this page allocated
			 * above. Thus this get_page() is paired with the
			 * put_page_testzero() on the freeing path.
			 * This can only be called by certain ZONE_DEVICE
			 * paths, and through vmemmap_populate_compound_pages()
			 * when slab is available.
			 */
			get_page(reuse);
			p = page_to_virt(reuse);
		}
		entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
		set_pte_at(&init_mm, addr, pte, entry);
	}
	return pte;
}
static void * __meminit vmemmap_alloc_block_zero(unsigned long size, int node)
{
	void *p = vmemmap_alloc_block(size, node);

	if (!p)
		return NULL;
	memset(p, 0, size);

	return p;
}
pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		kernel_pte_init(p);
		pmd_populate_kernel(&init_mm, pmd, p);
	}
	return pmd;
}
pud_t * __meminit vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node)
{
	pud_t *pud = pud_offset(p4d, addr);
	if (pud_none(*pud)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pmd_init(p);
		pud_populate(&init_mm, pud, p);
	}
	return pud;
}
p4d_t * __meminit vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node)
{
	p4d_t *p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pud_init(p);
		p4d_populate(&init_mm, p4d, p);
	}
	return p4d;
}
pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
{
	pgd_t *pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pgd_populate(&init_mm, pgd, p);
	}
	return pgd;
}
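/*
 * Walk (and allocate as needed) every page table level required to map one
 * vmemmap page at @addr; returns the PTE, or NULL on allocation failure.
 */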
static pte_t * __meminit vmemmap_populate_address(unsigned long addr, int node,
						  struct vmem_altmap *altmap,
						  struct page *reuse)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = vmemmap_pgd_populate(addr, node);
	if (!pgd)
		return NULL;
	p4d = vmemmap_p4d_populate(pgd, addr, node);
	if (!p4d)
		return NULL;
	pud = vmemmap_pud_populate(p4d, addr, node);
	if (!pud)
		return NULL;
	pmd = vmemmap_pmd_populate(pud, addr, node);
	if (!pmd)
		return NULL;
	pte = vmemmap_pte_populate(pmd, addr, node, altmap, reuse);
	if (!pte)
		return NULL;
	vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);

	return pte;
}
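/* Map [start, end) one base page at a time, optionally reusing @reuse. */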
static int __meminit vmemmap_populate_range(unsigned long start,
					    unsigned long end, int node,
					    struct vmem_altmap *altmap,
					    struct page *reuse)
{
	unsigned long addr = start;
	pte_t *pte;

	for (; addr < end; addr += PAGE_SIZE) {
		pte = vmemmap_populate_address(addr, node, altmap, reuse);
		if (!pte)
			return -ENOMEM;
	}

	return 0;
}
int __meminit vmemmap_populate_basepages(unsigned long start, unsigned long end,
					 int node, struct vmem_altmap *altmap)
{
	return vmemmap_populate_range(start, end, node, altmap, NULL);
}
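/*
 * Architecture hooks for PMD-sized vmemmap mappings. The __weak defaults do
 * nothing and report no reusable huge mapping; architectures that call
 * vmemmap_populate_hugepages() are expected to provide real implementations.
 */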
void __weak __meminit vmemmap_set_pmd(pmd_t *pmd, void *p, int node,
				      unsigned long addr, unsigned long next)
{
}
int __weak __meminit vmemmap_check_pmd(pmd_t *pmd, int node,
				       unsigned long addr, unsigned long next)
{
	return 0;
}
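/*
 * Populate [start, end) with PMD-sized vmemmap mappings where possible,
 * falling back to base pages when a PMD-sized block cannot be allocated
 * from regular memory.
 */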
int __meminit vmemmap_populate_hugepages(unsigned long start, unsigned long end,
					 int node, struct vmem_altmap *altmap)
{
	unsigned long addr;
	unsigned long next;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	for (addr = start; addr < end; addr = next) {
		next = pmd_addr_end(addr, end);

		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;

		p4d = vmemmap_p4d_populate(pgd, addr, node);
		if (!p4d)
			return -ENOMEM;

		pud = vmemmap_pud_populate(p4d, addr, node);
		if (!pud)
			return -ENOMEM;

		pmd = pmd_offset(pud, addr);
		if (pmd_none(READ_ONCE(*pmd))) {
			void *p;

			p = vmemmap_alloc_block_buf(PMD_SIZE, node, altmap);
			if (p) {
				vmemmap_set_pmd(pmd, p, node, addr, next);
				continue;
			} else if (altmap) {
				/*
				 * No fallback: In any case we care about, the
				 * altmap should be reasonably sized and aligned
				 * such that vmemmap_alloc_block_buf() will always
				 * succeed. For consistency with the PTE case,
				 * return an error here as failure could indicate
				 * a configuration issue with the size of the altmap.
				 */
				return -ENOMEM;
			}
		} else if (vmemmap_check_pmd(pmd, node, addr, next))
			continue;
		if (vmemmap_populate_basepages(addr, next, node, altmap))
			return -ENOMEM;
	}
	return 0;
}
#ifndef vmemmap_populate_compound_pages
/*
 * For compound pages bigger than section size (e.g. x86 1G compound
 * pages with 2M subsection size) fill the rest of sections as tail
 * pages.
 *
 * Note that memremap_pages() resets the @nr_range value and will increment
 * it after each successful range onlining. Thus the value of @nr_range
 * at section memmap populate time corresponds to the in-progress range
 * being onlined here.
 */
static bool __meminit reuse_compound_section(unsigned long start_pfn,
					     struct dev_pagemap *pgmap)
{
	unsigned long nr_pages = pgmap_vmemmap_nr(pgmap);
	unsigned long offset = start_pfn -
		PHYS_PFN(pgmap->ranges[pgmap->nr_range].start);

	return !IS_ALIGNED(offset, nr_pages) && nr_pages > PAGES_PER_SUBSECTION;
}
static pte_t * __meminit compound_section_tail_page(unsigned long addr)
{
	pte_t *pte;

	addr -= PAGE_SIZE;

	/*
	 * Assuming sections are populated sequentially, the previous section's
	 * page data can be reused.
	 */
	pte = pte_offset_kernel(pmd_off_k(addr), addr);
	if (!pte)
		return NULL;

	return pte;
}
static int __meminit vmemmap_populate_compound_pages(unsigned long start_pfn,
						     unsigned long start,
						     unsigned long end, int node,
						     struct dev_pagemap *pgmap)
{
	unsigned long size, addr;
	pte_t *pte;
	int rc;

	if (reuse_compound_section(start_pfn, pgmap)) {
		pte = compound_section_tail_page(start);
		if (!pte)
			return -ENOMEM;

		/*
		 * Reuse the page that was populated in the prior iteration
		 * with just tail struct pages.
		 */
		return vmemmap_populate_range(start, end, node, NULL,
					      pte_page(ptep_get(pte)));
	}

	size = min(end - start, pgmap_vmemmap_nr(pgmap) * sizeof(struct page));
	for (addr = start; addr < end; addr += size) {
		unsigned long next, last = addr + size;

		/* Populate the head page vmemmap page */
		pte = vmemmap_populate_address(addr, node, NULL, NULL);
		if (!pte)
			return -ENOMEM;

		/* Populate the tail pages vmemmap page */
		next = addr + PAGE_SIZE;
		pte = vmemmap_populate_address(next, node, NULL, NULL);
		if (!pte)
			return -ENOMEM;

		/*
		 * Reuse the previous page for the rest of tail pages
		 * See layout diagram in Documentation/mm/vmemmap_dedup.rst
		 */
		next += PAGE_SIZE;
		rc = vmemmap_populate_range(next, last, node, NULL,
					    pte_page(ptep_get(pte)));
		if (rc)
			return -ENOMEM;
	}

	return 0;
}

#endif
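/*
 * Populate the memmap for a subsection-aligned PFN range and account the
 * pages consumed by the memmap itself; returns the first struct page.
 */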
struct page * __meminit __populate_section_memmap(unsigned long pfn,
		unsigned long nr_pages, int nid, struct vmem_altmap *altmap,
		struct dev_pagemap *pgmap)
{
	unsigned long start = (unsigned long) pfn_to_page(pfn);
	unsigned long end = start + nr_pages * sizeof(struct page);
	int r;

	if (WARN_ON_ONCE(!IS_ALIGNED(pfn, PAGES_PER_SUBSECTION) ||
		!IS_ALIGNED(nr_pages, PAGES_PER_SUBSECTION)))
		return NULL;

	if (vmemmap_can_optimize(altmap, pgmap))
		r = vmemmap_populate_compound_pages(pfn, start, end, nid, pgmap);
	else
		r = vmemmap_populate(start, end, nid, altmap);

	if (r < 0)
		return NULL;

	if (system_state == SYSTEM_BOOTING)
		memmap_boot_pages_add(DIV_ROUND_UP(end - start, PAGE_SIZE));
	else
		memmap_pages_add(DIV_ROUND_UP(end - start, PAGE_SIZE));

	return pfn_to_page(pfn);
}