// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/x86_64/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2000  Pavel Machek <pavel@ucw.cz>
 *  Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/dma-mapping.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/memremap.h>
#include <linux/nmi.h>
#include <linux/gfp.h>
#include <linux/kcore.h>
#include <linux/bootmem_info.h>

#include <asm/processor.h>
#include <asm/bios_ebda.h>
#include <linux/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820/api.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/sections.h>
#include <asm/kdebug.h>
#include <asm/numa.h>
#include <asm/set_memory.h>
#include <asm/init.h>
#include <asm/uv/uv.h>
#include <asm/setup.h>
#include <asm/ftrace.h>

#include "mm_internal.h"

#include "ident_map.c"
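
/*
 * The *_init() wrappers below pick between the _safe() page-table setters,
 * which BUG on overwriting a valid entry, and the plain ones. The "init"
 * argument selects the _safe variants while the initial direct mapping is
 * built, so silent corruption of existing entries is caught early.
 */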
#define DEFINE_POPULATE(fname, type1, type2, init)		\
static inline void fname##_init(struct mm_struct *mm,		\
		type1##_t *arg1, type2##_t *arg2, bool init)	\
{								\
	if (init)						\
		fname##_safe(mm, arg1, arg2);			\
	else							\
		fname(mm, arg1, arg2);				\
}

DEFINE_POPULATE(p4d_populate, p4d, pud, init)
DEFINE_POPULATE(pgd_populate, pgd, p4d, init)
DEFINE_POPULATE(pud_populate, pud, pmd, init)
DEFINE_POPULATE(pmd_populate_kernel, pmd, pte, init)

#define DEFINE_ENTRY(type1, type2, init)			\
static inline void set_##type1##_init(type1##_t *arg1,		\
			type2##_t arg2, bool init)		\
{								\
	if (init)						\
		set_##type1##_safe(arg1, arg2);			\
	else							\
		set_##type1(arg1, arg2);			\
}

DEFINE_ENTRY(p4d, p4d, init)
DEFINE_ENTRY(pud, pud, init)
DEFINE_ENTRY(pmd, pmd, init)
DEFINE_ENTRY(pte, pte, init)

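/*
 * Turn a 4k protection value into a huge-page one by setting _PAGE_PSE.
 * A 4k-style _PAGE_PAT bit occupies the same bit position as _PAGE_PSE in
 * a huge-page entry, so warn if a caller passes one in.
 */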
static inline pgprot_t prot_sethuge(pgprot_t prot)
{
	WARN_ON_ONCE(pgprot_val(prot) & _PAGE_PAT);

	return __pgprot(pgprot_val(prot) | _PAGE_PSE);
}

/*
 * NOTE: pagetable_init allocates all the fixmap pagetables contiguously in
 * physical space, so we can cache the place of the first one and move
 * around without checking the pgd every time.
 */

/* Bits supported by the hardware: */
pteval_t __supported_pte_mask __read_mostly = ~0;
/* Bits allowed in normal kernel mappings: */
pteval_t __default_kernel_pte_mask __read_mostly = ~0;
EXPORT_SYMBOL_GPL(__supported_pte_mask);
/* Used in PAGE_KERNEL_* macros which are reasonably used out-of-tree: */
EXPORT_SYMBOL(__default_kernel_pte_mask);

int force_personality32;

/*
 * noexec32=on|off
 * Control non executable heap for 32bit processes.
 *
 * on	PROT_READ does not imply PROT_EXEC for 32-bit processes (default)
 * off	PROT_READ implies PROT_EXEC
 */
static int __init nonx32_setup(char *str)
{
	if (!strcmp(str, "on"))
		force_personality32 &= ~READ_IMPLIES_EXEC;
	else if (!strcmp(str, "off"))
		force_personality32 |= READ_IMPLIES_EXEC;
	return 1;
}
__setup("noexec32=", nonx32_setup);

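/*
 * With 5-level paging the kernel address space is covered by top-level PGD
 * entries, so a newly created entry must be copied into every PGD on
 * pgd_list. sync_global_pgds_l4() below does the same one level down, at
 * the p4d level, for 4-level paging where the p4d is folded into the pgd.
 */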
static void sync_global_pgds_l5(unsigned long start, unsigned long end)
{
	unsigned long addr;

	for (addr = start; addr <= end; addr = ALIGN(addr + 1, PGDIR_SIZE)) {
		const pgd_t *pgd_ref = pgd_offset_k(addr);
		struct page *page;

		/* Check for overflow */
		if (addr < start)
			break;

		if (pgd_none(*pgd_ref))
			continue;

		spin_lock(&pgd_lock);
		list_for_each_entry(page, &pgd_list, lru) {
			pgd_t *pgd;
			spinlock_t *pgt_lock;

			pgd = (pgd_t *)page_address(page) + pgd_index(addr);
			/* the pgt_lock only for Xen */
			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
			spin_lock(pgt_lock);

			if (!pgd_none(*pgd_ref) && !pgd_none(*pgd))
				BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));

			if (pgd_none(*pgd))
				set_pgd(pgd, *pgd_ref);

			spin_unlock(pgt_lock);
		}
		spin_unlock(&pgd_lock);
	}
}

static void sync_global_pgds_l4(unsigned long start, unsigned long end)
{
	unsigned long addr;

	for (addr = start; addr <= end; addr = ALIGN(addr + 1, PGDIR_SIZE)) {
		pgd_t *pgd_ref = pgd_offset_k(addr);
		const p4d_t *p4d_ref;
		struct page *page;

		/*
		 * With folded p4d, pgd_none() is always false, we need to
		 * handle synchronization on p4d level.
		 */
		MAYBE_BUILD_BUG_ON(pgd_none(*pgd_ref));
		p4d_ref = p4d_offset(pgd_ref, addr);

		if (p4d_none(*p4d_ref))
			continue;

		spin_lock(&pgd_lock);
		list_for_each_entry(page, &pgd_list, lru) {
			pgd_t *pgd;
			p4d_t *p4d;
			spinlock_t *pgt_lock;

			pgd = (pgd_t *)page_address(page) + pgd_index(addr);
			p4d = p4d_offset(pgd, addr);
			/* the pgt_lock only for Xen */
			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
			spin_lock(pgt_lock);

			if (!p4d_none(*p4d_ref) && !p4d_none(*p4d))
				BUG_ON(p4d_pgtable(*p4d)
				       != p4d_pgtable(*p4d_ref));

			if (p4d_none(*p4d))
				set_p4d(p4d, *p4d_ref);

			spin_unlock(pgt_lock);
		}
		spin_unlock(&pgd_lock);
	}
}

/*
 * When memory was added, make sure all the process MMs have
 * suitable PGD entries in the local PGD level page.
 */
static void sync_global_pgds(unsigned long start, unsigned long end)
{
	if (pgtable_l5_enabled())
		sync_global_pgds_l5(start, end);
	else
		sync_global_pgds_l4(start, end);
}

/*
 * NOTE: This function is marked __ref because it calls __init code
 * (memblock_alloc()). It's safe to do so ONLY when after_bootmem == 0.
 */
static __ref void *spp_getpage(void)
{
	void *ptr;

	if (after_bootmem)
		ptr = (void *) get_zeroed_page(GFP_ATOMIC);
	else
		ptr = memblock_alloc(PAGE_SIZE, PAGE_SIZE);

	if (!ptr || ((unsigned long)ptr & ~PAGE_MASK)) {
		panic("set_pte_phys: cannot allocate page data %s\n",
			after_bootmem ? "after bootmem" : "");
	}

	pr_debug("spp_getpage %p\n", ptr);

	return ptr;
}

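/*
 * The fill_*() helpers below return the page-table entry for @vaddr at the
 * respective level, allocating and hooking up a new lower-level table via
 * spp_getpage() if the entry is still empty.
 */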
static p4d_t *fill_p4d(pgd_t *pgd, unsigned long vaddr)
{
	if (pgd_none(*pgd)) {
		p4d_t *p4d = (p4d_t *)spp_getpage();
		pgd_populate(&init_mm, pgd, p4d);
		if (p4d != p4d_offset(pgd, 0))
			printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
			       p4d, p4d_offset(pgd, 0));
	}
	return p4d_offset(pgd, vaddr);
}

static pud_t *fill_pud(p4d_t *p4d, unsigned long vaddr)
{
	if (p4d_none(*p4d)) {
		pud_t *pud = (pud_t *)spp_getpage();
		p4d_populate(&init_mm, p4d, pud);
		if (pud != pud_offset(p4d, 0))
			printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
			       pud, pud_offset(p4d, 0));
	}
	return pud_offset(p4d, vaddr);
}

static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
{
	if (pud_none(*pud)) {
		pmd_t *pmd = (pmd_t *) spp_getpage();
		pud_populate(&init_mm, pud, pmd);
		if (pmd != pmd_offset(pud, 0))
			printk(KERN_ERR "PAGETABLE BUG #02! %p <-> %p\n",
			       pmd, pmd_offset(pud, 0));
	}
	return pmd_offset(pud, vaddr);
}

static pte_t *fill_pte(pmd_t *pmd, unsigned long vaddr)
{
	if (pmd_none(*pmd)) {
		pte_t *pte = (pte_t *) spp_getpage();
		pmd_populate_kernel(&init_mm, pmd, pte);
		if (pte != pte_offset_kernel(pmd, 0))
			printk(KERN_ERR "PAGETABLE BUG #03!\n");
	}
	return pte_offset_kernel(pmd, vaddr);
}

static void __set_pte_vaddr(pud_t *pud, unsigned long vaddr, pte_t new_pte)
{
	pmd_t *pmd = fill_pmd(pud, vaddr);
	pte_t *pte = fill_pte(pmd, vaddr);

	set_pte(pte, new_pte);

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	flush_tlb_one_kernel(vaddr);
}

void set_pte_vaddr_p4d(p4d_t *p4d_page, unsigned long vaddr, pte_t new_pte)
{
	p4d_t *p4d = p4d_page + p4d_index(vaddr);
	pud_t *pud = fill_pud(p4d, vaddr);

	__set_pte_vaddr(pud, vaddr, new_pte);
}

void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
{
	pud_t *pud = pud_page + pud_index(vaddr);

	__set_pte_vaddr(pud, vaddr, new_pte);
}

void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
{
	pgd_t *pgd;
	p4d_t *p4d_page;

	pr_debug("set_pte_vaddr %lx to %lx\n", vaddr, native_pte_val(pteval));

	pgd = pgd_offset_k(vaddr);
	if (pgd_none(*pgd)) {
		printk(KERN_ERR
			"PGD FIXMAP MISSING, it should be setup in head.S!\n");
		return;
	}

	p4d_page = p4d_offset(pgd, 0);
	set_pte_vaddr_p4d(p4d_page, vaddr, pteval);
}

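/*
 * Make sure all intermediate levels for @vaddr exist in the kernel page
 * table and hand back the PMD (or, below, the PTE) that maps it, allocating
 * levels as needed.
 */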
pmd_t * __init populate_extra_pmd(unsigned long vaddr)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;

	pgd = pgd_offset_k(vaddr);
	p4d = fill_p4d(pgd, vaddr);
	pud = fill_pud(p4d, vaddr);

	return fill_pmd(pud, vaddr);
}

pte_t * __init populate_extra_pte(unsigned long vaddr)
{
	pmd_t *pmd;

	pmd = populate_extra_pmd(vaddr);

	return fill_pte(pmd, vaddr);
}

/*
 * Create large page table mappings for a range of physical addresses.
 */
static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
					enum page_cache_mode cache)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pgprot_t prot;

	pgprot_val(prot) = pgprot_val(PAGE_KERNEL_LARGE) |
		protval_4k_2_large(cachemode2protval(cache));
	BUG_ON((phys & ~PMD_MASK) || (size & ~PMD_MASK));
	for (; size; phys += PMD_SIZE, size -= PMD_SIZE) {
		pgd = pgd_offset_k((unsigned long)__va(phys));
		if (pgd_none(*pgd)) {
			p4d = (p4d_t *) spp_getpage();
			set_pgd(pgd, __pgd(__pa(p4d) | _KERNPG_TABLE |
					   _PAGE_USER));
		}
		p4d = p4d_offset(pgd, (unsigned long)__va(phys));
		if (p4d_none(*p4d)) {
			pud = (pud_t *) spp_getpage();
			set_p4d(p4d, __p4d(__pa(pud) | _KERNPG_TABLE |
					   _PAGE_USER));
		}
		pud = pud_offset(p4d, (unsigned long)__va(phys));
		if (pud_none(*pud)) {
			pmd = (pmd_t *) spp_getpage();
			set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
					   _PAGE_USER));
		}
		pmd = pmd_offset(pud, phys);
		BUG_ON(!pmd_none(*pmd));
		set_pmd(pmd, __pmd(phys | pgprot_val(prot)));
	}
}

void __init init_extra_mapping_wb(unsigned long phys, unsigned long size)
{
	__init_extra_mapping(phys, size, _PAGE_CACHE_MODE_WB);
}

void __init init_extra_mapping_uc(unsigned long phys, unsigned long size)
{
	__init_extra_mapping(phys, size, _PAGE_CACHE_MODE_UC);
}

/*
 * The head.S code sets up the kernel high mapping:
 *
 *   from __START_KERNEL_map to __START_KERNEL_map + size (== _end-_text)
 *
 * phys_base holds the negative offset to the kernel, which is added
 * to the compile time generated pmds. This results in invalid pmds up
 * to the point where we hit the physaddr 0 mapping.
 *
 * We limit the mappings to the region from _text to _brk_end.  _brk_end
 * is rounded up to the 2MB boundary. This catches the invalid pmds as
 * well, as they are located before _text:
 */
void __init cleanup_highmap(void)
{
	unsigned long vaddr = __START_KERNEL_map;
	unsigned long vaddr_end = __START_KERNEL_map + KERNEL_IMAGE_SIZE;
	unsigned long end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
	pmd_t *pmd = level2_kernel_pgt;

	/*
	 * Native path, max_pfn_mapped is not set yet.
	 * Xen has valid max_pfn_mapped set in
	 *	arch/x86/xen/mmu.c:xen_setup_kernel_pagetable().
	 */
	if (max_pfn_mapped)
		vaddr_end = __START_KERNEL_map + (max_pfn_mapped << PAGE_SHIFT);

	for (; vaddr + PMD_SIZE - 1 < vaddr_end; pmd++, vaddr += PMD_SIZE) {
		if (pmd_none(*pmd))
			continue;
		if (vaddr < (unsigned long) _text || vaddr > end)
			set_pmd(pmd, __pmd(0));
	}
}

/*
 * Create PTE level page table mapping for physical addresses.
 * It returns the last physical address mapped.
 */
static unsigned long __meminit
phys_pte_init(pte_t *pte_page, unsigned long paddr, unsigned long paddr_end,
	      pgprot_t prot, bool init)
{
	unsigned long pages = 0, paddr_next;
	unsigned long paddr_last = paddr_end;
	pte_t *pte;
	int i;

	pte = pte_page + pte_index(paddr);
	i = pte_index(paddr);

	for (; i < PTRS_PER_PTE; i++, paddr = paddr_next, pte++) {
		paddr_next = (paddr & PAGE_MASK) + PAGE_SIZE;
		if (paddr >= paddr_end) {
			if (!after_bootmem &&
			    !e820__mapped_any(paddr & PAGE_MASK, paddr_next,
					      E820_TYPE_RAM) &&
			    !e820__mapped_any(paddr & PAGE_MASK, paddr_next,
					      E820_TYPE_RESERVED_KERN) &&
			    !e820__mapped_any(paddr & PAGE_MASK, paddr_next,
					      E820_TYPE_ACPI))
				set_pte_init(pte, __pte(0), init);
			continue;
		}

		/*
		 * We will re-use the existing mapping.
		 * Xen for example has some special requirements, like mapping
		 * pagetable pages as RO. So assume whoever pre-set up these
		 * mappings knew what they were doing.
		 */
		if (!pte_none(*pte)) {
			if (!after_bootmem)
				pages++;
			continue;
		}

		if (0)
			pr_info(" pte=%p addr=%lx pte=%016lx\n", pte, paddr,
				pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL).pte);
		pages++;
		set_pte_init(pte, pfn_pte(paddr >> PAGE_SHIFT, prot), init);
		paddr_last = (paddr & PAGE_MASK) + PAGE_SIZE;
	}

	update_page_count(PG_LEVEL_4K, pages);

	return paddr_last;
}

/*
 * Create PMD level page table mapping for physical addresses. The virtual
 * and physical address have to be aligned at this level.
 * It returns the last physical address mapped.
 */
static unsigned long __meminit
phys_pmd_init(pmd_t *pmd_page, unsigned long paddr, unsigned long paddr_end,
	      unsigned long page_size_mask, pgprot_t prot, bool init)
{
	unsigned long pages = 0, paddr_next;
	unsigned long paddr_last = paddr_end;

	int i = pmd_index(paddr);

	for (; i < PTRS_PER_PMD; i++, paddr = paddr_next) {
		pmd_t *pmd = pmd_page + pmd_index(paddr);
		pte_t *pte;
		pgprot_t new_prot = prot;

		paddr_next = (paddr & PMD_MASK) + PMD_SIZE;
		if (paddr >= paddr_end) {
			if (!after_bootmem &&
			    !e820__mapped_any(paddr & PMD_MASK, paddr_next,
					      E820_TYPE_RAM) &&
			    !e820__mapped_any(paddr & PMD_MASK, paddr_next,
					      E820_TYPE_RESERVED_KERN) &&
			    !e820__mapped_any(paddr & PMD_MASK, paddr_next,
					      E820_TYPE_ACPI))
				set_pmd_init(pmd, __pmd(0), init);
			continue;
		}

		if (!pmd_none(*pmd)) {
			if (!pmd_leaf(*pmd)) {
				spin_lock(&init_mm.page_table_lock);
				pte = (pte_t *)pmd_page_vaddr(*pmd);
				paddr_last = phys_pte_init(pte, paddr,
							   paddr_end, prot,
							   init);
				spin_unlock(&init_mm.page_table_lock);
				continue;
			}
			/*
			 * If we are ok with PG_LEVEL_2M mapping, then we will
			 * use the existing mapping.
			 *
			 * Otherwise, we will split the large page mapping but
			 * use the same existing protection bits except for
			 * large page, so that we don't violate Intel's TLB
			 * Application note (317080) which says, while changing
			 * the page sizes, new and old translations should
			 * not differ with respect to page frame and
			 * attributes.
			 */
			if (page_size_mask & (1 << PG_LEVEL_2M)) {
				if (!after_bootmem)
					pages++;
				paddr_last = paddr_next;
				continue;
			}
			new_prot = pte_pgprot(pte_clrhuge(*(pte_t *)pmd));
		}

		if (page_size_mask & (1<<PG_LEVEL_2M)) {
			pages++;
			spin_lock(&init_mm.page_table_lock);
			set_pmd_init(pmd,
				     pfn_pmd(paddr >> PAGE_SHIFT, prot_sethuge(prot)),
				     init);
			spin_unlock(&init_mm.page_table_lock);
			paddr_last = paddr_next;
			continue;
		}

		pte = alloc_low_page();
		paddr_last = phys_pte_init(pte, paddr, paddr_end, new_prot, init);

		spin_lock(&init_mm.page_table_lock);
		pmd_populate_kernel_init(&init_mm, pmd, pte, init);
		spin_unlock(&init_mm.page_table_lock);
	}
	update_page_count(PG_LEVEL_2M, pages);
	return paddr_last;
}

/*
 * Create PUD level page table mapping for physical addresses. The virtual
 * and physical address do not have to be aligned at this level. KASLR can
 * randomize virtual addresses up to this level.
 * It returns the last physical address mapped.
 */
static unsigned long __meminit
phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
	      unsigned long page_size_mask, pgprot_t _prot, bool init)
{
	unsigned long pages = 0, paddr_next;
	unsigned long paddr_last = paddr_end;
	unsigned long vaddr = (unsigned long)__va(paddr);
	int i = pud_index(vaddr);

	for (; i < PTRS_PER_PUD; i++, paddr = paddr_next) {
		pud_t *pud;
		pmd_t *pmd;
		pgprot_t prot = _prot;

		vaddr = (unsigned long)__va(paddr);
		pud = pud_page + pud_index(vaddr);
		paddr_next = (paddr & PUD_MASK) + PUD_SIZE;

		if (paddr >= paddr_end) {
			if (!after_bootmem &&
			    !e820__mapped_any(paddr & PUD_MASK, paddr_next,
					      E820_TYPE_RAM) &&
			    !e820__mapped_any(paddr & PUD_MASK, paddr_next,
					      E820_TYPE_RESERVED_KERN) &&
			    !e820__mapped_any(paddr & PUD_MASK, paddr_next,
					      E820_TYPE_ACPI))
				set_pud_init(pud, __pud(0), init);
			continue;
		}

		if (!pud_none(*pud)) {
			if (!pud_leaf(*pud)) {
				pmd = pmd_offset(pud, 0);
				paddr_last = phys_pmd_init(pmd, paddr,
							   paddr_end,
							   page_size_mask,
							   prot, init);
				continue;
			}
			/*
			 * If we are ok with PG_LEVEL_1G mapping, then we will
			 * use the existing mapping.
			 *
			 * Otherwise, we will split the gbpage mapping but use
			 * the same existing protection bits except for large
			 * page, so that we don't violate Intel's TLB
			 * Application note (317080) which says, while changing
			 * the page sizes, new and old translations should
			 * not differ with respect to page frame and
			 * attributes.
			 */
			if (page_size_mask & (1 << PG_LEVEL_1G)) {
				if (!after_bootmem)
					pages++;
				paddr_last = paddr_next;
				continue;
			}
			prot = pte_pgprot(pte_clrhuge(*(pte_t *)pud));
		}

		if (page_size_mask & (1<<PG_LEVEL_1G)) {
			pages++;
			spin_lock(&init_mm.page_table_lock);
			set_pud_init(pud,
				     pfn_pud(paddr >> PAGE_SHIFT, prot_sethuge(prot)),
				     init);
			spin_unlock(&init_mm.page_table_lock);
			paddr_last = paddr_next;
			continue;
		}

		pmd = alloc_low_page();
		paddr_last = phys_pmd_init(pmd, paddr, paddr_end,
					   page_size_mask, prot, init);

		spin_lock(&init_mm.page_table_lock);
		pud_populate_init(&init_mm, pud, pmd, init);
		spin_unlock(&init_mm.page_table_lock);
	}

	update_page_count(PG_LEVEL_1G, pages);

	return paddr_last;
}

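/*
 * Create P4D level page table mapping for physical addresses. With 4-level
 * paging the P4D level is folded and the whole range is simply handed to
 * phys_pud_init(). It returns the last physical address mapped.
 */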
static unsigned long __meminit
phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end,
	      unsigned long page_size_mask, pgprot_t prot, bool init)
{
	unsigned long vaddr, vaddr_end, vaddr_next, paddr_next, paddr_last;

	paddr_last = paddr_end;
	vaddr = (unsigned long)__va(paddr);
	vaddr_end = (unsigned long)__va(paddr_end);

	if (!pgtable_l5_enabled())
		return phys_pud_init((pud_t *) p4d_page, paddr, paddr_end,
				     page_size_mask, prot, init);

	for (; vaddr < vaddr_end; vaddr = vaddr_next) {
		p4d_t *p4d = p4d_page + p4d_index(vaddr);
		pud_t *pud;

		vaddr_next = (vaddr & P4D_MASK) + P4D_SIZE;
		paddr = __pa(vaddr);

		if (paddr >= paddr_end) {
			paddr_next = __pa(vaddr_next);
			if (!after_bootmem &&
			    !e820__mapped_any(paddr & P4D_MASK, paddr_next,
					      E820_TYPE_RAM) &&
			    !e820__mapped_any(paddr & P4D_MASK, paddr_next,
					      E820_TYPE_RESERVED_KERN) &&
			    !e820__mapped_any(paddr & P4D_MASK, paddr_next,
					      E820_TYPE_ACPI))
				set_p4d_init(p4d, __p4d(0), init);
			continue;
		}

		if (!p4d_none(*p4d)) {
			pud = pud_offset(p4d, 0);
			paddr_last = phys_pud_init(pud, paddr, __pa(vaddr_end),
						   page_size_mask, prot, init);
			continue;
		}

		pud = alloc_low_page();
		paddr_last = phys_pud_init(pud, paddr, __pa(vaddr_end),
					   page_size_mask, prot, init);

		spin_lock(&init_mm.page_table_lock);
		p4d_populate_init(&init_mm, p4d, pud, init);
		spin_unlock(&init_mm.page_table_lock);
	}

	return paddr_last;
}

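/*
 * Worker shared by kernel_physical_mapping_init() and
 * kernel_physical_mapping_change(): map [paddr_start, paddr_end) in the
 * direct mapping, using either the _safe page-table setters (init == true)
 * or the plain ones, and sync any newly installed PGD entries into all
 * page tables.
 */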
static unsigned long __meminit
__kernel_physical_mapping_init(unsigned long paddr_start,
			       unsigned long paddr_end,
			       unsigned long page_size_mask,
			       pgprot_t prot, bool init)
{
	bool pgd_changed = false;
	unsigned long vaddr, vaddr_start, vaddr_end, vaddr_next, paddr_last;

	paddr_last = paddr_end;
	vaddr = (unsigned long)__va(paddr_start);
	vaddr_end = (unsigned long)__va(paddr_end);
	vaddr_start = vaddr;

	for (; vaddr < vaddr_end; vaddr = vaddr_next) {
		pgd_t *pgd = pgd_offset_k(vaddr);
		p4d_t *p4d;

		vaddr_next = (vaddr & PGDIR_MASK) + PGDIR_SIZE;

		if (pgd_val(*pgd)) {
			p4d = (p4d_t *)pgd_page_vaddr(*pgd);
			paddr_last = phys_p4d_init(p4d, __pa(vaddr),
						   __pa(vaddr_end),
						   page_size_mask,
						   prot, init);
			continue;
		}

		p4d = alloc_low_page();
		paddr_last = phys_p4d_init(p4d, __pa(vaddr), __pa(vaddr_end),
					   page_size_mask, prot, init);

		spin_lock(&init_mm.page_table_lock);
		if (pgtable_l5_enabled())
			pgd_populate_init(&init_mm, pgd, p4d, init);
		else
			p4d_populate_init(&init_mm, p4d_offset(pgd, vaddr),
					  (pud_t *) p4d, init);

		spin_unlock(&init_mm.page_table_lock);
		pgd_changed = true;
	}

	if (pgd_changed)
		sync_global_pgds(vaddr_start, vaddr_end - 1);

	return paddr_last;
}

/*
 * Create page table mapping for the physical memory for specific physical
 * addresses. Note that it can only be used to populate non-present entries.
 * The virtual and physical addresses have to be aligned on PMD level
 * down. It returns the last physical address mapped.
 */
unsigned long __meminit
kernel_physical_mapping_init(unsigned long paddr_start,
			     unsigned long paddr_end,
			     unsigned long page_size_mask, pgprot_t prot)
{
	return __kernel_physical_mapping_init(paddr_start, paddr_end,
					      page_size_mask, prot, true);
}

/*
 * This function is similar to kernel_physical_mapping_init() above with the
 * exception that it uses set_{pud,pmd}() instead of the set_{pud,pmd}_safe()
 * functions when updating the mapping. The caller is responsible for flushing
 * the TLBs after the function returns.
 */
unsigned long __meminit
kernel_physical_mapping_change(unsigned long paddr_start,
			       unsigned long paddr_end,
			       unsigned long page_size_mask)
{
	return __kernel_physical_mapping_init(paddr_start, paddr_end,
					      page_size_mask, PAGE_KERNEL,
					      false);
}

#ifndef CONFIG_NUMA
void __init initmem_init(void)
{
	memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
}
#endif

void __init paging_init(void)
{
	sparse_init();

	/*
	 * clear the default setting with node 0
	 * note: don't use nodes_clear here, that is really clearing when
	 *	 numa support is not compiled in, and later node_set_state
	 *	 will not set it back.
	 */
	node_clear_state(0, N_MEMORY);
	node_clear_state(0, N_NORMAL_MEMORY);

	zone_sizes_init();
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
#define PAGE_UNUSED 0xFD

/*
 * The unused vmemmap range, which was not yet memset(PAGE_UNUSED), ranges
 * from unused_pmd_start to the next PMD_SIZE boundary.
 */
static unsigned long unused_pmd_start __meminitdata;

static void __meminit vmemmap_flush_unused_pmd(void)
{
	if (!unused_pmd_start)
		return;
	/*
	 * Clears (unused_pmd_start, PMD_END]
	 */
	memset((void *)unused_pmd_start, PAGE_UNUSED,
	       ALIGN(unused_pmd_start, PMD_SIZE) - unused_pmd_start);
	unused_pmd_start = 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
/* Returns true if the PMD is completely unused and thus it can be freed */
static bool __meminit vmemmap_pmd_is_unused(unsigned long addr, unsigned long end)
{
	unsigned long start = ALIGN_DOWN(addr, PMD_SIZE);

	/*
	 * Flush the unused range cache to ensure that memchr_inv() will work
	 * for the whole range.
	 */
	vmemmap_flush_unused_pmd();
	memset((void *)addr, PAGE_UNUSED, end - addr);

	return !memchr_inv((void *)start, PAGE_UNUSED, PMD_SIZE);
}
#endif

static void __meminit __vmemmap_use_sub_pmd(unsigned long start)
{
	/*
	 * As we expect to add in the same granularity as we remove, it's
	 * sufficient to mark only some piece used to block the memmap page from
	 * getting removed when removing some other adjacent memmap (just in
	 * case the first memmap never gets initialized e.g., because the memory
	 * block never gets onlined).
	 */
	memset((void *)start, 0, sizeof(struct page));
}

static void __meminit vmemmap_use_sub_pmd(unsigned long start, unsigned long end)
{
	/*
	 * We only optimize if the new used range directly follows the
	 * previously unused range (esp., when populating consecutive sections).
	 */
	if (unused_pmd_start == start) {
		if (likely(IS_ALIGNED(end, PMD_SIZE)))
			unused_pmd_start = 0;
		else
			unused_pmd_start = end;
		return;
	}

	/*
	 * If the range does not contiguously follow the previous one, make sure
	 * to mark the unused range of the previous one so it can be removed.
	 */
	vmemmap_flush_unused_pmd();
	__vmemmap_use_sub_pmd(start);
}

static void __meminit vmemmap_use_new_sub_pmd(unsigned long start, unsigned long end)
{
	const unsigned long page = ALIGN_DOWN(start, PMD_SIZE);

	vmemmap_flush_unused_pmd();

	/*
	 * Could be our memmap page is filled with PAGE_UNUSED already from a
	 * previous remove. Make sure to reset it.
	 */
	__vmemmap_use_sub_pmd(start);

	/*
	 * Mark with PAGE_UNUSED the unused parts of the new memmap range
	 */
	if (!IS_ALIGNED(start, PMD_SIZE))
		memset((void *)page, PAGE_UNUSED, start - page);

	/*
	 * We want to avoid memset(PAGE_UNUSED) when populating the vmemmap of
	 * consecutive sections. Remember for the last added PMD where the
	 * unused range begins.
	 */
	if (!IS_ALIGNED(end, PMD_SIZE))
		unused_pmd_start = end;
}
#endif

/*
 * Memory hotplug specific functions
 */
#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * After memory hotplug the variables max_pfn, max_low_pfn and high_memory need
 * updating.
 */
static void update_end_of_memory_vars(u64 start, u64 size)
{
	unsigned long end_pfn = PFN_UP(start + size);

	if (end_pfn > max_pfn) {
		max_pfn = end_pfn;
		max_low_pfn = end_pfn;
		high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
	}
}

int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
	      struct mhp_params *params)
{
	unsigned long end = ((start_pfn + nr_pages) << PAGE_SHIFT) - 1;
	int ret;

	if (WARN_ON_ONCE(end > DIRECT_MAP_PHYSMEM_END))
		return -ERANGE;

	ret = __add_pages(nid, start_pfn, nr_pages, params);
	WARN_ON_ONCE(ret);

	/* update max_pfn, max_low_pfn and high_memory */
	update_end_of_memory_vars(start_pfn << PAGE_SHIFT,
				  nr_pages << PAGE_SHIFT);

	return ret;
}

int arch_add_memory(int nid, u64 start, u64 size,
		    struct mhp_params *params)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	init_memory_mapping(start, start + size, params->pgprot);

	return add_pages(nid, start_pfn, nr_pages, params);
}

static void free_reserved_pages(struct page *page, unsigned long nr_pages)
{
	while (nr_pages--)
		free_reserved_page(page++);
}

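/*
 * Free one page-table page. Pages that still carry PG_reserved were handed
 * out by the early (boot) allocator and need their bootmem accounting undone;
 * everything else goes straight back to the page allocator.
 */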
static void __meminit free_pagetable(struct page *page, int order)
{
	/* bootmem page has reserved flag */
	if (PageReserved(page)) {
		unsigned long nr_pages = 1 << order;
#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
		enum bootmem_type type = bootmem_type(page);

		if (type == SECTION_INFO || type == MIX_SECTION_INFO) {
			while (nr_pages--)
				put_page_bootmem(page++);
		} else {
			free_reserved_pages(page, nr_pages);
		}
#else
		free_reserved_pages(page, nr_pages);
#endif
	} else {
		free_pages((unsigned long)page_address(page), order);
	}
}

static void __meminit free_hugepage_table(struct page *page,
		struct vmem_altmap *altmap)
{
	if (altmap)
		vmem_altmap_free(altmap, PMD_SIZE / PAGE_SIZE);
	else
		free_pagetable(page, get_order(PMD_SIZE));
}

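/*
 * free_pte_table(), free_pmd_table() and free_pud_table() free a lower
 * level table once every entry in it has been cleared, and then clear the
 * entry in the level above that pointed to it.
 */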
static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd)
{
	pte_t *pte;
	int i;

	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte = pte_start + i;
		if (!pte_none(*pte))
			return;
	}

	/* free a pte table */
	free_pagetable(pmd_page(*pmd), 0);
	spin_lock(&init_mm.page_table_lock);
	pmd_clear(pmd);
	spin_unlock(&init_mm.page_table_lock);
}

static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud)
{
	pmd_t *pmd;
	int i;

	for (i = 0; i < PTRS_PER_PMD; i++) {
		pmd = pmd_start + i;
		if (!pmd_none(*pmd))
			return;
	}

	/* free a pmd table */
	free_pagetable(pud_page(*pud), 0);
	spin_lock(&init_mm.page_table_lock);
	pud_clear(pud);
	spin_unlock(&init_mm.page_table_lock);
}

static void __meminit free_pud_table(pud_t *pud_start, p4d_t *p4d)
{
	pud_t *pud;
	int i;

	for (i = 0; i < PTRS_PER_PUD; i++) {
		pud = pud_start + i;
		if (!pud_none(*pud))
			return;
	}

	/* free a pud table */
	free_pagetable(p4d_page(*p4d), 0);
	spin_lock(&init_mm.page_table_lock);
	p4d_clear(p4d);
	spin_unlock(&init_mm.page_table_lock);
}

static void __meminit
remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end,
		 bool direct)
{
	unsigned long next, pages = 0;
	pte_t *pte;
	phys_addr_t phys_addr;

	pte = pte_start + pte_index(addr);
	for (; addr < end; addr = next, pte++) {
		next = (addr + PAGE_SIZE) & PAGE_MASK;
		if (next > end)
			next = end;

		if (!pte_present(*pte))
			continue;

		/*
		 * We mapped [0,1G) memory as identity mapping when
		 * initializing, in arch/x86/kernel/head_64.S. These
		 * pagetables cannot be removed.
		 */
		phys_addr = pte_val(*pte) + (addr & PAGE_MASK);
		if (phys_addr < (phys_addr_t)0x40000000)
			return;

		if (!direct)
			free_pagetable(pte_page(*pte), 0);

		spin_lock(&init_mm.page_table_lock);
		pte_clear(&init_mm, addr, pte);
		spin_unlock(&init_mm.page_table_lock);

		/* For non-direct mapping, pages means nothing. */
		pages++;
	}

	/* Call free_pte_table() in remove_pmd_table(). */
	flush_tlb_all();
	if (direct)
		update_page_count(PG_LEVEL_4K, -pages);
}

static void __meminit
remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end,
		 bool direct, struct vmem_altmap *altmap)
{
	unsigned long next, pages = 0;
	pte_t *pte_base;
	pmd_t *pmd;

	pmd = pmd_start + pmd_index(addr);
	for (; addr < end; addr = next, pmd++) {
		next = pmd_addr_end(addr, end);

		if (!pmd_present(*pmd))
			continue;

		if (pmd_leaf(*pmd)) {
			if (IS_ALIGNED(addr, PMD_SIZE) &&
			    IS_ALIGNED(next, PMD_SIZE)) {
				if (!direct)
					free_hugepage_table(pmd_page(*pmd),
							    altmap);

				spin_lock(&init_mm.page_table_lock);
				pmd_clear(pmd);
				spin_unlock(&init_mm.page_table_lock);
				pages++;
			}
#ifdef CONFIG_SPARSEMEM_VMEMMAP
			else if (vmemmap_pmd_is_unused(addr, next)) {
				free_hugepage_table(pmd_page(*pmd),
						    altmap);
				spin_lock(&init_mm.page_table_lock);
				pmd_clear(pmd);
				spin_unlock(&init_mm.page_table_lock);
			}
#endif
			continue;
		}

		pte_base = (pte_t *)pmd_page_vaddr(*pmd);
		remove_pte_table(pte_base, addr, next, direct);
		free_pte_table(pte_base, pmd);
	}

	/* Call free_pmd_table() in remove_pud_table(). */
	if (direct)
		update_page_count(PG_LEVEL_2M, -pages);
}

static void __meminit
remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end,
		 struct vmem_altmap *altmap, bool direct)
{
	unsigned long next, pages = 0;
	pmd_t *pmd_base;
	pud_t *pud;

	pud = pud_start + pud_index(addr);
	for (; addr < end; addr = next, pud++) {
		next = pud_addr_end(addr, end);

		if (!pud_present(*pud))
			continue;

		if (pud_leaf(*pud) &&
		    IS_ALIGNED(addr, PUD_SIZE) &&
		    IS_ALIGNED(next, PUD_SIZE)) {
			spin_lock(&init_mm.page_table_lock);
			pud_clear(pud);
			spin_unlock(&init_mm.page_table_lock);
			pages++;
			continue;
		}

		pmd_base = pmd_offset(pud, 0);
		remove_pmd_table(pmd_base, addr, next, direct, altmap);
		free_pmd_table(pmd_base, pud);
	}

	if (direct)
		update_page_count(PG_LEVEL_1G, -pages);
}

static void __meminit
remove_p4d_table(p4d_t *p4d_start, unsigned long addr, unsigned long end,
		 struct vmem_altmap *altmap, bool direct)
{
	unsigned long next, pages = 0;
	pud_t *pud_base;
	p4d_t *p4d;

	p4d = p4d_start + p4d_index(addr);
	for (; addr < end; addr = next, p4d++) {
		next = p4d_addr_end(addr, end);

		if (!p4d_present(*p4d))
			continue;

		BUILD_BUG_ON(p4d_leaf(*p4d));

		pud_base = pud_offset(p4d, 0);
		remove_pud_table(pud_base, addr, next, altmap, direct);
		/*
		 * For 4-level page tables we do not want to free PUDs, but in the
		 * 5-level case we should free them. This code will have to change
		 * to adapt for boot-time switching between 4 and 5 level page tables.
		 */
		if (pgtable_l5_enabled())
			free_pud_table(pud_base, p4d);
	}

	if (direct)
		update_page_count(PG_LEVEL_512G, -pages);
}

/* start and end are both virtual addresses. */
static void __meminit
remove_pagetable(unsigned long start, unsigned long end, bool direct,
		 struct vmem_altmap *altmap)
{
	unsigned long next;
	unsigned long addr;
	pgd_t *pgd;
	p4d_t *p4d;

	for (addr = start; addr < end; addr = next) {
		next = pgd_addr_end(addr, end);

		pgd = pgd_offset_k(addr);
		if (!pgd_present(*pgd))
			continue;

		p4d = p4d_offset(pgd, 0);
		remove_p4d_table(p4d, addr, next, altmap, direct);
	}

	flush_tlb_all();
}

void __ref vmemmap_free(unsigned long start, unsigned long end,
		struct vmem_altmap *altmap)
{
	VM_BUG_ON(!PAGE_ALIGNED(start));
	VM_BUG_ON(!PAGE_ALIGNED(end));

	remove_pagetable(start, end, false, altmap);
}

static void __meminit
kernel_physical_mapping_remove(unsigned long start, unsigned long end)
{
	start = (unsigned long)__va(start);
	end = (unsigned long)__va(end);

	remove_pagetable(start, end, true, NULL);
}

void __ref arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	__remove_pages(start_pfn, nr_pages, altmap);
	kernel_physical_mapping_remove(start, start + size);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

static struct kcore_list kcore_vsyscall;

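/*
 * Record bootmem info for the memmap pages of every online node so that
 * later users (e.g. memory hot-remove) can account for them correctly.
 */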
static void __init register_page_bootmem_info(void)
{
#if defined(CONFIG_NUMA) || defined(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP)
	int i;

	for_each_online_node(i)
		register_page_bootmem_info_node(NODE_DATA(i));
#endif
}

/*
 * Pre-allocates page-table pages for the vmalloc area in the kernel page-table.
 * Only the level which needs to be synchronized between all page-tables is
 * allocated because the synchronization can be expensive.
 */
static void __init preallocate_vmalloc_pages(void)
{
	unsigned long addr;
	const char *lvl;

	for (addr = VMALLOC_START; addr <= VMEMORY_END; addr = ALIGN(addr + 1, PGDIR_SIZE)) {
		pgd_t *pgd = pgd_offset_k(addr);
		p4d_t *p4d;
		pud_t *pud;

		lvl = "p4d";
		p4d = p4d_alloc(&init_mm, pgd, addr);
		if (!p4d)
			goto failed;

		if (pgtable_l5_enabled())
			continue;

		/*
		 * The goal here is to allocate all possibly required
		 * hardware page tables pointed to by the top hardware
		 * level.
		 *
		 * On 4-level systems, the P4D layer is folded away and
		 * the above code does no preallocation.  Below, go down
		 * to the pud _software_ level to ensure the second
		 * hardware level is allocated on 4-level systems too.
		 */
		lvl = "pud";
		pud = pud_alloc(&init_mm, p4d, addr);
		if (!pud)
			goto failed;
	}

	return;

failed:

	/*
	 * The pages have to be there now or they will be missing in
	 * process page-tables later.
	 */
	panic("Failed to pre-allocate %s pages for vmalloc area\n", lvl);
}

void __init mem_init(void)
{
	pci_iommu_alloc();

	/* clear_bss() already cleared the empty_zero_page */

	/* this will put all memory onto the freelists */
	memblock_free_all();
	after_bootmem = 1;
	x86_init.hyper.init_after_bootmem();

	/*
	 * Must be done after boot memory is put on freelist, because here we
	 * might set fields in deferred struct pages that have not yet been
	 * initialized, and memblock_free_all() initializes all the reserved
	 * deferred pages for us.
	 */
	register_page_bootmem_info();

	/* Register memory areas for /proc/kcore */
	if (get_gate_vma(&init_mm))
		kclist_add(&kcore_vsyscall, (void *)VSYSCALL_ADDR, PAGE_SIZE, KCORE_USER);

	preallocate_vmalloc_pages();
}

int kernel_set_to_readonly;

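/*
 * Called once late in boot: write-protect the kernel text and rodata, mark
 * everything after the text non-executable, and give back the unused gaps
 * between the sections.
 */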
void mark_rodata_ro(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long rodata_start = PFN_ALIGN(__start_rodata);
	unsigned long end = (unsigned long)__end_rodata_hpage_align;
	unsigned long text_end = PFN_ALIGN(_etext);
	unsigned long rodata_end = PFN_ALIGN(__end_rodata);
	unsigned long all_end;

	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
	       (end - start) >> 10);
	set_memory_ro(start, (end - start) >> PAGE_SHIFT);

	kernel_set_to_readonly = 1;

	/*
	 * The rodata/data/bss/brk section (but not the kernel text!)
	 * should also be not-executable.
	 *
	 * We align all_end to PMD_SIZE because the existing mapping
	 * is a full PMD. If we would align _brk_end to PAGE_SIZE we
	 * split the PMD and the remainder between _brk_end and the end
	 * of the PMD will remain mapped executable.
	 *
	 * Any PMD which was setup after the one which covers _brk_end
	 * has been zapped already via cleanup_highmap().
	 */
	all_end = roundup((unsigned long)_brk_end, PMD_SIZE);
	set_memory_nx(text_end, (all_end - text_end) >> PAGE_SHIFT);

	set_ftrace_ops_ro();

#ifdef CONFIG_CPA_DEBUG
	printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, end);
	set_memory_rw(start, (end-start) >> PAGE_SHIFT);

	printk(KERN_INFO "Testing CPA: again\n");
	set_memory_ro(start, (end-start) >> PAGE_SHIFT);
#endif

	free_kernel_image_pages("unused kernel image (text/rodata gap)",
				(void *)text_end, (void *)rodata_start);
	free_kernel_image_pages("unused kernel image (rodata/data gap)",
				(void *)rodata_end, (void *)_sdata);
}

/*
 * Block size is the minimum amount of memory which can be hotplugged or
 * hotremoved. It must be a power of two and must be equal to or larger than
 * MIN_MEMORY_BLOCK_SIZE.
 */
#define MAX_BLOCK_SIZE (2UL << 30)

/* Amount of ram needed to start using large blocks */
#define MEM_SIZE_FOR_LARGE_BLOCK (64UL << 30)

/* Adjustable memory block size */
static unsigned long set_memory_block_size;
int __init set_memory_block_size_order(unsigned int order)
{
	unsigned long size = 1UL << order;

	if (size > MEM_SIZE_FOR_LARGE_BLOCK || size < MIN_MEMORY_BLOCK_SIZE)
		return -EINVAL;

	set_memory_block_size = size;
	return 0;
}

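/*
 * Pick the hotplug block size: honour an explicit set_memory_block_size_order()
 * override, use the minimum on small machines, the maximum on bare metal, and
 * otherwise the largest size that still aligns with the end of boot memory.
 */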
static unsigned long probe_memory_block_size(void)
{
	unsigned long boot_mem_end = max_pfn << PAGE_SHIFT;
	unsigned long bz;

	/* If memory block size has been set, then use it */
	bz = set_memory_block_size;
	if (bz)
		goto done;

	/* Use regular block if RAM is smaller than MEM_SIZE_FOR_LARGE_BLOCK */
	if (boot_mem_end < MEM_SIZE_FOR_LARGE_BLOCK) {
		bz = MIN_MEMORY_BLOCK_SIZE;
		goto done;
	}

	/*
	 * Use max block size to minimize overhead on bare metal, where
	 * alignment for memory hotplug isn't a concern.
	 */
	if (!boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
		bz = MAX_BLOCK_SIZE;
		goto done;
	}

	/* Find the largest allowed block size that aligns to memory end */
	for (bz = MAX_BLOCK_SIZE; bz > MIN_MEMORY_BLOCK_SIZE; bz >>= 1) {
		if (IS_ALIGNED(boot_mem_end, bz))
			break;
	}
done:
	pr_info("x86/mm: Memory block size: %ldMB\n", bz >> 20);

	return bz;
}

static unsigned long memory_block_size_probed;
unsigned long memory_block_size_bytes(void)
{
	if (!memory_block_size_probed)
		memory_block_size_probed = probe_memory_block_size();

	return memory_block_size_probed;
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Initialise the sparsemem vmemmap using huge-pages at the PMD level.
 */
static long __meminitdata addr_start, addr_end;
static void __meminitdata *p_start, *p_end;
static int __meminitdata node_start;

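/*
 * Install one PMD-sized vmemmap mapping; called back from the generic
 * vmemmap_populate_hugepages(). The addr_start/p_start bookkeeping only
 * feeds the contiguous-range pr_debug() reporting.
 */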
void __meminit vmemmap_set_pmd(pmd_t *pmd, void *p, int node,
			       unsigned long addr, unsigned long next)
{
	pte_t entry;

	entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
			PAGE_KERNEL_LARGE);
	set_pmd(pmd, __pmd(pte_val(entry)));

	/* check to see if we have contiguous blocks */
	if (p_end != p || node_start != node) {
		if (p_start)
			pr_debug(" [%lx-%lx] PMD -> [%p-%p] on node %d\n",
				 addr_start, addr_end-1, p_start, p_end-1, node_start);
		addr_start = addr;
		node_start = node;
		p_start = p;
	}

	addr_end = addr + PMD_SIZE;
	p_end = p + PMD_SIZE;

	if (!IS_ALIGNED(addr, PMD_SIZE) ||
	    !IS_ALIGNED(next, PMD_SIZE))
		vmemmap_use_new_sub_pmd(addr, next);
}

int __meminit vmemmap_check_pmd(pmd_t *pmd, int node,
				unsigned long addr, unsigned long next)
{
	int large = pmd_leaf(*pmd);

	if (pmd_leaf(*pmd)) {
		vmemmap_verify((pte_t *)pmd, node, addr, next);
		vmemmap_use_sub_pmd(addr, next);
	}

	return large;
}

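/*
 * Populate the vmemmap for [start, end): use 2M pages when the CPU has PSE
 * and the range covers at least a full section, otherwise fall back to base
 * pages. altmap-backed allocations are only supported with PSE.
 */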
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
			       struct vmem_altmap *altmap)
{
	int err;

	VM_BUG_ON(!PAGE_ALIGNED(start));
	VM_BUG_ON(!PAGE_ALIGNED(end));

	if (end - start < PAGES_PER_SECTION * sizeof(struct page))
		err = vmemmap_populate_basepages(start, end, node, NULL);
	else if (boot_cpu_has(X86_FEATURE_PSE))
		err = vmemmap_populate_hugepages(start, end, node, altmap);
	else if (altmap) {
		pr_err_once("%s: no cpu support for altmap allocations\n",
				__func__);
		err = -ENOMEM;
	} else
		err = vmemmap_populate_basepages(start, end, node, NULL);
	if (!err)
		sync_global_pgds(start, end - 1);
	return err;
}

#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
void register_page_bootmem_memmap(unsigned long section_nr,
				  struct page *start_page, unsigned long nr_pages)
{
	unsigned long addr = (unsigned long)start_page;
	unsigned long end = (unsigned long)(start_page + nr_pages);
	unsigned long next;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	unsigned int nr_pmd_pages;
	struct page *page;

	for (; addr < end; addr = next) {
		pte_t *pte = NULL;

		pgd = pgd_offset_k(addr);
		if (pgd_none(*pgd)) {
			next = (addr + PAGE_SIZE) & PAGE_MASK;
			continue;
		}
		get_page_bootmem(section_nr, pgd_page(*pgd), MIX_SECTION_INFO);

		p4d = p4d_offset(pgd, addr);
		if (p4d_none(*p4d)) {
			next = (addr + PAGE_SIZE) & PAGE_MASK;
			continue;
		}
		get_page_bootmem(section_nr, p4d_page(*p4d), MIX_SECTION_INFO);

		pud = pud_offset(p4d, addr);
		if (pud_none(*pud)) {
			next = (addr + PAGE_SIZE) & PAGE_MASK;
			continue;
		}
		get_page_bootmem(section_nr, pud_page(*pud), MIX_SECTION_INFO);

		if (!boot_cpu_has(X86_FEATURE_PSE)) {
			next = (addr + PAGE_SIZE) & PAGE_MASK;
			pmd = pmd_offset(pud, addr);
			if (pmd_none(*pmd))
				continue;
			get_page_bootmem(section_nr, pmd_page(*pmd),
					 MIX_SECTION_INFO);

			pte = pte_offset_kernel(pmd, addr);
			if (pte_none(*pte))
				continue;
			get_page_bootmem(section_nr, pte_page(*pte),
					 SECTION_INFO);
		} else {
			next = pmd_addr_end(addr, end);

			pmd = pmd_offset(pud, addr);
			if (pmd_none(*pmd))
				continue;

			nr_pmd_pages = 1 << get_order(PMD_SIZE);
			page = pmd_page(*pmd);
			while (nr_pmd_pages--)
				get_page_bootmem(section_nr, page++,
						 SECTION_INFO);
		}
	}
}
#endif

vmemmap_populate_print_last(void)
1635 pr_debug(" [%lx-%lx] PMD -> [%p-%p] on node %d\n",
1636 addr_start
, addr_end
-1, p_start
, p_end
-1, node_start
);