// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/x86_64/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2000  Pavel Machek <pavel@ucw.cz>
 *  Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
 */
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/dma-mapping.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/memremap.h>
#include <linux/nmi.h>
#include <linux/gfp.h>
#include <linux/kcore.h>
#include <linux/bootmem_info.h>

#include <asm/processor.h>
#include <asm/bios_ebda.h>
#include <linux/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/e820/api.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/sections.h>
#include <asm/kdebug.h>
#include <asm/set_memory.h>
#include <asm/uv/uv.h>
#include <asm/setup.h>
#include <asm/ftrace.h>

#include "mm_internal.h"

#include "ident_map.c"
#define DEFINE_POPULATE(fname, type1, type2, init)              \
static inline void fname##_init(struct mm_struct *mm,           \
                type1##_t *arg1, type2##_t *arg2, bool init)    \
{                                                               \
        if (init)                                               \
                fname##_safe(mm, arg1, arg2);                   \
        else                                                    \
                fname(mm, arg1, arg2);                          \
}

DEFINE_POPULATE(p4d_populate, p4d, pud, init)
DEFINE_POPULATE(pgd_populate, pgd, p4d, init)
DEFINE_POPULATE(pud_populate, pud, pmd, init)
DEFINE_POPULATE(pmd_populate_kernel, pmd, pte, init)
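
/*
 * For illustration only: DEFINE_POPULATE(pud_populate, pud, pmd, init)
 * above expands to roughly
 *
 *      static inline void pud_populate_init(struct mm_struct *mm,
 *                      pud_t *arg1, pmd_t *arg2, bool init)
 *      {
 *              if (init)
 *                      pud_populate_safe(mm, arg1, arg2);
 *              else
 *                      pud_populate(mm, arg1, arg2);
 *      }
 *
 * i.e. each *_init() helper picks the _safe() variant (which warns if an
 * entry is being overwritten) when a brand new mapping is created, and the
 * plain variant when an existing mapping is merely updated.
 */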
#define DEFINE_ENTRY(type1, type2, init)                        \
static inline void set_##type1##_init(type1##_t *arg1,          \
                        type2##_t arg2, bool init)              \
{                                                               \
        if (init)                                               \
                set_##type1##_safe(arg1, arg2);                 \
        else                                                    \
                set_##type1(arg1, arg2);                        \
}

DEFINE_ENTRY(p4d, p4d, init)
DEFINE_ENTRY(pud, pud, init)
DEFINE_ENTRY(pmd, pmd, init)
DEFINE_ENTRY(pte, pte, init)
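
/*
 * Note (added for clarity): in a 4K PTE, bit 7 is _PAGE_PAT; in a PMD/PUD
 * leaf entry the same bit position is _PAGE_PSE. prot_sethuge() below
 * therefore warns if PAT is already set in the 4K protection bits before
 * reusing that bit as the huge-page (PSE) flag.
 */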
static inline pgprot_t prot_sethuge(pgprot_t prot)
{
        WARN_ON_ONCE(pgprot_val(prot) & _PAGE_PAT);

        return __pgprot(pgprot_val(prot) | _PAGE_PSE);
}
/*
 * NOTE: pagetable_init() allocates all the fixmap pagetables contiguously
 * in physical memory, so we can cache the place of the first one and move
 * around without checking the pgd every time.
 */

/* Bits supported by the hardware: */
pteval_t __supported_pte_mask __read_mostly = ~0;
/* Bits allowed in normal kernel mappings: */
pteval_t __default_kernel_pte_mask __read_mostly = ~0;
EXPORT_SYMBOL_GPL(__supported_pte_mask);
/* Used in PAGE_KERNEL_* macros which are reasonably used out-of-tree: */
EXPORT_SYMBOL(__default_kernel_pte_mask);
int force_personality32;

/*
 * noexec32=on|off
 * Control non-executable heap for 32-bit processes.
 *
 * on   PROT_READ does not imply PROT_EXEC for 32-bit processes (default)
 * off  PROT_READ implies PROT_EXEC
 */
static int __init nonx32_setup(char *str)
{
        if (!strcmp(str, "on"))
                force_personality32 &= ~READ_IMPLIES_EXEC;
        else if (!strcmp(str, "off"))
                force_personality32 |= READ_IMPLIES_EXEC;
        return 1;
}
__setup("noexec32=", nonx32_setup);
static void sync_global_pgds_l5(unsigned long start, unsigned long end)
{
        unsigned long addr;

        for (addr = start; addr <= end; addr = ALIGN(addr + 1, PGDIR_SIZE)) {
                const pgd_t *pgd_ref = pgd_offset_k(addr);
                struct page *page;

                /* Check for overflow */
                if (addr < start)
                        break;

                if (pgd_none(*pgd_ref))
                        continue;

                spin_lock(&pgd_lock);
                list_for_each_entry(page, &pgd_list, lru) {
                        pgd_t *pgd;
                        spinlock_t *pgt_lock;

                        pgd = (pgd_t *)page_address(page) + pgd_index(addr);
                        /* the pgt_lock only for Xen */
                        pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
                        spin_lock(pgt_lock);

                        if (!pgd_none(*pgd_ref) && !pgd_none(*pgd))
                                BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));

                        if (pgd_none(*pgd))
                                set_pgd(pgd, *pgd_ref);

                        spin_unlock(pgt_lock);
                }
                spin_unlock(&pgd_lock);
        }
}
static void sync_global_pgds_l4(unsigned long start, unsigned long end)
{
        unsigned long addr;

        for (addr = start; addr <= end; addr = ALIGN(addr + 1, PGDIR_SIZE)) {
                pgd_t *pgd_ref = pgd_offset_k(addr);
                const p4d_t *p4d_ref;
                struct page *page;

                /*
                 * With folded p4d, pgd_none() is always false, so we need to
                 * handle the synchronization at the p4d level.
                 */
                MAYBE_BUILD_BUG_ON(pgd_none(*pgd_ref));
                p4d_ref = p4d_offset(pgd_ref, addr);

                if (p4d_none(*p4d_ref))
                        continue;

                spin_lock(&pgd_lock);
                list_for_each_entry(page, &pgd_list, lru) {
                        pgd_t *pgd;
                        p4d_t *p4d;
                        spinlock_t *pgt_lock;

                        pgd = (pgd_t *)page_address(page) + pgd_index(addr);
                        p4d = p4d_offset(pgd, addr);
                        /* the pgt_lock only for Xen */
                        pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
                        spin_lock(pgt_lock);

                        if (!p4d_none(*p4d_ref) && !p4d_none(*p4d))
                                BUG_ON(p4d_pgtable(*p4d)
                                       != p4d_pgtable(*p4d_ref));

                        if (p4d_none(*p4d))
                                set_p4d(p4d, *p4d_ref);

                        spin_unlock(pgt_lock);
                }
                spin_unlock(&pgd_lock);
        }
}
/*
 * When memory is added, make sure all process MMs get suitable PGD entries
 * in their local PGD level page.
 */
static void sync_global_pgds(unsigned long start, unsigned long end)
{
        if (pgtable_l5_enabled())
                sync_global_pgds_l5(start, end);
        else
                sync_global_pgds_l4(start, end);
}
/*
 * NOTE: This function is marked __ref because it calls the __init memblock
 * allocator. That is safe ONLY while after_bootmem == 0.
 */
static __ref void *spp_getpage(void)
{
        void *ptr;

        if (after_bootmem)
                ptr = (void *) get_zeroed_page(GFP_ATOMIC);
        else
                ptr = memblock_alloc(PAGE_SIZE, PAGE_SIZE);

        if (!ptr || ((unsigned long)ptr & ~PAGE_MASK)) {
                panic("set_pte_phys: cannot allocate page data %s\n",
                        after_bootmem ? "after bootmem" : "");
        }

        pr_debug("spp_getpage %p\n", ptr);

        return ptr;
}
static p4d_t *fill_p4d(pgd_t *pgd, unsigned long vaddr)
{
        if (pgd_none(*pgd)) {
                p4d_t *p4d = (p4d_t *)spp_getpage();
                pgd_populate(&init_mm, pgd, p4d);
                if (p4d != p4d_offset(pgd, 0))
                        printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
                               p4d, p4d_offset(pgd, 0));
        }
        return p4d_offset(pgd, vaddr);
}
static pud_t *fill_pud(p4d_t *p4d, unsigned long vaddr)
{
        if (p4d_none(*p4d)) {
                pud_t *pud = (pud_t *)spp_getpage();
                p4d_populate(&init_mm, p4d, pud);
                if (pud != pud_offset(p4d, 0))
                        printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
                               pud, pud_offset(p4d, 0));
        }
        return pud_offset(p4d, vaddr);
}
static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
{
        if (pud_none(*pud)) {
                pmd_t *pmd = (pmd_t *) spp_getpage();
                pud_populate(&init_mm, pud, pmd);
                if (pmd != pmd_offset(pud, 0))
                        printk(KERN_ERR "PAGETABLE BUG #02! %p <-> %p\n",
                               pmd, pmd_offset(pud, 0));
        }
        return pmd_offset(pud, vaddr);
}
static pte_t *fill_pte(pmd_t *pmd, unsigned long vaddr)
{
        if (pmd_none(*pmd)) {
                pte_t *pte = (pte_t *) spp_getpage();
                pmd_populate_kernel(&init_mm, pmd, pte);
                if (pte != pte_offset_kernel(pmd, 0))
                        printk(KERN_ERR "PAGETABLE BUG #03!\n");
        }
        return pte_offset_kernel(pmd, vaddr);
}
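
/*
 * Summary (added for clarity): the fill_*() helpers above each descend one
 * page table level, allocating a fresh table via spp_getpage() when the
 * entry is still empty. The set_pte_vaddr*() functions below simply chain
 * them together to install a single kernel PTE for an arbitrary virtual
 * address.
 */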
static void __set_pte_vaddr(pud_t *pud, unsigned long vaddr, pte_t new_pte)
{
        pmd_t *pmd = fill_pmd(pud, vaddr);
        pte_t *pte = fill_pte(pmd, vaddr);

        set_pte(pte, new_pte);

        /*
         * It's enough to flush this one mapping.
         * (PGE mappings get flushed as well)
         */
        flush_tlb_one_kernel(vaddr);
}
void set_pte_vaddr_p4d(p4d_t *p4d_page, unsigned long vaddr, pte_t new_pte)
{
        p4d_t *p4d = p4d_page + p4d_index(vaddr);
        pud_t *pud = fill_pud(p4d, vaddr);

        __set_pte_vaddr(pud, vaddr, new_pte);
}
void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
{
        pud_t *pud = pud_page + pud_index(vaddr);

        __set_pte_vaddr(pud, vaddr, new_pte);
}
void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
{
        pgd_t *pgd;
        p4d_t *p4d_page;

        pr_debug("set_pte_vaddr %lx to %lx\n", vaddr, native_pte_val(pteval));

        pgd = pgd_offset_k(vaddr);
        if (pgd_none(*pgd)) {
                printk(KERN_ERR
                        "PGD FIXMAP MISSING, it should be setup in head.S!\n");
                return;
        }

        p4d_page = p4d_offset(pgd, 0);
        set_pte_vaddr_p4d(p4d_page, vaddr, pteval);
}
pmd_t * __init populate_extra_pmd(unsigned long vaddr)
{
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;

        pgd = pgd_offset_k(vaddr);
        p4d = fill_p4d(pgd, vaddr);
        pud = fill_pud(p4d, vaddr);
        return fill_pmd(pud, vaddr);
}
pte_t * __init populate_extra_pte(unsigned long vaddr)
{
        pmd_t *pmd;

        pmd = populate_extra_pmd(vaddr);
        return fill_pte(pmd, vaddr);
}
/*
 * Create large page table mappings for a range of physical addresses.
 */
static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
                                        enum page_cache_mode cache)
{
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pgprot_t prot;

        pgprot_val(prot) = pgprot_val(PAGE_KERNEL_LARGE) |
                protval_4k_2_large(cachemode2protval(cache));
        BUG_ON((phys & ~PMD_MASK) || (size & ~PMD_MASK));
        for (; size; phys += PMD_SIZE, size -= PMD_SIZE) {
                pgd = pgd_offset_k((unsigned long)__va(phys));
                if (pgd_none(*pgd)) {
                        p4d = (p4d_t *) spp_getpage();
                        set_pgd(pgd, __pgd(__pa(p4d) | _KERNPG_TABLE |
                                                _PAGE_USER));
                }
                p4d = p4d_offset(pgd, (unsigned long)__va(phys));
                if (p4d_none(*p4d)) {
                        pud = (pud_t *) spp_getpage();
                        set_p4d(p4d, __p4d(__pa(pud) | _KERNPG_TABLE |
                                                _PAGE_USER));
                }
                pud = pud_offset(p4d, (unsigned long)__va(phys));
                if (pud_none(*pud)) {
                        pmd = (pmd_t *) spp_getpage();
                        set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
                                                _PAGE_USER));
                }
                pmd = pmd_offset(pud, phys);
                BUG_ON(!pmd_none(*pmd));
                set_pmd(pmd, __pmd(phys | pgprot_val(prot)));
        }
}
void __init init_extra_mapping_wb(unsigned long phys, unsigned long size)
{
        __init_extra_mapping(phys, size, _PAGE_CACHE_MODE_WB);
}

void __init init_extra_mapping_uc(unsigned long phys, unsigned long size)
{
        __init_extra_mapping(phys, size, _PAGE_CACHE_MODE_UC);
}
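
/*
 * Usage note (added): the two wrappers above differ only in the cache mode
 * handed to __init_extra_mapping(): write-back for memory-like regions,
 * uncached for MMIO-like regions. Both map whole PMD_SIZE (2 MB) aligned
 * chunks with large pages; callers are expected to pass PMD-aligned
 * phys/size, which the BUG_ON() above enforces.
 */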
/*
 * The head.S code sets up the kernel high mapping:
 *
 *   from __START_KERNEL_map to __START_KERNEL_map + size (== _end - _text)
 *
 * phys_base holds the negative offset to the kernel, which is added
 * to the compile time generated pmds. This results in invalid pmds up
 * to the point where we hit the physaddr 0 mapping.
 *
 * We limit the mappings to the region from _text to _brk_end.  _brk_end
 * is rounded up to the 2MB boundary. This catches the invalid pmds as
 * well, as they are located before _text:
 */
void __init cleanup_highmap(void)
{
        unsigned long vaddr = __START_KERNEL_map;
        unsigned long vaddr_end = __START_KERNEL_map + KERNEL_IMAGE_SIZE;
        unsigned long end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
        pmd_t *pmd = level2_kernel_pgt;

        /*
         * Native path, max_pfn_mapped is not set yet.
         * Xen has valid max_pfn_mapped set in
         *      arch/x86/xen/mmu.c:xen_setup_kernel_pagetable().
         */
        if (max_pfn_mapped)
                vaddr_end = __START_KERNEL_map + (max_pfn_mapped << PAGE_SHIFT);

        for (; vaddr + PMD_SIZE - 1 < vaddr_end; pmd++, vaddr += PMD_SIZE) {
                if (pmd_none(*pmd))
                        continue;
                if (vaddr < (unsigned long) _text || vaddr > end)
                        set_pmd(pmd, __pmd(0));
        }
}
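
/*
 * Worked example (added, assuming a typical configuration): with a roughly
 * 10 MB kernel image and _brk_end a few pages past _end, roundup(_brk_end,
 * PMD_SIZE) keeps the first handful of 2 MB PMDs in level2_kernel_pgt
 * intact, while every other PMD of the KERNEL_IMAGE_SIZE-wide high mapping
 * is cleared so that stale compile-time entries never stay mapped.
 */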
/*
 * Create PTE level page table mapping for physical addresses.
 * It returns the last physical address mapped.
 */
static unsigned long __meminit
phys_pte_init(pte_t *pte_page, unsigned long paddr, unsigned long paddr_end,
              pgprot_t prot, bool init)
{
        unsigned long pages = 0, paddr_next;
        unsigned long paddr_last = paddr_end;
        pte_t *pte;
        int i;

        pte = pte_page + pte_index(paddr);
        i = pte_index(paddr);

        for (; i < PTRS_PER_PTE; i++, paddr = paddr_next, pte++) {
                paddr_next = (paddr & PAGE_MASK) + PAGE_SIZE;
                if (paddr >= paddr_end) {
                        if (!after_bootmem &&
                            !e820__mapped_any(paddr & PAGE_MASK, paddr_next,
                                              E820_TYPE_RAM) &&
                            !e820__mapped_any(paddr & PAGE_MASK, paddr_next,
                                              E820_TYPE_RESERVED_KERN) &&
                            !e820__mapped_any(paddr & PAGE_MASK, paddr_next,
                                              E820_TYPE_ACPI))
                                set_pte_init(pte, __pte(0), init);
                        continue;
                }

                /*
                 * We will re-use the existing mapping.
                 * Xen for example has some special requirements, like mapping
                 * pagetable pages as RO. So assume whoever pre-set up these
                 * mappings knew what they were doing.
                 */
                if (!pte_none(*pte)) {
                        if (!after_bootmem)
                                pages++;
                        continue;
                }

                if (0)
                        pr_info(" pte=%p addr=%lx pte=%016lx\n", pte, paddr,
                                pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL).pte);
                pages++;
                set_pte_init(pte, pfn_pte(paddr >> PAGE_SHIFT, prot), init);
                paddr_last = (paddr & PAGE_MASK) + PAGE_SIZE;
        }

        update_page_count(PG_LEVEL_4K, pages);

        return paddr_last;
}
/*
 * Create PMD level page table mapping for physical addresses. The virtual
 * and physical addresses have to be aligned at this level.
 * It returns the last physical address mapped.
 */
static unsigned long __meminit
phys_pmd_init(pmd_t *pmd_page, unsigned long paddr, unsigned long paddr_end,
              unsigned long page_size_mask, pgprot_t prot, bool init)
{
        unsigned long pages = 0, paddr_next;
        unsigned long paddr_last = paddr_end;

        int i = pmd_index(paddr);

        for (; i < PTRS_PER_PMD; i++, paddr = paddr_next) {
                pmd_t *pmd = pmd_page + pmd_index(paddr);
                pte_t *pte;
                pgprot_t new_prot = prot;

                paddr_next = (paddr & PMD_MASK) + PMD_SIZE;
                if (paddr >= paddr_end) {
                        if (!after_bootmem &&
                            !e820__mapped_any(paddr & PMD_MASK, paddr_next,
                                              E820_TYPE_RAM) &&
                            !e820__mapped_any(paddr & PMD_MASK, paddr_next,
                                              E820_TYPE_RESERVED_KERN) &&
                            !e820__mapped_any(paddr & PMD_MASK, paddr_next,
                                              E820_TYPE_ACPI))
                                set_pmd_init(pmd, __pmd(0), init);
                        continue;
                }

                if (!pmd_none(*pmd)) {
                        if (!pmd_leaf(*pmd)) {
                                spin_lock(&init_mm.page_table_lock);
                                pte = (pte_t *)pmd_page_vaddr(*pmd);
                                paddr_last = phys_pte_init(pte, paddr,
                                                           paddr_end, prot,
                                                           init);
                                spin_unlock(&init_mm.page_table_lock);
                                continue;
                        }
                        /*
                         * If we are OK with a PG_LEVEL_2M mapping, then we
                         * will use the existing mapping.
                         *
                         * Otherwise, we will split the large page mapping but
                         * use the same existing protection bits except for
                         * large page, so that we don't violate Intel's TLB
                         * Application note (317080) which says, while changing
                         * the page sizes, new and old translations should
                         * not differ with respect to page frame and
                         * attributes.
                         */
                        if (page_size_mask & (1 << PG_LEVEL_2M)) {
                                if (!after_bootmem)
                                        pages++;
                                paddr_last = paddr_next;
                                continue;
                        }
                        new_prot = pte_pgprot(pte_clrhuge(*(pte_t *)pmd));
                }

                if (page_size_mask & (1<<PG_LEVEL_2M)) {
                        pages++;
                        spin_lock(&init_mm.page_table_lock);
                        set_pmd_init(pmd,
                                     pfn_pmd(paddr >> PAGE_SHIFT, prot_sethuge(prot)),
                                     init);
                        spin_unlock(&init_mm.page_table_lock);
                        paddr_last = paddr_next;
                        continue;
                }

                pte = alloc_low_page();
                paddr_last = phys_pte_init(pte, paddr, paddr_end, new_prot, init);

                spin_lock(&init_mm.page_table_lock);
                pmd_populate_kernel_init(&init_mm, pmd, pte, init);
                spin_unlock(&init_mm.page_table_lock);
        }
        update_page_count(PG_LEVEL_2M, pages);
        return paddr_last;
}
/*
 * Create PUD level page table mapping for physical addresses. The virtual
 * and physical addresses do not have to be aligned at this level. KASLR can
 * randomize virtual addresses up to this level.
 * It returns the last physical address mapped.
 */
static unsigned long __meminit
phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
              unsigned long page_size_mask, pgprot_t _prot, bool init)
{
        unsigned long pages = 0, paddr_next;
        unsigned long paddr_last = paddr_end;
        unsigned long vaddr = (unsigned long)__va(paddr);
        int i = pud_index(vaddr);

        for (; i < PTRS_PER_PUD; i++, paddr = paddr_next) {
                pud_t *pud;
                pmd_t *pmd;
                pgprot_t prot = _prot;

                vaddr = (unsigned long)__va(paddr);
                pud = pud_page + pud_index(vaddr);
                paddr_next = (paddr & PUD_MASK) + PUD_SIZE;

                if (paddr >= paddr_end) {
                        if (!after_bootmem &&
                            !e820__mapped_any(paddr & PUD_MASK, paddr_next,
                                              E820_TYPE_RAM) &&
                            !e820__mapped_any(paddr & PUD_MASK, paddr_next,
                                              E820_TYPE_RESERVED_KERN) &&
                            !e820__mapped_any(paddr & PUD_MASK, paddr_next,
                                              E820_TYPE_ACPI))
                                set_pud_init(pud, __pud(0), init);
                        continue;
                }

                if (!pud_none(*pud)) {
                        if (!pud_leaf(*pud)) {
                                pmd = pmd_offset(pud, 0);
                                paddr_last = phys_pmd_init(pmd, paddr,
                                                           paddr_end,
                                                           page_size_mask,
                                                           prot, init);
                                continue;
                        }
                        /*
                         * If we are OK with a PG_LEVEL_1G mapping, then we
                         * will use the existing mapping.
                         *
                         * Otherwise, we will split the gbpage mapping but use
                         * the same existing protection bits except for large
                         * page, so that we don't violate Intel's TLB
                         * Application note (317080) which says, while changing
                         * the page sizes, new and old translations should
                         * not differ with respect to page frame and
                         * attributes.
                         */
                        if (page_size_mask & (1 << PG_LEVEL_1G)) {
                                if (!after_bootmem)
                                        pages++;
                                paddr_last = paddr_next;
                                continue;
                        }
                        prot = pte_pgprot(pte_clrhuge(*(pte_t *)pud));
                }

                if (page_size_mask & (1<<PG_LEVEL_1G)) {
                        pages++;
                        spin_lock(&init_mm.page_table_lock);
                        set_pud_init(pud,
                                     pfn_pud(paddr >> PAGE_SHIFT, prot_sethuge(prot)),
                                     init);
                        spin_unlock(&init_mm.page_table_lock);
                        paddr_last = paddr_next;
                        continue;
                }

                pmd = alloc_low_page();
                paddr_last = phys_pmd_init(pmd, paddr, paddr_end,
                                           page_size_mask, prot, init);

                spin_lock(&init_mm.page_table_lock);
                pud_populate_init(&init_mm, pud, pmd, init);
                spin_unlock(&init_mm.page_table_lock);
        }

        update_page_count(PG_LEVEL_1G, pages);

        return paddr_last;
}
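
/*
 * Note (added for clarity): page_size_mask is a bitmask of allowed leaf
 * sizes. For example, (1 << PG_LEVEL_2M) | (1 << PG_LEVEL_1G) lets
 * phys_pud_init() install 1 GB leaves where it can and otherwise descend
 * into phys_pmd_init() for 2 MB leaves; a mask of 0 forces 4K PTEs all the
 * way down via phys_pte_init().
 */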
static unsigned long __meminit
phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end,
              unsigned long page_size_mask, pgprot_t prot, bool init)
{
        unsigned long vaddr, vaddr_end, vaddr_next, paddr_next, paddr_last;

        paddr_last = paddr_end;
        vaddr = (unsigned long)__va(paddr);
        vaddr_end = (unsigned long)__va(paddr_end);

        if (!pgtable_l5_enabled())
                return phys_pud_init((pud_t *) p4d_page, paddr, paddr_end,
                                     page_size_mask, prot, init);

        for (; vaddr < vaddr_end; vaddr = vaddr_next) {
                p4d_t *p4d = p4d_page + p4d_index(vaddr);
                pud_t *pud;

                vaddr_next = (vaddr & P4D_MASK) + P4D_SIZE;
                paddr = __pa(vaddr);

                if (paddr >= paddr_end) {
                        paddr_next = __pa(vaddr_next);
                        if (!after_bootmem &&
                            !e820__mapped_any(paddr & P4D_MASK, paddr_next,
                                              E820_TYPE_RAM) &&
                            !e820__mapped_any(paddr & P4D_MASK, paddr_next,
                                              E820_TYPE_RESERVED_KERN) &&
                            !e820__mapped_any(paddr & P4D_MASK, paddr_next,
                                              E820_TYPE_ACPI))
                                set_p4d_init(p4d, __p4d(0), init);
                        continue;
                }

                if (!p4d_none(*p4d)) {
                        pud = pud_offset(p4d, 0);
                        paddr_last = phys_pud_init(pud, paddr, __pa(vaddr_end),
                                                   page_size_mask, prot, init);
                        continue;
                }

                pud = alloc_low_page();
                paddr_last = phys_pud_init(pud, paddr, __pa(vaddr_end),
                                           page_size_mask, prot, init);

                spin_lock(&init_mm.page_table_lock);
                p4d_populate_init(&init_mm, p4d, pud, init);
                spin_unlock(&init_mm.page_table_lock);
        }

        return paddr_last;
}
static unsigned long __meminit
__kernel_physical_mapping_init(unsigned long paddr_start,
                               unsigned long paddr_end,
                               unsigned long page_size_mask,
                               pgprot_t prot, bool init)
{
        bool pgd_changed = false;
        unsigned long vaddr, vaddr_start, vaddr_end, vaddr_next, paddr_last;

        paddr_last = paddr_end;
        vaddr = (unsigned long)__va(paddr_start);
        vaddr_end = (unsigned long)__va(paddr_end);
        vaddr_start = vaddr;

        for (; vaddr < vaddr_end; vaddr = vaddr_next) {
                pgd_t *pgd = pgd_offset_k(vaddr);
                p4d_t *p4d;

                vaddr_next = (vaddr & PGDIR_MASK) + PGDIR_SIZE;

                if (pgd_val(*pgd)) {
                        p4d = (p4d_t *)pgd_page_vaddr(*pgd);
                        paddr_last = phys_p4d_init(p4d, __pa(vaddr),
                                                   __pa(vaddr_end),
                                                   page_size_mask,
                                                   prot, init);
                        continue;
                }

                p4d = alloc_low_page();
                paddr_last = phys_p4d_init(p4d, __pa(vaddr), __pa(vaddr_end),
                                           page_size_mask, prot, init);

                spin_lock(&init_mm.page_table_lock);
                if (pgtable_l5_enabled())
                        pgd_populate_init(&init_mm, pgd, p4d, init);
                else
                        p4d_populate_init(&init_mm, p4d_offset(pgd, vaddr),
                                          (pud_t *) p4d, init);

                spin_unlock(&init_mm.page_table_lock);
                pgd_changed = true;
        }

        if (pgd_changed)
                sync_global_pgds(vaddr_start, vaddr_end - 1);

        return paddr_last;
}
/*
 * Create page table mapping for the physical memory for specific physical
 * addresses. Note that it can only be used to populate non-present entries.
 * The virtual and physical addresses have to be aligned on PMD level
 * down. It returns the last physical address mapped.
 */
unsigned long __meminit
kernel_physical_mapping_init(unsigned long paddr_start,
                             unsigned long paddr_end,
                             unsigned long page_size_mask, pgprot_t prot)
{
        return __kernel_physical_mapping_init(paddr_start, paddr_end,
                                              page_size_mask, prot, true);
}
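
/*
 * Illustrative call (added, not from this file): mapping the first 4 GB of
 * RAM into the direct map with 2 MB pages where permitted would look like
 *
 *      kernel_physical_mapping_init(0, 4UL << 30,
 *                                   1UL << PG_LEVEL_2M, PAGE_KERNEL);
 *
 * In practice the callers compute page_size_mask from CPU features (PSE,
 * GB pages) rather than hard-coding it.
 */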
/*
 * This function is similar to kernel_physical_mapping_init() above with the
 * exception that it uses set_{pud,pmd}() instead of the set_{pud,pmd}_safe()
 * functions when updating the mapping. The caller is responsible for flushing
 * the TLBs after the function returns.
 */
unsigned long __meminit
kernel_physical_mapping_change(unsigned long paddr_start,
                               unsigned long paddr_end,
                               unsigned long page_size_mask)
{
        return __kernel_physical_mapping_init(paddr_start, paddr_end,
                                              page_size_mask, PAGE_KERNEL,
                                              false);
}
void __init initmem_init(void)
{
        memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
}
void __init paging_init(void)
{
        sparse_init();

        /*
         * Clear the default setting with node 0.
         * Note: don't use nodes_clear here, that is really clearing when
         *       NUMA support is not compiled in, and later node_set_state
         *       will not set it back.
         */
        node_clear_state(0, N_MEMORY);
        node_clear_state(0, N_NORMAL_MEMORY);

        zone_sizes_init();
}
#ifdef CONFIG_SPARSEMEM_VMEMMAP
#define PAGE_UNUSED 0xFD

/*
 * The unused vmemmap range, which was not yet memset(PAGE_UNUSED), ranges
 * from unused_pmd_start to the next PMD_SIZE boundary.
 */
static unsigned long unused_pmd_start __meminitdata;

static void __meminit vmemmap_flush_unused_pmd(void)
{
        if (!unused_pmd_start)
                return;
        /*
         * Clears (unused_pmd_start, PMD_END]
         */
        memset((void *)unused_pmd_start, PAGE_UNUSED,
               ALIGN(unused_pmd_start, PMD_SIZE) - unused_pmd_start);
        unused_pmd_start = 0;
}
#ifdef CONFIG_MEMORY_HOTPLUG
/* Returns true if the PMD is completely unused and thus it can be freed */
static bool __meminit vmemmap_pmd_is_unused(unsigned long addr, unsigned long end)
{
        unsigned long start = ALIGN_DOWN(addr, PMD_SIZE);

        /*
         * Flush the unused range cache to ensure that memchr_inv() will work
         * for the whole range.
         */
        vmemmap_flush_unused_pmd();
        memset((void *)addr, PAGE_UNUSED, end - addr);

        return !memchr_inv((void *)start, PAGE_UNUSED, PMD_SIZE);
}
#endif
static void __meminit __vmemmap_use_sub_pmd(unsigned long start)
{
        /*
         * As we expect to add in the same granularity as we remove, it's
         * sufficient to mark only some piece used to block the memmap page from
         * getting removed when removing some other adjacent memmap (just in
         * case the first memmap never gets initialized e.g., because the memory
         * block never gets onlined).
         */
        memset((void *)start, 0, sizeof(struct page));
}
static void __meminit vmemmap_use_sub_pmd(unsigned long start, unsigned long end)
{
        /*
         * We only optimize if the new used range directly follows the
         * previously unused range (esp., when populating consecutive sections).
         */
        if (unused_pmd_start == start) {
                if (likely(IS_ALIGNED(end, PMD_SIZE)))
                        unused_pmd_start = 0;
                else
                        unused_pmd_start = end;
                return;
        }

        /*
         * If the range does not contiguously follow the previous one, make
         * sure to mark the unused range of the previous one so it can be
         * removed.
         */
        vmemmap_flush_unused_pmd();
        __vmemmap_use_sub_pmd(start);
}
static void __meminit vmemmap_use_new_sub_pmd(unsigned long start, unsigned long end)
{
        const unsigned long page = ALIGN_DOWN(start, PMD_SIZE);

        vmemmap_flush_unused_pmd();

        /*
         * Our memmap page may already be filled with PAGE_UNUSED from a
         * previous remove. Make sure to reset it.
         */
        __vmemmap_use_sub_pmd(start);

        /*
         * Mark with PAGE_UNUSED the unused parts of the new memmap range.
         */
        if (!IS_ALIGNED(start, PMD_SIZE))
                memset((void *)page, PAGE_UNUSED, start - page);

        /*
         * We want to avoid memset(PAGE_UNUSED) when populating the vmemmap of
         * consecutive sections. Remember for the last added PMD where the
         * unused range begins.
         */
        if (!IS_ALIGNED(end, PMD_SIZE))
                unused_pmd_start = end;
}
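
/*
 * Summary (added for clarity): one 2 MB vmemmap PMD can back the memmap for
 * more memory than a single hot-(un)plug operation covers. The helpers above
 * track which bytes of such a PMD hold live struct pages and fill the rest
 * with the PAGE_UNUSED marker, so a later remove can tell via
 * vmemmap_pmd_is_unused() whether the whole huge page has become free and
 * can be given back.
 */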
#endif

/*
 * Memory hotplug specific functions
 */
#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * After memory hotplug the variables max_pfn, max_low_pfn and high_memory need
 * updating.
 */
static void update_end_of_memory_vars(u64 start, u64 size)
{
        unsigned long end_pfn = PFN_UP(start + size);

        if (end_pfn > max_pfn) {
                max_pfn = end_pfn;
                max_low_pfn = end_pfn;
                high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
        }
}
int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
              struct mhp_params *params)
{
        unsigned long end = ((start_pfn + nr_pages) << PAGE_SHIFT) - 1;
        int ret;

        if (WARN_ON_ONCE(end > PHYSMEM_END))
                return -ERANGE;

        ret = __add_pages(nid, start_pfn, nr_pages, params);
        WARN_ON_ONCE(ret);

        /* update max_pfn, max_low_pfn and high_memory */
        update_end_of_memory_vars(start_pfn << PAGE_SHIFT,
                                  nr_pages << PAGE_SHIFT);

        return ret;
}
int arch_add_memory(int nid, u64 start, u64 size,
                    struct mhp_params *params)
{
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;

        init_memory_mapping(start, start + size, params->pgprot);

        return add_pages(nid, start_pfn, nr_pages, params);
}
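
/*
 * Note (added for clarity): the ordering in arch_add_memory() matters: the
 * kernel direct mapping for the hot-added range is created first via
 * init_memory_mapping(), so the new physical memory is addressable by the
 * kernel before add_pages() exposes it to the generic hotplug core.
 */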
static void __meminit free_pagetable(struct page *page, int order)
{
        unsigned long magic;
        unsigned int nr_pages = 1 << order;

        /* bootmem page has reserved flag */
        if (PageReserved(page)) {
                magic = page->index;
                if (magic == SECTION_INFO || magic == MIX_SECTION_INFO) {
                        while (nr_pages--)
                                put_page_bootmem(page++);
                } else
                        while (nr_pages--)
                                free_reserved_page(page++);
        } else
                free_pages((unsigned long)page_address(page), order);
}
static void __meminit free_hugepage_table(struct page *page,
                struct vmem_altmap *altmap)
{
        if (altmap)
                vmem_altmap_free(altmap, PMD_SIZE / PAGE_SIZE);
        else
                free_pagetable(page, get_order(PMD_SIZE));
}
static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd)
{
        pte_t *pte;
        int i;

        for (i = 0; i < PTRS_PER_PTE; i++) {
                pte = pte_start + i;
                if (!pte_none(*pte))
                        return;
        }

        /* free a pte table */
        free_pagetable(pmd_page(*pmd), 0);
        spin_lock(&init_mm.page_table_lock);
        pmd_clear(pmd);
        spin_unlock(&init_mm.page_table_lock);
}
static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud)
{
        pmd_t *pmd;
        int i;

        for (i = 0; i < PTRS_PER_PMD; i++) {
                pmd = pmd_start + i;
                if (!pmd_none(*pmd))
                        return;
        }

        /* free a pmd table */
        free_pagetable(pud_page(*pud), 0);
        spin_lock(&init_mm.page_table_lock);
        pud_clear(pud);
        spin_unlock(&init_mm.page_table_lock);
}
static void __meminit free_pud_table(pud_t *pud_start, p4d_t *p4d)
{
        pud_t *pud;
        int i;

        for (i = 0; i < PTRS_PER_PUD; i++) {
                pud = pud_start + i;
                if (!pud_none(*pud))
                        return;
        }

        /* free a pud table */
        free_pagetable(p4d_page(*p4d), 0);
        spin_lock(&init_mm.page_table_lock);
        p4d_clear(p4d);
        spin_unlock(&init_mm.page_table_lock);
}
static void __meminit
remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end,
                 bool direct)
{
        unsigned long next, pages = 0;
        pte_t *pte;
        phys_addr_t phys_addr;

        pte = pte_start + pte_index(addr);
        for (; addr < end; addr = next, pte++) {
                next = (addr + PAGE_SIZE) & PAGE_MASK;
                if (next > end)
                        next = end;

                if (!pte_present(*pte))
                        continue;

                /*
                 * We mapped [0,1G) memory as identity mapping when
                 * initializing, in arch/x86/kernel/head_64.S. These
                 * pagetables cannot be removed.
                 */
                phys_addr = pte_val(*pte) + (addr & PAGE_MASK);
                if (phys_addr < (phys_addr_t)0x40000000)
                        return;

                if (!direct)
                        free_pagetable(pte_page(*pte), 0);

                spin_lock(&init_mm.page_table_lock);
                pte_clear(&init_mm, addr, pte);
                spin_unlock(&init_mm.page_table_lock);

                /* For non-direct mapping, pages means nothing. */
                pages++;
        }

        /* Call free_pte_table() in remove_pmd_table(). */
        flush_tlb_all();
        if (direct)
                update_page_count(PG_LEVEL_4K, -pages);
}
static void __meminit
remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end,
                 bool direct, struct vmem_altmap *altmap)
{
        unsigned long next, pages = 0;
        pte_t *pte_base;
        pmd_t *pmd;

        pmd = pmd_start + pmd_index(addr);
        for (; addr < end; addr = next, pmd++) {
                next = pmd_addr_end(addr, end);

                if (!pmd_present(*pmd))
                        continue;

                if (pmd_leaf(*pmd)) {
                        if (IS_ALIGNED(addr, PMD_SIZE) &&
                            IS_ALIGNED(next, PMD_SIZE)) {
                                if (!direct)
                                        free_hugepage_table(pmd_page(*pmd),
                                                            altmap);

                                spin_lock(&init_mm.page_table_lock);
                                pmd_clear(pmd);
                                spin_unlock(&init_mm.page_table_lock);
                                pages++;
                        }
#ifdef CONFIG_SPARSEMEM_VMEMMAP
                        else if (vmemmap_pmd_is_unused(addr, next)) {
                                free_hugepage_table(pmd_page(*pmd),
                                                    altmap);
                                spin_lock(&init_mm.page_table_lock);
                                pmd_clear(pmd);
                                spin_unlock(&init_mm.page_table_lock);
                        }
#endif
                        continue;
                }

                pte_base = (pte_t *)pmd_page_vaddr(*pmd);
                remove_pte_table(pte_base, addr, next, direct);
                free_pte_table(pte_base, pmd);
        }

        /* Call free_pmd_table() in remove_pud_table(). */
        if (direct)
                update_page_count(PG_LEVEL_2M, -pages);
}
static void __meminit
remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end,
                 struct vmem_altmap *altmap, bool direct)
{
        unsigned long next, pages = 0;
        pmd_t *pmd_base;
        pud_t *pud;

        pud = pud_start + pud_index(addr);
        for (; addr < end; addr = next, pud++) {
                next = pud_addr_end(addr, end);

                if (!pud_present(*pud))
                        continue;

                if (pud_leaf(*pud) &&
                    IS_ALIGNED(addr, PUD_SIZE) &&
                    IS_ALIGNED(next, PUD_SIZE)) {
                        spin_lock(&init_mm.page_table_lock);
                        pud_clear(pud);
                        spin_unlock(&init_mm.page_table_lock);
                        pages++;
                        continue;
                }

                pmd_base = pmd_offset(pud, 0);
                remove_pmd_table(pmd_base, addr, next, direct, altmap);
                free_pmd_table(pmd_base, pud);
        }

        if (direct)
                update_page_count(PG_LEVEL_1G, -pages);
}
static void __meminit
remove_p4d_table(p4d_t *p4d_start, unsigned long addr, unsigned long end,
                 struct vmem_altmap *altmap, bool direct)
{
        unsigned long next, pages = 0;
        pud_t *pud_base;
        p4d_t *p4d;

        p4d = p4d_start + p4d_index(addr);
        for (; addr < end; addr = next, p4d++) {
                next = p4d_addr_end(addr, end);

                if (!p4d_present(*p4d))
                        continue;

                BUILD_BUG_ON(p4d_leaf(*p4d));

                pud_base = pud_offset(p4d, 0);
                remove_pud_table(pud_base, addr, next, altmap, direct);
                /*
                 * For 4-level page tables we do not want to free PUDs, but in the
                 * 5-level case we should free them. This code will have to change
                 * to adapt for boot-time switching between 4 and 5 level page tables.
                 */
                if (pgtable_l5_enabled())
                        free_pud_table(pud_base, p4d);
        }

        if (direct)
                update_page_count(PG_LEVEL_512G, -pages);
}
/* start and end are both virtual addresses. */
static void __meminit
remove_pagetable(unsigned long start, unsigned long end, bool direct,
                 struct vmem_altmap *altmap)
{
        unsigned long addr;
        unsigned long next;
        pgd_t *pgd;
        p4d_t *p4d;

        for (addr = start; addr < end; addr = next) {
                next = pgd_addr_end(addr, end);

                pgd = pgd_offset_k(addr);
                if (!pgd_present(*pgd))
                        continue;

                p4d = p4d_offset(pgd, 0);
                remove_p4d_table(p4d, addr, next, altmap, direct);
        }

        flush_tlb_all();
}
void __ref vmemmap_free(unsigned long start, unsigned long end,
                struct vmem_altmap *altmap)
{
        VM_BUG_ON(!PAGE_ALIGNED(start));
        VM_BUG_ON(!PAGE_ALIGNED(end));

        remove_pagetable(start, end, false, altmap);
}
static void __meminit
kernel_physical_mapping_remove(unsigned long start, unsigned long end)
{
        start = (unsigned long)__va(start);
        end = (unsigned long)__va(end);

        remove_pagetable(start, end, true, NULL);
}
void __ref arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;

        __remove_pages(start_pfn, nr_pages, altmap);
        kernel_physical_mapping_remove(start, start + size);
}
#endif /* CONFIG_MEMORY_HOTPLUG */
static struct kcore_list kcore_vsyscall;

static void __init register_page_bootmem_info(void)
{
#if defined(CONFIG_NUMA) || defined(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP)
        int i;

        for_each_online_node(i)
                register_page_bootmem_info_node(NODE_DATA(i));
#endif
}
/*
 * Pre-allocates page-table pages for the vmalloc area in the kernel page-table.
 * Only the level which needs to be synchronized between all page-tables is
 * allocated because the synchronization can be expensive.
 */
static void __init preallocate_vmalloc_pages(void)
{
        unsigned long addr;
        const char *lvl;

        for (addr = VMALLOC_START; addr <= VMEMORY_END; addr = ALIGN(addr + 1, PGDIR_SIZE)) {
                pgd_t *pgd = pgd_offset_k(addr);
                p4d_t *p4d;
                pud_t *pud;

                lvl = "p4d";
                p4d = p4d_alloc(&init_mm, pgd, addr);
                if (!p4d)
                        goto failed;

                if (pgtable_l5_enabled())
                        continue;

                /*
                 * The goal here is to allocate all possibly required
                 * hardware page tables pointed to by the top hardware
                 * level.
                 *
                 * On 4-level systems, the P4D layer is folded away and
                 * the above code does no preallocation.  Below, go down
                 * to the pud _software_ level to ensure the second
                 * hardware level is allocated on 4-level systems too.
                 */
                lvl = "pud";
                pud = pud_alloc(&init_mm, p4d, addr);
                if (!pud)
                        goto failed;
        }

        return;

failed:

        /*
         * The pages have to be there now or they will be missing in
         * process page-tables later.
         */
        panic("Failed to pre-allocate %s pages for vmalloc area\n", lvl);
}
void __init mem_init(void)
{
        pci_iommu_alloc();

        /* clear_bss() already cleared the empty_zero_page */

        /* this will put all memory onto the freelists */
        memblock_free_all();
        after_bootmem = 1;
        x86_init.hyper.init_after_bootmem();

        /*
         * Must be done after boot memory is put on freelist, because here we
         * might set fields in deferred struct pages that have not yet been
         * initialized, and memblock_free_all() initializes all the reserved
         * deferred pages for us.
         */
        register_page_bootmem_info();

        /* Register memory areas for /proc/kcore */
        if (get_gate_vma(&init_mm))
                kclist_add(&kcore_vsyscall, (void *)VSYSCALL_ADDR, PAGE_SIZE, KCORE_USER);

        preallocate_vmalloc_pages();
}
int kernel_set_to_readonly;

void mark_rodata_ro(void)
{
        unsigned long start = PFN_ALIGN(_text);
        unsigned long rodata_start = PFN_ALIGN(__start_rodata);
        unsigned long end = (unsigned long)__end_rodata_hpage_align;
        unsigned long text_end = PFN_ALIGN(_etext);
        unsigned long rodata_end = PFN_ALIGN(__end_rodata);
        unsigned long all_end;

        printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
               (end - start) >> 10);
        set_memory_ro(start, (end - start) >> PAGE_SHIFT);

        kernel_set_to_readonly = 1;

        /*
         * The rodata/data/bss/brk section (but not the kernel text!)
         * should also be not-executable.
         *
         * We align all_end to PMD_SIZE because the existing mapping
         * is a full PMD. If we would align _brk_end to PAGE_SIZE we
         * split the PMD and the remainder between _brk_end and the end
         * of the PMD will remain mapped executable.
         *
         * Any PMD which was setup after the one which covers _brk_end
         * has been zapped already via cleanup_highmap().
         */
        all_end = roundup((unsigned long)_brk_end, PMD_SIZE);
        set_memory_nx(text_end, (all_end - text_end) >> PAGE_SHIFT);

        set_ftrace_ops_ro();

#ifdef CONFIG_CPA_DEBUG
        printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, end);
        set_memory_rw(start, (end - start) >> PAGE_SHIFT);

        printk(KERN_INFO "Testing CPA: again\n");
        set_memory_ro(start, (end - start) >> PAGE_SHIFT);
#endif

        free_kernel_image_pages("unused kernel image (text/rodata gap)",
                                (void *)text_end, (void *)rodata_start);
        free_kernel_image_pages("unused kernel image (rodata/data gap)",
                                (void *)rodata_end, (void *)_sdata);
}
/*
 * Block size is the minimum amount of memory which can be hotplugged or
 * hotremoved. It must be a power of two, equal to or larger than
 * MIN_MEMORY_BLOCK_SIZE.
 */
#define MAX_BLOCK_SIZE (2UL << 30)

/* Amount of ram needed to start using large blocks */
#define MEM_SIZE_FOR_LARGE_BLOCK (64UL << 30)

/* Adjustable memory block size */
static unsigned long set_memory_block_size;
int __init set_memory_block_size_order(unsigned int order)
{
        unsigned long size = 1UL << order;

        if (size > MEM_SIZE_FOR_LARGE_BLOCK || size < MIN_MEMORY_BLOCK_SIZE)
                return -EINVAL;

        set_memory_block_size = size;
        return 0;
}
static unsigned long probe_memory_block_size(void)
{
        unsigned long boot_mem_end = max_pfn << PAGE_SHIFT;
        unsigned long bz;

        /* If memory block size has been set, then use it */
        bz = set_memory_block_size;
        if (bz)
                goto done;

        /* Use regular block if RAM is smaller than MEM_SIZE_FOR_LARGE_BLOCK */
        if (boot_mem_end < MEM_SIZE_FOR_LARGE_BLOCK) {
                bz = MIN_MEMORY_BLOCK_SIZE;
                goto done;
        }

        /*
         * Use max block size to minimize overhead on bare metal, where
         * alignment for memory hotplug isn't a concern.
         */
        if (!boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
                bz = MAX_BLOCK_SIZE;
                goto done;
        }

        /* Find the largest allowed block size that aligns to memory end */
        for (bz = MAX_BLOCK_SIZE; bz > MIN_MEMORY_BLOCK_SIZE; bz >>= 1) {
                if (IS_ALIGNED(boot_mem_end, bz))
                        break;
        }
done:
        pr_info("x86/mm: Memory block size: %ldMB\n", bz >> 20);

        return bz;
}
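
/*
 * Worked example (added for illustration): on a hypervisor guest with
 * 64 GB + 768 MB of RAM (boot_mem_end = 0x1030000000), the fallback loop
 * above picks bz = 256 MB: 2 GB, 1 GB and 512 MB do not evenly divide the
 * end address, while 256 MB does, so that is the largest block size that
 * stays aligned to the end of boot memory.
 */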
static unsigned long memory_block_size_probed;
unsigned long memory_block_size_bytes(void)
{
        if (!memory_block_size_probed)
                memory_block_size_probed = probe_memory_block_size();

        return memory_block_size_probed;
}
#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Initialise the sparsemem vmemmap using huge-pages at the PMD level.
 */
static long __meminitdata addr_start, addr_end;
static void __meminitdata *p_start, *p_end;
static int __meminitdata node_start;
void __meminit vmemmap_set_pmd(pmd_t *pmd, void *p, int node,
                               unsigned long addr, unsigned long next)
{
        pte_t entry;

        entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
                        PAGE_KERNEL_LARGE);
        set_pmd(pmd, __pmd(pte_val(entry)));

        /* check to see if we have contiguous blocks */
        if (p_end != p || node_start != node) {
                if (p_start)
                        pr_debug(" [%lx-%lx] PMD -> [%p-%p] on node %d\n",
                                 addr_start, addr_end - 1, p_start, p_end - 1, node_start);
                addr_start = addr;
                node_start = node;
                p_start = p;
        }

        addr_end = addr + PMD_SIZE;
        p_end = p + PMD_SIZE;

        if (!IS_ALIGNED(addr, PMD_SIZE) ||
            !IS_ALIGNED(next, PMD_SIZE))
                vmemmap_use_new_sub_pmd(addr, next);
}
int __meminit vmemmap_check_pmd(pmd_t *pmd, int node,
                                unsigned long addr, unsigned long next)
{
        int large = pmd_leaf(*pmd);

        if (pmd_leaf(*pmd)) {
                vmemmap_verify((pte_t *)pmd, node, addr, next);
                vmemmap_use_sub_pmd(addr, next);
        }

        return large;
}
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
                               struct vmem_altmap *altmap)
{
        int err;

        VM_BUG_ON(!PAGE_ALIGNED(start));
        VM_BUG_ON(!PAGE_ALIGNED(end));

        if (end - start < PAGES_PER_SECTION * sizeof(struct page))
                err = vmemmap_populate_basepages(start, end, node, NULL);
        else if (boot_cpu_has(X86_FEATURE_PSE))
                err = vmemmap_populate_hugepages(start, end, node, altmap);
        else if (altmap) {
                pr_err_once("%s: no cpu support for altmap allocations\n",
                                __func__);
                err = -ENOMEM;
        } else
                err = vmemmap_populate_basepages(start, end, node, NULL);
        if (!err)
                sync_global_pgds(start, end - 1);
        return err;
}
#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
void register_page_bootmem_memmap(unsigned long section_nr,
                                  struct page *start_page, unsigned long nr_pages)
{
        unsigned long addr = (unsigned long)start_page;
        unsigned long end = (unsigned long)(start_page + nr_pages);
        unsigned long next;
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        unsigned int nr_pmd_pages;
        struct page *page;

        for (; addr < end; addr = next) {
                pte_t *pte = NULL;

                pgd = pgd_offset_k(addr);
                if (pgd_none(*pgd)) {
                        next = (addr + PAGE_SIZE) & PAGE_MASK;
                        continue;
                }
                get_page_bootmem(section_nr, pgd_page(*pgd), MIX_SECTION_INFO);

                p4d = p4d_offset(pgd, addr);
                if (p4d_none(*p4d)) {
                        next = (addr + PAGE_SIZE) & PAGE_MASK;
                        continue;
                }
                get_page_bootmem(section_nr, p4d_page(*p4d), MIX_SECTION_INFO);

                pud = pud_offset(p4d, addr);
                if (pud_none(*pud)) {
                        next = (addr + PAGE_SIZE) & PAGE_MASK;
                        continue;
                }
                get_page_bootmem(section_nr, pud_page(*pud), MIX_SECTION_INFO);

                if (!boot_cpu_has(X86_FEATURE_PSE)) {
                        next = (addr + PAGE_SIZE) & PAGE_MASK;
                        pmd = pmd_offset(pud, addr);
                        if (pmd_none(*pmd))
                                continue;
                        get_page_bootmem(section_nr, pmd_page(*pmd),
                                         MIX_SECTION_INFO);

                        pte = pte_offset_kernel(pmd, addr);
                        if (pte_none(*pte))
                                continue;
                        get_page_bootmem(section_nr, pte_page(*pte),
                                         SECTION_INFO);
                } else {
                        next = pmd_addr_end(addr, end);

                        pmd = pmd_offset(pud, addr);
                        if (pmd_none(*pmd))
                                continue;

                        nr_pmd_pages = 1 << get_order(PMD_SIZE);
                        page = pmd_page(*pmd);
                        while (nr_pmd_pages--)
                                get_page_bootmem(section_nr, page++,
                                                 SECTION_INFO);
                }
        }
}
#endif
void __meminit vmemmap_populate_print_last(void)
{
        if (p_start) {
                pr_debug(" [%lx-%lx] PMD -> [%p-%p] on node %d\n",
                         addr_start, addr_end - 1, p_start, p_end - 1, node_start);
                p_start = NULL;
                p_end = NULL;
                node_start = 0;
        }
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */