// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/x86_64/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2000  Pavel Machek <pavel@ucw.cz>
 *  Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/dma-mapping.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/memremap.h>
#include <linux/nmi.h>
#include <linux/gfp.h>
#include <linux/kcore.h>

#include <asm/processor.h>
#include <asm/bios_ebda.h>
#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/e820/api.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/sections.h>
#include <asm/kdebug.h>
#include <asm/set_memory.h>
#include <asm/uv/uv.h>
#include <asm/setup.h>
#include <asm/ftrace.h>

#include "mm_internal.h"

#include "ident_map.c"
#define DEFINE_POPULATE(fname, type1, type2, init)		\
static inline void fname##_init(struct mm_struct *mm,		\
		type1##_t *arg1, type2##_t *arg2, bool init)	\
{								\
	if (init)						\
		fname##_safe(mm, arg1, arg2);			\
	else							\
		fname(mm, arg1, arg2);				\
}

DEFINE_POPULATE(p4d_populate, p4d, pud, init)
DEFINE_POPULATE(pgd_populate, pgd, p4d, init)
DEFINE_POPULATE(pud_populate, pud, pmd, init)
DEFINE_POPULATE(pmd_populate_kernel, pmd, pte, init)
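
/*
 * For example, DEFINE_POPULATE(pud_populate, pud, pmd, init) above generates
 * pud_populate_init(), which calls pud_populate_safe() when 'init' is true
 * (while the early page tables are being built) and plain pud_populate()
 * otherwise.
 */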
#define DEFINE_ENTRY(type1, type2, init)			\
static inline void set_##type1##_init(type1##_t *arg1,		\
			type2##_t arg2, bool init)		\
{								\
	if (init)						\
		set_##type1##_safe(arg1, arg2);			\
	else							\
		set_##type1(arg1, arg2);			\
}

DEFINE_ENTRY(p4d, p4d, init)
DEFINE_ENTRY(pud, pud, init)
DEFINE_ENTRY(pmd, pmd, init)
DEFINE_ENTRY(pte, pte, init)
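
/*
 * The helpers generated above (set_pte_init(), set_pmd_init(), set_pud_init()
 * and set_p4d_init()) are used by the phys_*_init() routines below to choose
 * between the checking "_safe" setters and the plain ones.
 */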
/*
 * NOTE: pagetable_init() allocates all the fixmap pagetables contiguously
 * in physical memory, so we can cache the location of the first one and
 * move around without checking the pgd every time.
 */

/* Bits supported by the hardware: */
pteval_t __supported_pte_mask __read_mostly = ~0;
/* Bits allowed in normal kernel mappings: */
pteval_t __default_kernel_pte_mask __read_mostly = ~0;
EXPORT_SYMBOL_GPL(__supported_pte_mask);
/* Used in PAGE_KERNEL_* macros which are reasonably used out-of-tree: */
EXPORT_SYMBOL(__default_kernel_pte_mask);

int force_personality32;
/*
 * noexec32=on|off
 * Control non-executable heap for 32-bit processes.
 * To control the stack too use noexec=off
 *
 * on	PROT_READ does not imply PROT_EXEC for 32-bit processes (default)
 * off	PROT_READ implies PROT_EXEC
 */
static int __init nonx32_setup(char *str)
{
	if (!strcmp(str, "on"))
		force_personality32 &= ~READ_IMPLIES_EXEC;
	else if (!strcmp(str, "off"))
		force_personality32 |= READ_IMPLIES_EXEC;
	return 1;
}
__setup("noexec32=", nonx32_setup);
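
/*
 * Example: booting with "noexec32=off" on the kernel command line makes
 * PROT_READ imply PROT_EXEC for 32-bit processes, as described above;
 * "noexec32=on" restores the default behaviour.
 */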
static void sync_global_pgds_l5(unsigned long start, unsigned long end)
{
	unsigned long addr;

	for (addr = start; addr <= end; addr = ALIGN(addr + 1, PGDIR_SIZE)) {
		const pgd_t *pgd_ref = pgd_offset_k(addr);
		struct page *page;

		/* Check for overflow */
		if (addr < start)
			break;

		if (pgd_none(*pgd_ref))
			continue;

		spin_lock(&pgd_lock);
		list_for_each_entry(page, &pgd_list, lru) {
			pgd_t *pgd;
			spinlock_t *pgt_lock;

			pgd = (pgd_t *)page_address(page) + pgd_index(addr);
			/* the pgt_lock only for Xen */
			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
			spin_lock(pgt_lock);

			if (!pgd_none(*pgd_ref) && !pgd_none(*pgd))
				BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));

			if (pgd_none(*pgd))
				set_pgd(pgd, *pgd_ref);

			spin_unlock(pgt_lock);
		}
		spin_unlock(&pgd_lock);
	}
}
static void sync_global_pgds_l4(unsigned long start, unsigned long end)
{
	unsigned long addr;

	for (addr = start; addr <= end; addr = ALIGN(addr + 1, PGDIR_SIZE)) {
		pgd_t *pgd_ref = pgd_offset_k(addr);
		const p4d_t *p4d_ref;
		struct page *page;

		/*
		 * With folded p4d, pgd_none() is always false, we need to
		 * handle synchronization on the p4d level.
		 */
		MAYBE_BUILD_BUG_ON(pgd_none(*pgd_ref));
		p4d_ref = p4d_offset(pgd_ref, addr);

		if (p4d_none(*p4d_ref))
			continue;

		spin_lock(&pgd_lock);
		list_for_each_entry(page, &pgd_list, lru) {
			pgd_t *pgd;
			p4d_t *p4d;
			spinlock_t *pgt_lock;

			pgd = (pgd_t *)page_address(page) + pgd_index(addr);
			p4d = p4d_offset(pgd, addr);
			/* the pgt_lock only for Xen */
			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
			spin_lock(pgt_lock);

			if (!p4d_none(*p4d_ref) && !p4d_none(*p4d))
				BUG_ON(p4d_page_vaddr(*p4d)
				       != p4d_page_vaddr(*p4d_ref));

			if (p4d_none(*p4d))
				set_p4d(p4d, *p4d_ref);

			spin_unlock(pgt_lock);
		}
		spin_unlock(&pgd_lock);
	}
}
/*
 * When memory is added, make sure that every process' mm has suitable
 * PGD entries in its local PGD-level page.
 */
void sync_global_pgds(unsigned long start, unsigned long end)
{
	if (pgtable_l5_enabled())
		sync_global_pgds_l5(start, end);
	else
		sync_global_pgds_l4(start, end);
}
/*
 * NOTE: This function is marked __ref because it calls the __init memblock
 * allocator. That is safe ONLY while after_bootmem == 0.
 */
static __ref void *spp_getpage(void)
{
	void *ptr;

	if (after_bootmem)
		ptr = (void *) get_zeroed_page(GFP_ATOMIC);
	else
		ptr = memblock_alloc(PAGE_SIZE, PAGE_SIZE);

	if (!ptr || ((unsigned long)ptr & ~PAGE_MASK)) {
		panic("set_pte_phys: cannot allocate page data %s\n",
		      after_bootmem ? "after bootmem" : "");
	}

	pr_debug("spp_getpage %p\n", ptr);

	return ptr;
}
static p4d_t *fill_p4d(pgd_t *pgd, unsigned long vaddr)
{
	if (pgd_none(*pgd)) {
		p4d_t *p4d = (p4d_t *)spp_getpage();
		pgd_populate(&init_mm, pgd, p4d);
		if (p4d != p4d_offset(pgd, 0))
			printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
			       p4d, p4d_offset(pgd, 0));
	}
	return p4d_offset(pgd, vaddr);
}

static pud_t *fill_pud(p4d_t *p4d, unsigned long vaddr)
{
	if (p4d_none(*p4d)) {
		pud_t *pud = (pud_t *)spp_getpage();
		p4d_populate(&init_mm, p4d, pud);
		if (pud != pud_offset(p4d, 0))
			printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
			       pud, pud_offset(p4d, 0));
	}
	return pud_offset(p4d, vaddr);
}

static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
{
	if (pud_none(*pud)) {
		pmd_t *pmd = (pmd_t *) spp_getpage();
		pud_populate(&init_mm, pud, pmd);
		if (pmd != pmd_offset(pud, 0))
			printk(KERN_ERR "PAGETABLE BUG #02! %p <-> %p\n",
			       pmd, pmd_offset(pud, 0));
	}
	return pmd_offset(pud, vaddr);
}

static pte_t *fill_pte(pmd_t *pmd, unsigned long vaddr)
{
	if (pmd_none(*pmd)) {
		pte_t *pte = (pte_t *) spp_getpage();
		pmd_populate_kernel(&init_mm, pmd, pte);
		if (pte != pte_offset_kernel(pmd, 0))
			printk(KERN_ERR "PAGETABLE BUG #03!\n");
	}
	return pte_offset_kernel(pmd, vaddr);
}
static void __set_pte_vaddr(pud_t *pud, unsigned long vaddr, pte_t new_pte)
{
	pmd_t *pmd = fill_pmd(pud, vaddr);
	pte_t *pte = fill_pte(pmd, vaddr);

	set_pte(pte, new_pte);

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one_kernel(vaddr);
}

void set_pte_vaddr_p4d(p4d_t *p4d_page, unsigned long vaddr, pte_t new_pte)
{
	p4d_t *p4d = p4d_page + p4d_index(vaddr);
	pud_t *pud = fill_pud(p4d, vaddr);

	__set_pte_vaddr(pud, vaddr, new_pte);
}

void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
{
	pud_t *pud = pud_page + pud_index(vaddr);

	__set_pte_vaddr(pud, vaddr, new_pte);
}

void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
{
	pgd_t *pgd;
	p4d_t *p4d_page;

	pr_debug("set_pte_vaddr %lx to %lx\n", vaddr, native_pte_val(pteval));

	pgd = pgd_offset_k(vaddr);
	if (pgd_none(*pgd)) {
		printk(KERN_ERR
			"PGD FIXMAP MISSING, it should be setup in head.S!\n");
		return;
	}

	p4d_page = p4d_offset(pgd, 0);
	set_pte_vaddr_p4d(p4d_page, vaddr, pteval);
}
pmd_t * __init populate_extra_pmd(unsigned long vaddr)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;

	pgd = pgd_offset_k(vaddr);
	p4d = fill_p4d(pgd, vaddr);
	pud = fill_pud(p4d, vaddr);

	return fill_pmd(pud, vaddr);
}

pte_t * __init populate_extra_pte(unsigned long vaddr)
{
	pmd_t *pmd;

	pmd = populate_extra_pmd(vaddr);
	return fill_pte(pmd, vaddr);
}
/*
 * Create large page table mappings for a range of physical addresses.
 */
static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
					enum page_cache_mode cache)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pgprot_t prot;

	pgprot_val(prot) = pgprot_val(PAGE_KERNEL_LARGE) |
		pgprot_val(pgprot_4k_2_large(cachemode2pgprot(cache)));
	BUG_ON((phys & ~PMD_MASK) || (size & ~PMD_MASK));
	for (; size; phys += PMD_SIZE, size -= PMD_SIZE) {
		pgd = pgd_offset_k((unsigned long)__va(phys));
		if (pgd_none(*pgd)) {
			p4d = (p4d_t *) spp_getpage();
			set_pgd(pgd, __pgd(__pa(p4d) | _KERNPG_TABLE |
						_PAGE_USER));
		}
		p4d = p4d_offset(pgd, (unsigned long)__va(phys));
		if (p4d_none(*p4d)) {
			pud = (pud_t *) spp_getpage();
			set_p4d(p4d, __p4d(__pa(pud) | _KERNPG_TABLE |
						_PAGE_USER));
		}
		pud = pud_offset(p4d, (unsigned long)__va(phys));
		if (pud_none(*pud)) {
			pmd = (pmd_t *) spp_getpage();
			set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
						_PAGE_USER));
		}
		pmd = pmd_offset(pud, phys);
		BUG_ON(!pmd_none(*pmd));
		set_pmd(pmd, __pmd(phys | pgprot_val(prot)));
	}
}

void __init init_extra_mapping_wb(unsigned long phys, unsigned long size)
{
	__init_extra_mapping(phys, size, _PAGE_CACHE_MODE_WB);
}

void __init init_extra_mapping_uc(unsigned long phys, unsigned long size)
{
	__init_extra_mapping(phys, size, _PAGE_CACHE_MODE_UC);
}
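
/*
 * Both wrappers above map the range with 2MB (PMD-sized) pages; the BUG_ON()
 * in __init_extra_mapping() requires phys and size to be PMD-aligned.
 */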
/*
 * The head.S code sets up the kernel high mapping:
 *
 *   from __START_KERNEL_map to __START_KERNEL_map + size (== _end - _text)
 *
 * phys_base holds the negative offset to the kernel, which is added
 * to the compile time generated pmds. This results in invalid pmds up
 * to the point where we hit the physaddr 0 mapping.
 *
 * We limit the mappings to the region from _text to _brk_end.  _brk_end
 * is rounded up to the 2MB boundary. This catches the invalid pmds as
 * well, as they are located before _text:
 */
void __init cleanup_highmap(void)
{
	unsigned long vaddr = __START_KERNEL_map;
	unsigned long vaddr_end = __START_KERNEL_map + KERNEL_IMAGE_SIZE;
	unsigned long end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
	pmd_t *pmd = level2_kernel_pgt;

	/*
	 * Native path, max_pfn_mapped is not set yet.
	 * Xen has valid max_pfn_mapped set in
	 *	arch/x86/xen/mmu.c:xen_setup_kernel_pagetable().
	 */
	if (max_pfn_mapped)
		vaddr_end = __START_KERNEL_map + (max_pfn_mapped << PAGE_SHIFT);

	for (; vaddr + PMD_SIZE - 1 < vaddr_end; pmd++, vaddr += PMD_SIZE) {
		if (pmd_none(*pmd))
			continue;
		if (vaddr < (unsigned long) _text || vaddr > end)
			set_pmd(pmd, __pmd(0));
	}
}
/*
 * Create PTE level page table mapping for physical addresses.
 * It returns the last physical address mapped.
 */
static unsigned long __meminit
phys_pte_init(pte_t *pte_page, unsigned long paddr, unsigned long paddr_end,
	      pgprot_t prot, bool init)
{
	unsigned long pages = 0, paddr_next;
	unsigned long paddr_last = paddr_end;
	pte_t *pte;
	int i;

	pte = pte_page + pte_index(paddr);
	i = pte_index(paddr);

	for (; i < PTRS_PER_PTE; i++, paddr = paddr_next, pte++) {
		paddr_next = (paddr & PAGE_MASK) + PAGE_SIZE;
		if (paddr >= paddr_end) {
			if (!after_bootmem &&
			    !e820__mapped_any(paddr & PAGE_MASK, paddr_next,
					      E820_TYPE_RAM) &&
			    !e820__mapped_any(paddr & PAGE_MASK, paddr_next,
					      E820_TYPE_RESERVED_KERN))
				set_pte_init(pte, __pte(0), init);
			continue;
		}

		/*
		 * We will re-use the existing mapping.
		 * Xen for example has some special requirements, like mapping
		 * pagetable pages as RO. So assume that whoever pre-set up
		 * these mappings knew what they were doing.
		 */
		if (!pte_none(*pte)) {
			if (!after_bootmem)
				pages++;
			continue;
		}

		if (0)
			pr_info("   pte=%p addr=%lx pte=%016lx\n", pte, paddr,
				pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL).pte);
		pages++;
		set_pte_init(pte, pfn_pte(paddr >> PAGE_SHIFT, prot), init);
		paddr_last = (paddr & PAGE_MASK) + PAGE_SIZE;
	}

	update_page_count(PG_LEVEL_4K, pages);

	return paddr_last;
}
/*
 * Create PMD level page table mapping for physical addresses. The virtual
 * and physical addresses have to be aligned at this level.
 * It returns the last physical address mapped.
 */
static unsigned long __meminit
phys_pmd_init(pmd_t *pmd_page, unsigned long paddr, unsigned long paddr_end,
	      unsigned long page_size_mask, pgprot_t prot, bool init)
{
	unsigned long pages = 0, paddr_next;
	unsigned long paddr_last = paddr_end;

	int i = pmd_index(paddr);

	for (; i < PTRS_PER_PMD; i++, paddr = paddr_next) {
		pmd_t *pmd = pmd_page + pmd_index(paddr);
		pte_t *pte;
		pgprot_t new_prot = prot;

		paddr_next = (paddr & PMD_MASK) + PMD_SIZE;
		if (paddr >= paddr_end) {
			if (!after_bootmem &&
			    !e820__mapped_any(paddr & PMD_MASK, paddr_next,
					      E820_TYPE_RAM) &&
			    !e820__mapped_any(paddr & PMD_MASK, paddr_next,
					      E820_TYPE_RESERVED_KERN))
				set_pmd_init(pmd, __pmd(0), init);
			continue;
		}

		if (!pmd_none(*pmd)) {
			if (!pmd_large(*pmd)) {
				spin_lock(&init_mm.page_table_lock);
				pte = (pte_t *)pmd_page_vaddr(*pmd);
				paddr_last = phys_pte_init(pte, paddr,
							   paddr_end, prot,
							   init);
				spin_unlock(&init_mm.page_table_lock);
				continue;
			}
			/*
			 * If we are ok with PG_LEVEL_2M mapping, then we will
			 * use the existing mapping.
			 *
			 * Otherwise, we will split the large page mapping but
			 * use the same existing protection bits except for
			 * large page, so that we don't violate Intel's TLB
			 * Application note (317080) which says, while changing
			 * the page sizes, new and old translations should
			 * not differ with respect to page frame and
			 * attributes.
			 */
			if (page_size_mask & (1 << PG_LEVEL_2M)) {
				if (!after_bootmem)
					pages++;
				paddr_last = paddr_next;
				continue;
			}
			new_prot = pte_pgprot(pte_clrhuge(*(pte_t *)pmd));
		}

		if (page_size_mask & (1<<PG_LEVEL_2M)) {
			pages++;
			spin_lock(&init_mm.page_table_lock);
			set_pte_init((pte_t *)pmd,
				     pfn_pte((paddr & PMD_MASK) >> PAGE_SHIFT,
					     __pgprot(pgprot_val(prot) | _PAGE_PSE)),
				     init);
			spin_unlock(&init_mm.page_table_lock);
			paddr_last = paddr_next;
			continue;
		}

		pte = alloc_low_page();
		paddr_last = phys_pte_init(pte, paddr, paddr_end, new_prot, init);

		spin_lock(&init_mm.page_table_lock);
		pmd_populate_kernel_init(&init_mm, pmd, pte, init);
		spin_unlock(&init_mm.page_table_lock);
	}
	update_page_count(PG_LEVEL_2M, pages);
	return paddr_last;
}
/*
 * Create PUD level page table mapping for physical addresses. The virtual
 * and physical addresses do not have to be aligned at this level. KASLR can
 * randomize virtual addresses up to this level.
 * It returns the last physical address mapped.
 */
static unsigned long __meminit
phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
	      unsigned long page_size_mask, pgprot_t _prot, bool init)
{
	unsigned long pages = 0, paddr_next;
	unsigned long paddr_last = paddr_end;
	unsigned long vaddr = (unsigned long)__va(paddr);
	int i = pud_index(vaddr);

	for (; i < PTRS_PER_PUD; i++, paddr = paddr_next) {
		pud_t *pud;
		pmd_t *pmd;
		pgprot_t prot = _prot;

		vaddr = (unsigned long)__va(paddr);
		pud = pud_page + pud_index(vaddr);
		paddr_next = (paddr & PUD_MASK) + PUD_SIZE;

		if (paddr >= paddr_end) {
			if (!after_bootmem &&
			    !e820__mapped_any(paddr & PUD_MASK, paddr_next,
					      E820_TYPE_RAM) &&
			    !e820__mapped_any(paddr & PUD_MASK, paddr_next,
					      E820_TYPE_RESERVED_KERN))
				set_pud_init(pud, __pud(0), init);
			continue;
		}

		if (!pud_none(*pud)) {
			if (!pud_large(*pud)) {
				pmd = pmd_offset(pud, 0);
				paddr_last = phys_pmd_init(pmd, paddr,
							   paddr_end,
							   page_size_mask,
							   prot, init);
				continue;
			}
			/*
			 * If we are ok with PG_LEVEL_1G mapping, then we will
			 * use the existing mapping.
			 *
			 * Otherwise, we will split the gbpage mapping but use
			 * the same existing protection bits except for large
			 * page, so that we don't violate Intel's TLB
			 * Application note (317080) which says, while changing
			 * the page sizes, new and old translations should
			 * not differ with respect to page frame and
			 * attributes.
			 */
			if (page_size_mask & (1 << PG_LEVEL_1G)) {
				if (!after_bootmem)
					pages++;
				paddr_last = paddr_next;
				continue;
			}
			prot = pte_pgprot(pte_clrhuge(*(pte_t *)pud));
		}

		if (page_size_mask & (1<<PG_LEVEL_1G)) {
			pages++;
			spin_lock(&init_mm.page_table_lock);

			prot = __pgprot(pgprot_val(prot) | __PAGE_KERNEL_LARGE);

			set_pte_init((pte_t *)pud,
				     pfn_pte((paddr & PUD_MASK) >> PAGE_SHIFT,
					     prot),
				     init);
			spin_unlock(&init_mm.page_table_lock);
			paddr_last = paddr_next;
			continue;
		}

		pmd = alloc_low_page();
		paddr_last = phys_pmd_init(pmd, paddr, paddr_end,
					   page_size_mask, prot, init);

		spin_lock(&init_mm.page_table_lock);
		pud_populate_init(&init_mm, pud, pmd, init);
		spin_unlock(&init_mm.page_table_lock);
	}

	update_page_count(PG_LEVEL_1G, pages);

	return paddr_last;
}
static unsigned long __meminit
phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end,
	      unsigned long page_size_mask, pgprot_t prot, bool init)
{
	unsigned long vaddr, vaddr_end, vaddr_next, paddr_next, paddr_last;

	paddr_last = paddr_end;
	vaddr = (unsigned long)__va(paddr);
	vaddr_end = (unsigned long)__va(paddr_end);

	if (!pgtable_l5_enabled())
		return phys_pud_init((pud_t *) p4d_page, paddr, paddr_end,
				     page_size_mask, prot, init);

	for (; vaddr < vaddr_end; vaddr = vaddr_next) {
		p4d_t *p4d = p4d_page + p4d_index(vaddr);
		pud_t *pud;

		vaddr_next = (vaddr & P4D_MASK) + P4D_SIZE;
		paddr = __pa(vaddr);

		if (paddr >= paddr_end) {
			paddr_next = __pa(vaddr_next);
			if (!after_bootmem &&
			    !e820__mapped_any(paddr & P4D_MASK, paddr_next,
					      E820_TYPE_RAM) &&
			    !e820__mapped_any(paddr & P4D_MASK, paddr_next,
					      E820_TYPE_RESERVED_KERN))
				set_p4d_init(p4d, __p4d(0), init);
			continue;
		}

		if (!p4d_none(*p4d)) {
			pud = pud_offset(p4d, 0);
			paddr_last = phys_pud_init(pud, paddr, __pa(vaddr_end),
						   page_size_mask, prot, init);
			continue;
		}

		pud = alloc_low_page();
		paddr_last = phys_pud_init(pud, paddr, __pa(vaddr_end),
					   page_size_mask, prot, init);

		spin_lock(&init_mm.page_table_lock);
		p4d_populate_init(&init_mm, p4d, pud, init);
		spin_unlock(&init_mm.page_table_lock);
	}

	return paddr_last;
}
static unsigned long __meminit
__kernel_physical_mapping_init(unsigned long paddr_start,
			       unsigned long paddr_end,
			       unsigned long page_size_mask,
			       pgprot_t prot, bool init)
{
	bool pgd_changed = false;
	unsigned long vaddr, vaddr_start, vaddr_end, vaddr_next, paddr_last;

	paddr_last = paddr_end;
	vaddr = (unsigned long)__va(paddr_start);
	vaddr_end = (unsigned long)__va(paddr_end);
	vaddr_start = vaddr;

	for (; vaddr < vaddr_end; vaddr = vaddr_next) {
		pgd_t *pgd = pgd_offset_k(vaddr);
		p4d_t *p4d;

		vaddr_next = (vaddr & PGDIR_MASK) + PGDIR_SIZE;

		if (pgd_val(*pgd)) {
			p4d = (p4d_t *)pgd_page_vaddr(*pgd);
			paddr_last = phys_p4d_init(p4d, __pa(vaddr),
						   __pa(vaddr_end),
						   page_size_mask,
						   prot, init);
			continue;
		}

		p4d = alloc_low_page();
		paddr_last = phys_p4d_init(p4d, __pa(vaddr), __pa(vaddr_end),
					   page_size_mask, prot, init);

		spin_lock(&init_mm.page_table_lock);
		if (pgtable_l5_enabled())
			pgd_populate_init(&init_mm, pgd, p4d, init);
		else
			p4d_populate_init(&init_mm, p4d_offset(pgd, vaddr),
					  (pud_t *) p4d, init);

		spin_unlock(&init_mm.page_table_lock);
		pgd_changed = true;
	}

	if (pgd_changed)
		sync_global_pgds(vaddr_start, vaddr_end - 1);

	return paddr_last;
}
/*
 * Create page table mappings for the physical memory at specific physical
 * addresses. Note that it can only be used to populate non-present entries.
 * The virtual and physical addresses have to be aligned on PMD level
 * down. It returns the last physical address mapped.
 */
unsigned long __meminit
kernel_physical_mapping_init(unsigned long paddr_start,
			     unsigned long paddr_end,
			     unsigned long page_size_mask, pgprot_t prot)
{
	return __kernel_physical_mapping_init(paddr_start, paddr_end,
					      page_size_mask, prot, true);
}

/*
 * This function is similar to kernel_physical_mapping_init() above with the
 * exception that it uses the plain set_{p4d,pud,pmd,pte}() helpers instead of
 * their _safe() variants when updating the mapping. The caller is responsible
 * for flushing the TLBs after the function returns.
 */
unsigned long __meminit
kernel_physical_mapping_change(unsigned long paddr_start,
			       unsigned long paddr_end,
			       unsigned long page_size_mask)
{
	return __kernel_physical_mapping_init(paddr_start, paddr_end,
					      page_size_mask, PAGE_KERNEL,
					      false);
}
#ifndef CONFIG_NUMA
void __init initmem_init(void)
{
	memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
}
#endif

void __init paging_init(void)
{
	sparse_memory_present_with_active_regions(MAX_NUMNODES);
	sparse_init();

	/*
	 * Clear the default state for node 0.
	 * Note: don't use nodes_clear() here; when NUMA support is not
	 * compiled in, that really clears the state and a later
	 * node_set_state() will not set it back.
	 */
	node_clear_state(0, N_MEMORY);
	node_clear_state(0, N_NORMAL_MEMORY);

	zone_sizes_init();
}
/*
 * Memory hotplug specific functions
 */
#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * After memory hotplug the variables max_pfn, max_low_pfn and high_memory need
 * updating.
 */
static void update_end_of_memory_vars(u64 start, u64 size)
{
	unsigned long end_pfn = PFN_UP(start + size);

	if (end_pfn > max_pfn) {
		max_pfn = end_pfn;
		max_low_pfn = end_pfn;
		high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
	}
}

int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
	      struct mhp_params *params)
{
	int ret;

	ret = __add_pages(nid, start_pfn, nr_pages, params);
	WARN_ON_ONCE(ret);

	/* update max_pfn, max_low_pfn and high_memory */
	update_end_of_memory_vars(start_pfn << PAGE_SHIFT,
				  nr_pages << PAGE_SHIFT);

	return ret;
}

int arch_add_memory(int nid, u64 start, u64 size,
		    struct mhp_params *params)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	init_memory_mapping(start, start + size, params->pgprot);

	return add_pages(nid, start_pfn, nr_pages, params);
}
#define PAGE_INUSE 0xFD
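
/*
 * PAGE_INUSE is the poison value written over partially freed vmemmap
 * page_structs; a page is only returned to the allocator once it is
 * entirely filled with this pattern (see remove_pte_table() and
 * remove_pmd_table() below).
 */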
static void __meminit free_pagetable(struct page *page, int order)
{
	unsigned long magic;
	unsigned int nr_pages = 1 << order;

	/* bootmem page has reserved flag */
	if (PageReserved(page)) {
		__ClearPageReserved(page);

		magic = (unsigned long)page->freelist;
		if (magic == SECTION_INFO || magic == MIX_SECTION_INFO) {
			while (nr_pages--)
				put_page_bootmem(page++);
		} else
			while (nr_pages--)
				free_reserved_page(page++);
	} else
		free_pages((unsigned long)page_address(page), order);
}

static void __meminit free_hugepage_table(struct page *page,
		struct vmem_altmap *altmap)
{
	if (altmap)
		vmem_altmap_free(altmap, PMD_SIZE / PAGE_SIZE);
	else
		free_pagetable(page, get_order(PMD_SIZE));
}
static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd)
{
	pte_t *pte;
	int i;

	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte = pte_start + i;
		if (!pte_none(*pte))
			return;
	}

	/* free a pte table */
	free_pagetable(pmd_page(*pmd), 0);
	spin_lock(&init_mm.page_table_lock);
	pmd_clear(pmd);
	spin_unlock(&init_mm.page_table_lock);
}

static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud)
{
	pmd_t *pmd;
	int i;

	for (i = 0; i < PTRS_PER_PMD; i++) {
		pmd = pmd_start + i;
		if (!pmd_none(*pmd))
			return;
	}

	/* free a pmd table */
	free_pagetable(pud_page(*pud), 0);
	spin_lock(&init_mm.page_table_lock);
	pud_clear(pud);
	spin_unlock(&init_mm.page_table_lock);
}

static void __meminit free_pud_table(pud_t *pud_start, p4d_t *p4d)
{
	pud_t *pud;
	int i;

	for (i = 0; i < PTRS_PER_PUD; i++) {
		pud = pud_start + i;
		if (!pud_none(*pud))
			return;
	}

	/* free a pud table */
	free_pagetable(p4d_page(*p4d), 0);
	spin_lock(&init_mm.page_table_lock);
	p4d_clear(p4d);
	spin_unlock(&init_mm.page_table_lock);
}
static void __meminit
remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end,
		 bool direct)
{
	unsigned long next, pages = 0;
	pte_t *pte;
	void *page_addr;
	phys_addr_t phys_addr;

	pte = pte_start + pte_index(addr);
	for (; addr < end; addr = next, pte++) {
		next = (addr + PAGE_SIZE) & PAGE_MASK;
		if (next > end)
			next = end;

		if (!pte_present(*pte))
			continue;

		/*
		 * We mapped [0,1G) memory as identity mapping when
		 * initializing, in arch/x86/kernel/head_64.S. These
		 * pagetables cannot be removed.
		 */
		phys_addr = pte_val(*pte) + (addr & PAGE_MASK);
		if (phys_addr < (phys_addr_t)0x40000000)
			return;

		if (PAGE_ALIGNED(addr) && PAGE_ALIGNED(next)) {
			/*
			 * Do not free direct mapping pages since they were
			 * freed when offlining, or simply not in use.
			 */
			if (!direct)
				free_pagetable(pte_page(*pte), 0);

			spin_lock(&init_mm.page_table_lock);
			pte_clear(&init_mm, addr, pte);
			spin_unlock(&init_mm.page_table_lock);

			/* For non-direct mapping, pages means nothing. */
			pages++;
		} else {
			/*
			 * If we are here, we are freeing vmemmap pages since
			 * direct mapped memory ranges to be freed are aligned.
			 *
			 * If we are not removing the whole page, it means
			 * other page structs in this page are being used and
			 * we cannot remove them. So fill the unused page_structs
			 * with 0xFD, and remove the page when it is wholly
			 * filled with 0xFD.
			 */
			memset((void *)addr, PAGE_INUSE, next - addr);

			page_addr = page_address(pte_page(*pte));
			if (!memchr_inv(page_addr, PAGE_INUSE, PAGE_SIZE)) {
				free_pagetable(pte_page(*pte), 0);

				spin_lock(&init_mm.page_table_lock);
				pte_clear(&init_mm, addr, pte);
				spin_unlock(&init_mm.page_table_lock);
			}
		}
	}

	/* Call free_pte_table() in remove_pmd_table(). */
	flush_tlb_all();
	if (direct)
		update_page_count(PG_LEVEL_4K, -pages);
}
static void __meminit
remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end,
		 bool direct, struct vmem_altmap *altmap)
{
	unsigned long next, pages = 0;
	pte_t *pte_base;
	pmd_t *pmd;
	void *page_addr;

	pmd = pmd_start + pmd_index(addr);
	for (; addr < end; addr = next, pmd++) {
		next = pmd_addr_end(addr, end);

		if (!pmd_present(*pmd))
			continue;

		if (pmd_large(*pmd)) {
			if (IS_ALIGNED(addr, PMD_SIZE) &&
			    IS_ALIGNED(next, PMD_SIZE)) {
				if (!direct)
					free_hugepage_table(pmd_page(*pmd),
							    altmap);

				spin_lock(&init_mm.page_table_lock);
				pmd_clear(pmd);
				spin_unlock(&init_mm.page_table_lock);
				pages++;
			} else {
				/* If here, we are freeing vmemmap pages. */
				memset((void *)addr, PAGE_INUSE, next - addr);

				page_addr = page_address(pmd_page(*pmd));
				if (!memchr_inv(page_addr, PAGE_INUSE,
						PMD_SIZE)) {
					free_hugepage_table(pmd_page(*pmd),
							    altmap);

					spin_lock(&init_mm.page_table_lock);
					pmd_clear(pmd);
					spin_unlock(&init_mm.page_table_lock);
				}
			}

			continue;
		}

		pte_base = (pte_t *)pmd_page_vaddr(*pmd);
		remove_pte_table(pte_base, addr, next, direct);
		free_pte_table(pte_base, pmd);
	}

	/* Call free_pmd_table() in remove_pud_table(). */
	if (direct)
		update_page_count(PG_LEVEL_2M, -pages);
}
static void __meminit
remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end,
		 struct vmem_altmap *altmap, bool direct)
{
	unsigned long next, pages = 0;
	pmd_t *pmd_base;
	pud_t *pud;
	void *page_addr;

	pud = pud_start + pud_index(addr);
	for (; addr < end; addr = next, pud++) {
		next = pud_addr_end(addr, end);

		if (!pud_present(*pud))
			continue;

		if (pud_large(*pud)) {
			if (IS_ALIGNED(addr, PUD_SIZE) &&
			    IS_ALIGNED(next, PUD_SIZE)) {
				if (!direct)
					free_pagetable(pud_page(*pud),
						       get_order(PUD_SIZE));

				spin_lock(&init_mm.page_table_lock);
				pud_clear(pud);
				spin_unlock(&init_mm.page_table_lock);
				pages++;
			} else {
				/* If here, we are freeing vmemmap pages. */
				memset((void *)addr, PAGE_INUSE, next - addr);

				page_addr = page_address(pud_page(*pud));
				if (!memchr_inv(page_addr, PAGE_INUSE,
						PUD_SIZE)) {
					free_pagetable(pud_page(*pud),
						       get_order(PUD_SIZE));

					spin_lock(&init_mm.page_table_lock);
					pud_clear(pud);
					spin_unlock(&init_mm.page_table_lock);
				}
			}

			continue;
		}

		pmd_base = pmd_offset(pud, 0);
		remove_pmd_table(pmd_base, addr, next, direct, altmap);
		free_pmd_table(pmd_base, pud);
	}

	if (direct)
		update_page_count(PG_LEVEL_1G, -pages);
}
static void __meminit
remove_p4d_table(p4d_t *p4d_start, unsigned long addr, unsigned long end,
		 struct vmem_altmap *altmap, bool direct)
{
	unsigned long next, pages = 0;
	pud_t *pud_base;
	p4d_t *p4d;

	p4d = p4d_start + p4d_index(addr);
	for (; addr < end; addr = next, p4d++) {
		next = p4d_addr_end(addr, end);

		if (!p4d_present(*p4d))
			continue;

		BUILD_BUG_ON(p4d_large(*p4d));

		pud_base = pud_offset(p4d, 0);
		remove_pud_table(pud_base, addr, next, altmap, direct);
		/*
		 * For 4-level page tables we do not want to free PUDs, but in the
		 * 5-level case we should free them. This code will have to change
		 * to adapt for boot-time switching between 4 and 5 level page tables.
		 */
		if (pgtable_l5_enabled())
			free_pud_table(pud_base, p4d);
	}

	if (direct)
		update_page_count(PG_LEVEL_512G, -pages);
}
/* start and end are both virtual addresses. */
static void __meminit
remove_pagetable(unsigned long start, unsigned long end, bool direct,
		struct vmem_altmap *altmap)
{
	unsigned long next;
	unsigned long addr;
	pgd_t *pgd;
	p4d_t *p4d;

	for (addr = start; addr < end; addr = next) {
		next = pgd_addr_end(addr, end);

		pgd = pgd_offset_k(addr);
		if (!pgd_present(*pgd))
			continue;

		p4d = p4d_offset(pgd, 0);
		remove_p4d_table(p4d, addr, next, altmap, direct);
	}

	flush_tlb_all();
}

void __ref vmemmap_free(unsigned long start, unsigned long end,
		struct vmem_altmap *altmap)
{
	remove_pagetable(start, end, false, altmap);
}

static void __meminit
kernel_physical_mapping_remove(unsigned long start, unsigned long end)
{
	start = (unsigned long)__va(start);
	end = (unsigned long)__va(end);

	remove_pagetable(start, end, true, NULL);
}

void __ref arch_remove_memory(int nid, u64 start, u64 size,
			      struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	__remove_pages(start_pfn, nr_pages, altmap);
	kernel_physical_mapping_remove(start, start + size);
}
#endif /* CONFIG_MEMORY_HOTPLUG */
static struct kcore_list kcore_vsyscall;

static void __init register_page_bootmem_info(void)
{
#if defined(CONFIG_NUMA) || defined(CONFIG_HAVE_BOOTMEM_INFO_NODE)
	int i;

	for_each_online_node(i)
		register_page_bootmem_info_node(NODE_DATA(i));
#endif
}

void __init mem_init(void)
{
	pci_iommu_alloc();

	/* clear_bss() already cleared the empty_zero_page */

	/* this will put all memory onto the freelists */
	memblock_free_all();
	after_bootmem = 1;
	x86_init.hyper.init_after_bootmem();

	/*
	 * Must be done after boot memory is put on freelist, because here we
	 * might set fields in deferred struct pages that have not yet been
	 * initialized, and memblock_free_all() initializes all the reserved
	 * deferred pages for us.
	 */
	register_page_bootmem_info();

	/* Register memory areas for /proc/kcore */
	if (get_gate_vma(&init_mm))
		kclist_add(&kcore_vsyscall, (void *)VSYSCALL_ADDR, PAGE_SIZE, KCORE_USER);

	mem_init_print_info(NULL);
}
int kernel_set_to_readonly;

void mark_rodata_ro(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long rodata_start = PFN_ALIGN(__start_rodata);
	unsigned long end = (unsigned long)__end_rodata_hpage_align;
	unsigned long text_end = PFN_ALIGN(_etext);
	unsigned long rodata_end = PFN_ALIGN(__end_rodata);
	unsigned long all_end;

	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
	       (end - start) >> 10);
	set_memory_ro(start, (end - start) >> PAGE_SHIFT);

	kernel_set_to_readonly = 1;

	/*
	 * The rodata/data/bss/brk section (but not the kernel text!)
	 * should also be not-executable.
	 *
	 * We align all_end to PMD_SIZE because the existing mapping
	 * is a full PMD. If we would align _brk_end to PAGE_SIZE we
	 * split the PMD and the remainder between _brk_end and the end
	 * of the PMD will remain mapped executable.
	 *
	 * Any PMD which was setup after the one which covers _brk_end
	 * has been zapped already via cleanup_highmap().
	 */
	all_end = roundup((unsigned long)_brk_end, PMD_SIZE);
	set_memory_nx(text_end, (all_end - text_end) >> PAGE_SHIFT);

	set_ftrace_ops_ro();

#ifdef CONFIG_CPA_DEBUG
	printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, end);
	set_memory_rw(start, (end-start) >> PAGE_SHIFT);

	printk(KERN_INFO "Testing CPA: again\n");
	set_memory_ro(start, (end-start) >> PAGE_SHIFT);
#endif

	free_kernel_image_pages("unused kernel image (text/rodata gap)",
				(void *)text_end, (void *)rodata_start);
	free_kernel_image_pages("unused kernel image (rodata/data gap)",
				(void *)rodata_end, (void *)_sdata);

	debug_checkwx();
}
int kern_addr_valid(unsigned long addr)
{
	unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (above != 0 && above != -1UL)
		return 0;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return 0;

	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return 0;

	if (pud_large(*pud))
		return pfn_valid(pud_pfn(*pud));

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;

	if (pmd_large(*pmd))
		return pfn_valid(pmd_pfn(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;

	return pfn_valid(pte_pfn(*pte));
}
/*
 * Block size is the minimum amount of memory which can be hotplugged or
 * hotremoved. It must be a power of two and must be equal to or larger than
 * MIN_MEMORY_BLOCK_SIZE.
 */
#define MAX_BLOCK_SIZE (2UL << 30)

/* Amount of ram needed to start using large blocks */
#define MEM_SIZE_FOR_LARGE_BLOCK (64UL << 30)

/* Adjustable memory block size */
static unsigned long set_memory_block_size;
int __init set_memory_block_size_order(unsigned int order)
{
	unsigned long size = 1UL << order;

	if (size > MEM_SIZE_FOR_LARGE_BLOCK || size < MIN_MEMORY_BLOCK_SIZE)
		return -EINVAL;

	set_memory_block_size = size;
	return 0;
}
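
/*
 * For example (illustrative): set_memory_block_size_order(31) selects 2GiB
 * memory blocks, provided 2GiB lies within the [MIN_MEMORY_BLOCK_SIZE,
 * MEM_SIZE_FOR_LARGE_BLOCK] range checked above.
 */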
static unsigned long probe_memory_block_size(void)
{
	unsigned long boot_mem_end = max_pfn << PAGE_SHIFT;
	unsigned long bz;

	/* If memory block size has been set, then use it */
	bz = set_memory_block_size;
	if (bz)
		goto done;

	/* Use regular block if RAM is smaller than MEM_SIZE_FOR_LARGE_BLOCK */
	if (boot_mem_end < MEM_SIZE_FOR_LARGE_BLOCK) {
		bz = MIN_MEMORY_BLOCK_SIZE;
		goto done;
	}

	/* Find the largest allowed block size that aligns to memory end */
	for (bz = MAX_BLOCK_SIZE; bz > MIN_MEMORY_BLOCK_SIZE; bz >>= 1) {
		if (IS_ALIGNED(boot_mem_end, bz))
			break;
	}
done:
	pr_info("x86/mm: Memory block size: %ldMB\n", bz >> 20);

	return bz;
}
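
/*
 * Worked example (illustrative): with a 66GiB boot_mem_end (which is at least
 * MEM_SIZE_FOR_LARGE_BLOCK) the loop above starts at MAX_BLOCK_SIZE (2GiB);
 * 66GiB is 2GiB-aligned, so 2GiB blocks are chosen. A 65GiB end address would
 * instead fall through to 1GiB blocks.
 */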
static unsigned long memory_block_size_probed;
unsigned long memory_block_size_bytes(void)
{
	if (!memory_block_size_probed)
		memory_block_size_probed = probe_memory_block_size();

	return memory_block_size_probed;
}
#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Initialise the sparsemem vmemmap using huge-pages at the PMD level.
 */
static long __meminitdata addr_start, addr_end;
static void __meminitdata *p_start, *p_end;
static int __meminitdata node_start;

static int __meminit vmemmap_populate_hugepages(unsigned long start,
		unsigned long end, int node, struct vmem_altmap *altmap)
{
	unsigned long addr;
	unsigned long next;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	for (addr = start; addr < end; addr = next) {
		next = pmd_addr_end(addr, end);

		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;

		p4d = vmemmap_p4d_populate(pgd, addr, node);
		if (!p4d)
			return -ENOMEM;

		pud = vmemmap_pud_populate(p4d, addr, node);
		if (!pud)
			return -ENOMEM;

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			void *p;

			if (altmap)
				p = altmap_alloc_block_buf(PMD_SIZE, altmap);
			else
				p = vmemmap_alloc_block_buf(PMD_SIZE, node);
			if (p) {
				pte_t entry;

				entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
						PAGE_KERNEL_LARGE);
				set_pmd(pmd, __pmd(pte_val(entry)));

				/* check to see if we have contiguous blocks */
				if (p_end != p || node_start != node) {
					if (p_start)
						pr_debug(" [%lx-%lx] PMD -> [%p-%p] on node %d\n",
						       addr_start, addr_end-1, p_start, p_end-1, node_start);
					addr_start = addr;
					node_start = node;
					p_start = p;
				}

				addr_end = addr + PMD_SIZE;
				p_end = p + PMD_SIZE;
				continue;
			} else if (altmap)
				return -ENOMEM; /* no fallback */
		} else if (pmd_large(*pmd)) {
			vmemmap_verify((pte_t *)pmd, node, addr, next);
			continue;
		}
		if (vmemmap_populate_basepages(addr, next, node))
			return -ENOMEM;
	}
	return 0;
}
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
		struct vmem_altmap *altmap)
{
	int err;

	if (end - start < PAGES_PER_SECTION * sizeof(struct page))
		err = vmemmap_populate_basepages(start, end, node);
	else if (boot_cpu_has(X86_FEATURE_PSE))
		err = vmemmap_populate_hugepages(start, end, node, altmap);
	else if (altmap) {
		pr_err_once("%s: no cpu support for altmap allocations\n",
				__func__);
		err = -ENOMEM;
	} else
		err = vmemmap_populate_basepages(start, end, node);
	if (!err)
		sync_global_pgds(start, end - 1);
	return err;
}
#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HAVE_BOOTMEM_INFO_NODE)
void register_page_bootmem_memmap(unsigned long section_nr,
				  struct page *start_page, unsigned long nr_pages)
{
	unsigned long addr = (unsigned long)start_page;
	unsigned long end = (unsigned long)(start_page + nr_pages);
	unsigned long next;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	unsigned int nr_pmd_pages;
	struct page *page;

	for (; addr < end; addr = next) {
		pte_t *pte = NULL;

		pgd = pgd_offset_k(addr);
		if (pgd_none(*pgd)) {
			next = (addr + PAGE_SIZE) & PAGE_MASK;
			continue;
		}
		get_page_bootmem(section_nr, pgd_page(*pgd), MIX_SECTION_INFO);

		p4d = p4d_offset(pgd, addr);
		if (p4d_none(*p4d)) {
			next = (addr + PAGE_SIZE) & PAGE_MASK;
			continue;
		}
		get_page_bootmem(section_nr, p4d_page(*p4d), MIX_SECTION_INFO);

		pud = pud_offset(p4d, addr);
		if (pud_none(*pud)) {
			next = (addr + PAGE_SIZE) & PAGE_MASK;
			continue;
		}
		get_page_bootmem(section_nr, pud_page(*pud), MIX_SECTION_INFO);

		if (!boot_cpu_has(X86_FEATURE_PSE)) {
			next = (addr + PAGE_SIZE) & PAGE_MASK;
			pmd = pmd_offset(pud, addr);
			if (pmd_none(*pmd))
				continue;
			get_page_bootmem(section_nr, pmd_page(*pmd),
					 MIX_SECTION_INFO);

			pte = pte_offset_kernel(pmd, addr);
			if (pte_none(*pte))
				continue;
			get_page_bootmem(section_nr, pte_page(*pte),
					 SECTION_INFO);
		} else {
			next = pmd_addr_end(addr, end);

			pmd = pmd_offset(pud, addr);
			if (pmd_none(*pmd))
				continue;

			nr_pmd_pages = 1 << get_order(PMD_SIZE);
			page = pmd_page(*pmd);
			while (nr_pmd_pages--)
				get_page_bootmem(section_nr, page++,
						 SECTION_INFO);
		}
	}
}
#endif
void __meminit vmemmap_populate_print_last(void)
{
	if (p_start) {
		pr_debug(" [%lx-%lx] PMD -> [%p-%p] on node %d\n",
			addr_start, addr_end-1, p_start, p_end-1, node_start);
		p_start = NULL;
		p_end = NULL;
		node_start = 0;
	}
}
#endif