/*
 * Debug helper to dump the current kernel pagetables of the system
 * so that we can see what the various memory ranges are set to.
 *
 * (C) Copyright 2008 Intel Corporation
 *
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

#include <linux/debugfs.h>
#include <linux/kasan.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
#include <linux/pci.h>

#include <asm/e820/types.h>
#include <asm/pgtable.h>
/*
 * The dumper groups pagetable entries of the same type into one, and for
 * that it needs to keep some state when walking, and flush this state
 * when a "break" in the continuity is found.
 */
struct pg_state {
	int level;
	pgprot_t current_prot;
	pgprotval_t effective_prot;
	unsigned long start_address;
	unsigned long current_address;
	const struct addr_marker *marker;
	unsigned long lines;
	bool to_dmesg;
	bool check_wx;
	unsigned long wx_pages;
};

struct addr_marker {
	unsigned long start_address;
	const char *name;
	unsigned long max_lines;
};
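/*
 * Note: pg_state.level and the 'level' argument used by the walkers below
 * index level_name[] in printk_prot(): 0 = cr3, 1 = pgd, 2 = p4d,
 * 3 = pud, 4 = pmd, 5 = pte.
 */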
/* Address space markers hints */

#ifdef CONFIG_X86_64

enum address_markers_idx {
	USER_SPACE_NR = 0,
	KERNEL_SPACE_NR,
	LOW_KERNEL_NR,
#if defined(CONFIG_MODIFY_LDT_SYSCALL) && defined(CONFIG_X86_5LEVEL)
	LDT_NR,
#endif
	VMALLOC_START_NR,
	VMEMMAP_START_NR,
#ifdef CONFIG_KASAN
	KASAN_SHADOW_START_NR,
	KASAN_SHADOW_END_NR,
#endif
	CPU_ENTRY_AREA_NR,
#if defined(CONFIG_MODIFY_LDT_SYSCALL) && !defined(CONFIG_X86_5LEVEL)
	LDT_NR,
#endif
#ifdef CONFIG_X86_ESPFIX64
	ESPFIX_START_NR,
#endif
#ifdef CONFIG_EFI
	EFI_END_NR,
#endif
	HIGH_KERNEL_NR,
	MODULES_VADDR_NR,
	MODULES_END_NR,
	FIXADDR_START_NR,
	END_OF_SPACE_NR,
};

static struct addr_marker address_markers[] = {
	[USER_SPACE_NR]		= { 0,			"User Space" },
	[KERNEL_SPACE_NR]	= { (1UL << 63),	"Kernel Space" },
	[LOW_KERNEL_NR]		= { 0UL,		"Low Kernel Mapping" },
	[VMALLOC_START_NR]	= { 0UL,		"vmalloc() Area" },
	[VMEMMAP_START_NR]	= { 0UL,		"Vmemmap" },
#ifdef CONFIG_KASAN
	/*
	 * These fields get initialized with the (dynamic)
	 * KASAN_SHADOW_{START,END} values in pt_dump_init().
	 */
	[KASAN_SHADOW_START_NR]	= { 0UL,		"KASAN shadow" },
	[KASAN_SHADOW_END_NR]	= { 0UL,		"KASAN shadow end" },
#endif
#ifdef CONFIG_MODIFY_LDT_SYSCALL
	[LDT_NR]		= { 0UL,		"LDT remap" },
#endif
	[CPU_ENTRY_AREA_NR]	= { CPU_ENTRY_AREA_BASE,"CPU entry Area" },
#ifdef CONFIG_X86_ESPFIX64
	[ESPFIX_START_NR]	= { ESPFIX_BASE_ADDR,	"ESPfix Area", 16 },
#endif
#ifdef CONFIG_EFI
	[EFI_END_NR]		= { EFI_VA_END,		"EFI Runtime Services" },
#endif
	[HIGH_KERNEL_NR]	= { __START_KERNEL_map,	"High Kernel Mapping" },
	[MODULES_VADDR_NR]	= { MODULES_VADDR,	"Modules" },
	[MODULES_END_NR]	= { MODULES_END,	"End Modules" },
	[FIXADDR_START_NR]	= { FIXADDR_START,	"Fixmap Area" },
	[END_OF_SPACE_NR]	= { -1,			NULL }
};

#define INIT_PGD	((pgd_t *) &init_top_pgt)
#else /* CONFIG_X86_64 */

enum address_markers_idx {
	USER_SPACE_NR = 0,
	KERNEL_SPACE_NR,
	VMALLOC_START_NR,
	VMALLOC_END_NR,
#ifdef CONFIG_HIGHMEM
	PKMAP_BASE_NR,
#endif
#ifdef CONFIG_MODIFY_LDT_SYSCALL
	LDT_NR,
#endif
	CPU_ENTRY_AREA_NR,
	FIXADDR_START_NR,
	END_OF_SPACE_NR,
};

static struct addr_marker address_markers[] = {
	[USER_SPACE_NR]		= { 0,			"User Space" },
	[KERNEL_SPACE_NR]	= { PAGE_OFFSET,	"Kernel Mapping" },
	[VMALLOC_START_NR]	= { 0UL,		"vmalloc() Area" },
	[VMALLOC_END_NR]	= { 0UL,		"vmalloc() End" },
#ifdef CONFIG_HIGHMEM
	[PKMAP_BASE_NR]		= { 0UL,		"Persistent kmap() Area" },
#endif
#ifdef CONFIG_MODIFY_LDT_SYSCALL
	[LDT_NR]		= { 0UL,		"LDT remap" },
#endif
	[CPU_ENTRY_AREA_NR]	= { 0UL,		"CPU entry area" },
	[FIXADDR_START_NR]	= { 0UL,		"Fixmap area" },
	[END_OF_SPACE_NR]	= { -1,			NULL }
};

#define INIT_PGD	(swapper_pg_dir)

#endif /* !CONFIG_X86_64 */
/* Multipliers for offsets within the PTEs */
#define PTE_LEVEL_MULT (PAGE_SIZE)
#define PMD_LEVEL_MULT (PTRS_PER_PTE * PTE_LEVEL_MULT)
#define PUD_LEVEL_MULT (PTRS_PER_PMD * PMD_LEVEL_MULT)
#define P4D_LEVEL_MULT (PTRS_PER_PUD * PUD_LEVEL_MULT)
#define PGD_LEVEL_MULT (PTRS_PER_P4D * P4D_LEVEL_MULT)
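/*
 * On x86-64, with 512 entries per table, these steps work out to 4 KiB at
 * the PTE level, 2 MiB at PMD, 1 GiB at PUD and 512 GiB at P4D;
 * PGD_LEVEL_MULT is 512 GiB with 4-level paging (PTRS_PER_P4D == 1) and
 * 256 TiB with 5-level paging.
 */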
#define pt_dump_seq_printf(m, to_dmesg, fmt, args...)		\
({								\
	if (to_dmesg)						\
		printk(KERN_INFO fmt, ##args);			\
	else							\
		if (m)						\
			seq_printf(m, fmt, ##args);		\
})

#define pt_dump_cont_printf(m, to_dmesg, fmt, args...)		\
({								\
	if (to_dmesg)						\
		printk(KERN_CONT fmt, ##args);			\
	else							\
		if (m)						\
			seq_printf(m, fmt, ##args);		\
})
/*
 * Print a readable form of a pgprot_t to the seq_file
 */
static void printk_prot(struct seq_file *m, pgprot_t prot, int level, bool dmsg)
{
	pgprotval_t pr = pgprot_val(prot);
	static const char * const level_name[] =
		{ "cr3", "pgd", "p4d", "pud", "pmd", "pte" };

	if (!(pr & _PAGE_PRESENT)) {
		/* Not present */
		pt_dump_cont_printf(m, dmsg, "                              ");
	} else {
		if (pr & _PAGE_USER)
			pt_dump_cont_printf(m, dmsg, "USR ");
		else
			pt_dump_cont_printf(m, dmsg, "    ");
		if (pr & _PAGE_RW)
			pt_dump_cont_printf(m, dmsg, "RW ");
		else
			pt_dump_cont_printf(m, dmsg, "ro ");
		if (pr & _PAGE_PWT)
			pt_dump_cont_printf(m, dmsg, "PWT ");
		else
			pt_dump_cont_printf(m, dmsg, "    ");
		if (pr & _PAGE_PCD)
			pt_dump_cont_printf(m, dmsg, "PCD ");
		else
			pt_dump_cont_printf(m, dmsg, "    ");

		/* Bit 7 has a different meaning on level 3 vs 4 */
		if (level <= 4 && pr & _PAGE_PSE)
			pt_dump_cont_printf(m, dmsg, "PSE ");
		else
			pt_dump_cont_printf(m, dmsg, "    ");
		if ((level == 5 && pr & _PAGE_PAT) ||
		    ((level == 4 || level == 3) && pr & _PAGE_PAT_LARGE))
			pt_dump_cont_printf(m, dmsg, "PAT ");
		else
			pt_dump_cont_printf(m, dmsg, "    ");
		if (pr & _PAGE_GLOBAL)
			pt_dump_cont_printf(m, dmsg, "GLB ");
		else
			pt_dump_cont_printf(m, dmsg, "    ");
		if (pr & _PAGE_NX)
			pt_dump_cont_printf(m, dmsg, "NX ");
		else
			pt_dump_cont_printf(m, dmsg, "x  ");
	}
	pt_dump_cont_printf(m, dmsg, "%s\n", level_name[level]);
}
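/*
 * Together with the address range and size that note_page() prints, a
 * typical line of the resulting dump looks roughly like:
 *
 *   0xffffffff81000000-0xffffffff81e00000  14M  ro  PSE  GLB  x  pmd
 */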
/*
 * On 64 bits, sign-extend the 48 bit address to 64 bit
 */
static unsigned long normalize_addr(unsigned long u)
{
	int shift;

	if (!IS_ENABLED(CONFIG_X86_64))
		return u;

	shift = 64 - (__VIRTUAL_MASK_SHIFT + 1);
	return (signed long)(u << shift) >> shift;
}
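/*
 * Example with 4-level paging (__VIRTUAL_MASK_SHIFT == 47, so shift == 16):
 * the raw offset 0x0000800000000000 has bit 47 set, and shifting left then
 * arithmetically back right yields the canonical address 0xffff800000000000
 * used by the kernel-space markers above.
 */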
static void note_wx(struct pg_state *st)
{
	unsigned long npages;

	npages = (st->current_address - st->start_address) / PAGE_SIZE;

#ifdef CONFIG_PCI_BIOS
	/*
	 * If PCI BIOS is enabled, the PCI BIOS area is forced to WX.
	 * Inform about it, but avoid the warning.
	 */
	if (pcibios_enabled && st->start_address >= PAGE_OFFSET + BIOS_BEGIN &&
	    st->current_address <= PAGE_OFFSET + BIOS_END) {
		pr_warn_once("x86/mm: PCI BIOS W+X mapping %lu pages\n", npages);
		return;
	}
#endif
	/* Account the WX pages */
	st->wx_pages += npages;
	WARN_ONCE(1, "x86/mm: Found insecure W+X mapping at address %pS\n",
		  (void *)st->start_address);
}
/*
 * This function gets called on a break in a continuous series
 * of PTE entries; the next one is different so we need to
 * print what we collected so far.
 */
static void note_page(struct seq_file *m, struct pg_state *st,
		      pgprot_t new_prot, pgprotval_t new_eff, int level)
{
	pgprotval_t prot, cur, eff;
	static const char units[] = "BKMGTPE";

	/*
	 * If we have a "break" in the series, we need to flush the state that
	 * we have now. "break" is either changing perms, levels or
	 * address space marker.
	 */
	prot = pgprot_val(new_prot);
	cur = pgprot_val(st->current_prot);
	eff = st->effective_prot;

	if (!st->level) {
		/* First entry */
		st->current_prot = new_prot;
		st->effective_prot = new_eff;
		st->level = level;
		st->marker = address_markers;
		st->lines = 0;
		pt_dump_seq_printf(m, st->to_dmesg, "---[ %s ]---\n",
				   st->marker->name);
	} else if (prot != cur || new_eff != eff || level != st->level ||
		   st->current_address >= st->marker[1].start_address) {
		const char *unit = units;
		unsigned long delta;
		int width = sizeof(unsigned long) * 2;

		if (st->check_wx && (eff & _PAGE_RW) && !(eff & _PAGE_NX))
			note_wx(st);

		/*
		 * Now print the actual finished series
		 */
		if (!st->marker->max_lines ||
		    st->lines < st->marker->max_lines) {
			pt_dump_seq_printf(m, st->to_dmesg,
					   "0x%0*lx-0x%0*lx   ",
					   width, st->start_address,
					   width, st->current_address);

			delta = st->current_address - st->start_address;
			while (!(delta & 1023) && unit[1]) {
				delta >>= 10;
				unit++;
			}
			pt_dump_cont_printf(m, st->to_dmesg, "%9lu%c ",
					    delta, *unit);
			printk_prot(m, st->current_prot, st->level,
				    st->to_dmesg);
		}
		st->lines++;

		/*
		 * We print markers for special areas of address space,
		 * such as the start of vmalloc space etc.
		 * This helps in the interpretation.
		 */
		if (st->current_address >= st->marker[1].start_address) {
			if (st->marker->max_lines &&
			    st->lines > st->marker->max_lines) {
				unsigned long nskip =
					st->lines - st->marker->max_lines;
				pt_dump_seq_printf(m, st->to_dmesg,
						   "... %lu entr%s skipped ... \n",
						   nskip,
						   nskip == 1 ? "y" : "ies");
			}
			st->marker++;
			st->lines = 0;
			pt_dump_seq_printf(m, st->to_dmesg, "---[ %s ]---\n",
					   st->marker->name);
		}

		st->start_address = st->current_address;
		st->current_prot = new_prot;
		st->effective_prot = new_eff;
		st->level = level;
	}
}
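/*
 * Effective permissions of a walk: _PAGE_USER and _PAGE_RW only apply if
 * they are set at every level, while _PAGE_NX at any level makes the range
 * non-executable.  The W+X check in note_page()/note_wx() operates on this
 * combined value rather than on the leaf entry alone.
 */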
static inline pgprotval_t effective_prot(pgprotval_t prot1, pgprotval_t prot2)
{
	return (prot1 & prot2 & (_PAGE_USER | _PAGE_RW)) |
	       ((prot1 | prot2) & _PAGE_NX);
}
static void walk_pte_level(struct seq_file *m, struct pg_state *st, pmd_t addr,
			   pgprotval_t eff_in, unsigned long P)
{
	int i;
	pte_t *pte;
	pgprotval_t prot, eff;

	for (i = 0; i < PTRS_PER_PTE; i++) {
		st->current_address = normalize_addr(P + i * PTE_LEVEL_MULT);
		pte = pte_offset_map(&addr, st->current_address);
		prot = pte_flags(*pte);
		eff = effective_prot(eff_in, prot);
		note_page(m, st, __pgprot(prot), eff, 5);
		pte_unmap(pte);
	}
}
#ifdef CONFIG_KASAN

/*
 * This is an optimization for KASAN=y case. Since all kasan page tables
 * eventually point to the kasan_zero_page we could call note_page()
 * right away without walking through lower level page tables. This saves
 * us dozens of seconds (minutes for 5-level config) while checking for
 * W+X mapping or reading kernel_page_tables debugfs file.
 */
static inline bool kasan_page_table(struct seq_file *m, struct pg_state *st,
				void *pt)
{
	if (__pa(pt) == __pa(kasan_zero_pmd) ||
	    (pgtable_l5_enabled() && __pa(pt) == __pa(kasan_zero_p4d)) ||
	    __pa(pt) == __pa(kasan_zero_pud)) {
		pgprotval_t prot = pte_flags(kasan_zero_pte[0]);
		note_page(m, st, __pgprot(prot), 0, 5);
		return true;
	}
	return false;
}
#else
static inline bool kasan_page_table(struct seq_file *m, struct pg_state *st,
				void *pt)
{
	return false;
}
#endif
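/*
 * The walk_*_level() helpers below descend one paging level at a time,
 * folding the parent's permissions into 'eff' via effective_prot() and
 * reporting large, non-present and empty entries through note_page().
 */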
#if PTRS_PER_PMD > 1

static void walk_pmd_level(struct seq_file *m, struct pg_state *st, pud_t addr,
			   pgprotval_t eff_in, unsigned long P)
{
	int i;
	pmd_t *start, *pmd_start;
	pgprotval_t prot, eff;

	pmd_start = start = (pmd_t *)pud_page_vaddr(addr);
	for (i = 0; i < PTRS_PER_PMD; i++) {
		st->current_address = normalize_addr(P + i * PMD_LEVEL_MULT);
		if (!pmd_none(*start)) {
			prot = pmd_flags(*start);
			eff = effective_prot(eff_in, prot);
			if (pmd_large(*start) || !pmd_present(*start)) {
				note_page(m, st, __pgprot(prot), eff, 4);
			} else if (!kasan_page_table(m, st, pmd_start)) {
				walk_pte_level(m, st, *start, eff,
					       P + i * PMD_LEVEL_MULT);
			}
		} else
			note_page(m, st, __pgprot(0), 0, 4);
		start++;
	}
}

#else

#define walk_pmd_level(m,s,a,e,p) walk_pte_level(m,s,__pmd(pud_val(a)),e,p)
#define pud_large(a) pmd_large(__pmd(pud_val(a)))
#define pud_none(a)  pmd_none(__pmd(pud_val(a)))

#endif
#if PTRS_PER_PUD > 1

static void walk_pud_level(struct seq_file *m, struct pg_state *st, p4d_t addr,
			   pgprotval_t eff_in, unsigned long P)
{
	int i;
	pud_t *start, *pud_start;
	pgprotval_t prot, eff;
	pud_t *prev_pud = NULL;

	pud_start = start = (pud_t *)p4d_page_vaddr(addr);

	for (i = 0; i < PTRS_PER_PUD; i++) {
		st->current_address = normalize_addr(P + i * PUD_LEVEL_MULT);
		if (!pud_none(*start)) {
			prot = pud_flags(*start);
			eff = effective_prot(eff_in, prot);
			if (pud_large(*start) || !pud_present(*start)) {
				note_page(m, st, __pgprot(prot), eff, 3);
			} else if (!kasan_page_table(m, st, pud_start)) {
				walk_pmd_level(m, st, *start, eff,
					       P + i * PUD_LEVEL_MULT);
			}
		} else
			note_page(m, st, __pgprot(0), 0, 3);

		prev_pud = start;
		start++;
	}
}

#else

#define walk_pud_level(m,s,a,e,p) walk_pmd_level(m,s,__pud(p4d_val(a)),e,p)
#define p4d_large(a) pud_large(__pud(p4d_val(a)))
#define p4d_none(a)  pud_none(__pud(p4d_val(a)))

#endif
static void walk_p4d_level(struct seq_file *m, struct pg_state *st, pgd_t addr,
			   pgprotval_t eff_in, unsigned long P)
{
	int i;
	p4d_t *start, *p4d_start;
	pgprotval_t prot, eff;

	if (PTRS_PER_P4D == 1)
		return walk_pud_level(m, st, __p4d(pgd_val(addr)), eff_in, P);

	p4d_start = start = (p4d_t *)pgd_page_vaddr(addr);

	for (i = 0; i < PTRS_PER_P4D; i++) {
		st->current_address = normalize_addr(P + i * P4D_LEVEL_MULT);
		if (!p4d_none(*start)) {
			prot = p4d_flags(*start);
			eff = effective_prot(eff_in, prot);
			if (p4d_large(*start) || !p4d_present(*start)) {
				note_page(m, st, __pgprot(prot), eff, 2);
			} else if (!kasan_page_table(m, st, p4d_start)) {
				walk_pud_level(m, st, *start, eff,
					       P + i * P4D_LEVEL_MULT);
			}
		} else
			note_page(m, st, __pgprot(0), 0, 2);

		start++;
	}
}

#define pgd_large(a) (pgtable_l5_enabled() ? pgd_large(a) : p4d_large(__p4d(pgd_val(a))))
#define pgd_none(a)  (pgtable_l5_enabled() ? pgd_none(a) : p4d_none(__p4d(pgd_val(a))))
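/*
 * With 4-level paging each PGD slot covers 512 GiB, so the 16 slots tested
 * below correspond to the 8 TiB hole at ffff800000000000 - ffff87ffffffffff,
 * which the walker reports as unmapped instead of descending into.
 */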
static inline bool is_hypervisor_range(int idx)
{
#ifdef CONFIG_X86_64
	/*
	 * ffff800000000000 - ffff87ffffffffff is reserved for
	 * the hypervisor.
	 */
	return	(idx >= pgd_index(__PAGE_OFFSET) - 16) &&
		(idx <  pgd_index(__PAGE_OFFSET));
#else
	return false;
#endif
}
static void ptdump_walk_pgd_level_core(struct seq_file *m, pgd_t *pgd,
				       bool checkwx, bool dmesg)
{
	pgd_t *start = INIT_PGD;
	pgprotval_t prot, eff;
	int i;
	struct pg_state st = {};

	if (pgd) {
		start = pgd;
		st.to_dmesg = dmesg;
	}

	st.check_wx = checkwx;
	if (checkwx)
		st.wx_pages = 0;

	for (i = 0; i < PTRS_PER_PGD; i++) {
		st.current_address = normalize_addr(i * PGD_LEVEL_MULT);
		if (!pgd_none(*start) && !is_hypervisor_range(i)) {
			prot = pgd_flags(*start);
#ifdef CONFIG_X86_PAE
			eff = _PAGE_USER | _PAGE_RW;
#else
			eff = prot;
#endif
			if (pgd_large(*start) || !pgd_present(*start)) {
				note_page(m, &st, __pgprot(prot), eff, 1);
			} else {
				walk_p4d_level(m, &st, *start, eff,
					       i * PGD_LEVEL_MULT);
			}
		} else
			note_page(m, &st, __pgprot(0), 0, 1);

		cond_resched();
		start++;
	}

	/* Flush out the last page */
	st.current_address = normalize_addr(PTRS_PER_PGD*PGD_LEVEL_MULT);
	note_page(m, &st, __pgprot(0), 0, 0);
	if (!checkwx)
		return;
	if (st.wx_pages)
		pr_info("x86/mm: Checked W+X mappings: FAILED, %lu W+X pages found.\n",
			st.wx_pages);
	else
		pr_info("x86/mm: Checked W+X mappings: passed, no W+X pages found.\n");
}
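/*
 * The wrappers below select the output target and page-table root:
 * ptdump_walk_pgd_level() allows dmesg output when a specific pgd is
 * passed, ptdump_walk_pgd_level_debugfs() can switch to the user-space
 * copy of the page tables under page-table isolation, and the checkwx
 * helpers run the walker with no output target purely to flag W+X pages.
 */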
void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd)
{
	ptdump_walk_pgd_level_core(m, pgd, false, true);
}

void ptdump_walk_pgd_level_debugfs(struct seq_file *m, pgd_t *pgd, bool user)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
	if (user && static_cpu_has(X86_FEATURE_PTI))
		pgd = kernel_to_user_pgdp(pgd);
#endif
	ptdump_walk_pgd_level_core(m, pgd, false, false);
}
EXPORT_SYMBOL_GPL(ptdump_walk_pgd_level_debugfs);

void ptdump_walk_user_pgd_level_checkwx(void)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
	pgd_t *pgd = INIT_PGD;

	if (!(__supported_pte_mask & _PAGE_NX) ||
	    !static_cpu_has(X86_FEATURE_PTI))
		return;

	pr_info("x86/mm: Checking user space page tables\n");
	pgd = kernel_to_user_pgdp(pgd);
	ptdump_walk_pgd_level_core(NULL, pgd, true, false);
#endif
}

void ptdump_walk_pgd_level_checkwx(void)
{
	ptdump_walk_pgd_level_core(NULL, NULL, true, false);
}
static int __init pt_dump_init(void)
{
	/*
	 * Various markers are not compile-time constants, so assign them
	 * here.
	 */
#ifdef CONFIG_X86_64
	address_markers[LOW_KERNEL_NR].start_address = PAGE_OFFSET;
	address_markers[VMALLOC_START_NR].start_address = VMALLOC_START;
	address_markers[VMEMMAP_START_NR].start_address = VMEMMAP_START;
#ifdef CONFIG_MODIFY_LDT_SYSCALL
	address_markers[LDT_NR].start_address = LDT_BASE_ADDR;
#endif
#ifdef CONFIG_KASAN
	address_markers[KASAN_SHADOW_START_NR].start_address = KASAN_SHADOW_START;
	address_markers[KASAN_SHADOW_END_NR].start_address = KASAN_SHADOW_END;
#endif
#endif /* CONFIG_X86_64 */
#ifdef CONFIG_X86_32
	address_markers[VMALLOC_START_NR].start_address = VMALLOC_START;
	address_markers[VMALLOC_END_NR].start_address = VMALLOC_END;
# ifdef CONFIG_HIGHMEM
	address_markers[PKMAP_BASE_NR].start_address = PKMAP_BASE;
# endif
	address_markers[FIXADDR_START_NR].start_address = FIXADDR_START;
	address_markers[CPU_ENTRY_AREA_NR].start_address = CPU_ENTRY_AREA_BASE;
# ifdef CONFIG_MODIFY_LDT_SYSCALL
	address_markers[LDT_NR].start_address = LDT_BASE_ADDR;
# endif
#endif /* CONFIG_X86_32 */
	return 0;
}
__initcall(pt_dump_init);