/*
 * Debug helper to dump the current kernel pagetables of the system
 * so that we can see what the various memory ranges are set to.
 *
 * (C) Copyright 2008 Intel Corporation
 *
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

#include <linux/debugfs.h>
#include <linux/kasan.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>

#include <asm/pgtable.h>
/*
 * The dumper groups pagetable entries of the same type into one, and for
 * that it needs to keep some state when walking, and flush this state
 * when a "break" in the continuity is found.
 */
struct pg_state {
	int level;
	pgprot_t current_prot;
	pgprotval_t effective_prot;
	unsigned long start_address;
	unsigned long current_address;
	const struct addr_marker *marker;
	unsigned long lines;
	bool to_dmesg;
	bool check_wx;
	unsigned long wx_pages;
};

struct addr_marker {
	unsigned long start_address;
	const char *name;
	unsigned long max_lines;
};
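/*
 * A max_lines of zero means "print every line of this region"; a non-zero
 * value (such as the 16 used for the ESPfix area below) caps the output
 * and makes note_page() emit an "entries skipped" summary instead.
 */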
/* Address space markers hints */

#ifdef CONFIG_X86_64

enum address_markers_idx {
	USER_SPACE_NR = 0,
	KERNEL_SPACE_NR,
	LOW_KERNEL_NR,
#if defined(CONFIG_MODIFY_LDT_SYSCALL) && defined(CONFIG_X86_5LEVEL)
	LDT_NR,
#endif
	VMALLOC_START_NR,
	VMEMMAP_START_NR,
#ifdef CONFIG_KASAN
	KASAN_SHADOW_START_NR,
	KASAN_SHADOW_END_NR,
#endif
	CPU_ENTRY_AREA_NR,
#if defined(CONFIG_MODIFY_LDT_SYSCALL) && !defined(CONFIG_X86_5LEVEL)
	LDT_NR,
#endif
#ifdef CONFIG_X86_ESPFIX64
	ESPFIX_START_NR,
#endif
#ifdef CONFIG_EFI
	EFI_END_NR,
#endif
	HIGH_KERNEL_NR,
	MODULES_VADDR_NR,
	MODULES_END_NR,
	FIXADDR_START_NR,
	END_OF_SPACE_NR,
};
static struct addr_marker address_markers[] = {
	[USER_SPACE_NR]		= { 0,			"User Space" },
	[KERNEL_SPACE_NR]	= { (1UL << 63),	"Kernel Space" },
	[LOW_KERNEL_NR]		= { 0UL,		"Low Kernel Mapping" },
	[VMALLOC_START_NR]	= { 0UL,		"vmalloc() Area" },
	[VMEMMAP_START_NR]	= { 0UL,		"Vmemmap" },
#ifdef CONFIG_KASAN
	/*
	 * These fields get initialized with the (dynamic)
	 * KASAN_SHADOW_{START,END} values in pt_dump_init().
	 */
	[KASAN_SHADOW_START_NR]	= { 0UL,		"KASAN shadow" },
	[KASAN_SHADOW_END_NR]	= { 0UL,		"KASAN shadow end" },
#endif
#ifdef CONFIG_MODIFY_LDT_SYSCALL
	[LDT_NR]		= { 0UL,		"LDT remap" },
#endif
	[CPU_ENTRY_AREA_NR]	= { CPU_ENTRY_AREA_BASE,"CPU entry Area" },
#ifdef CONFIG_X86_ESPFIX64
	[ESPFIX_START_NR]	= { ESPFIX_BASE_ADDR,	"ESPfix Area", 16 },
#endif
#ifdef CONFIG_EFI
	[EFI_END_NR]		= { EFI_VA_END,		"EFI Runtime Services" },
#endif
	[HIGH_KERNEL_NR]	= { __START_KERNEL_map,	"High Kernel Mapping" },
	[MODULES_VADDR_NR]	= { MODULES_VADDR,	"Modules" },
	[MODULES_END_NR]	= { MODULES_END,	"End Modules" },
	[FIXADDR_START_NR]	= { FIXADDR_START,	"Fixmap Area" },
	[END_OF_SPACE_NR]	= { -1,			NULL }
};
#else /* CONFIG_X86_64 */

enum address_markers_idx {
	USER_SPACE_NR = 0,
	KERNEL_SPACE_NR,
	VMALLOC_START_NR,
	VMALLOC_END_NR,
#ifdef CONFIG_HIGHMEM
	PKMAP_BASE_NR,
#endif
	CPU_ENTRY_AREA_NR,
	FIXADDR_START_NR,
	END_OF_SPACE_NR,
};

static struct addr_marker address_markers[] = {
	[USER_SPACE_NR]		= { 0,			"User Space" },
	[KERNEL_SPACE_NR]	= { PAGE_OFFSET,	"Kernel Mapping" },
	[VMALLOC_START_NR]	= { 0UL,		"vmalloc() Area" },
	[VMALLOC_END_NR]	= { 0UL,		"vmalloc() End" },
#ifdef CONFIG_HIGHMEM
	[PKMAP_BASE_NR]		= { 0UL,		"Persistent kmap() Area" },
#endif
	[CPU_ENTRY_AREA_NR]	= { 0UL,		"CPU entry area" },
	[FIXADDR_START_NR]	= { 0UL,		"Fixmap area" },
	[END_OF_SPACE_NR]	= { -1,			NULL }
};

#endif /* !CONFIG_X86_64 */
/* Multipliers for offsets within the PTEs */
#define PTE_LEVEL_MULT (PAGE_SIZE)
#define PMD_LEVEL_MULT (PTRS_PER_PTE * PTE_LEVEL_MULT)
#define PUD_LEVEL_MULT (PTRS_PER_PMD * PMD_LEVEL_MULT)
#define P4D_LEVEL_MULT (PTRS_PER_PUD * PUD_LEVEL_MULT)
#define PGD_LEVEL_MULT (PTRS_PER_P4D * P4D_LEVEL_MULT)
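/*
 * Worked example (x86-64, 4 KiB pages, 512 entries per table): one PTE
 * slot spans 4 KiB, one PMD slot 512 * 4 KiB = 2 MiB, one PUD slot 1 GiB
 * and one P4D slot 512 GiB. With 4-level paging PTRS_PER_P4D is 1, so a
 * PGD slot also covers 512 GiB; with 5-level paging it covers 256 TiB.
 */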
#define pt_dump_seq_printf(m, to_dmesg, fmt, args...)		\
({								\
	if (to_dmesg)						\
		printk(KERN_INFO fmt, ##args);			\
	else							\
		if (m)						\
			seq_printf(m, fmt, ##args);		\
})

#define pt_dump_cont_printf(m, to_dmesg, fmt, args...)		\
({								\
	if (to_dmesg)						\
		printk(KERN_CONT fmt, ##args);			\
	else							\
		if (m)						\
			seq_printf(m, fmt, ##args);		\
})
/*
 * Print a readable form of a pgprot_t to the seq_file
 */
static void printk_prot(struct seq_file *m, pgprot_t prot, int level, bool dmsg)
{
	pgprotval_t pr = pgprot_val(prot);
	static const char * const level_name[] =
		{ "cr3", "pgd", "p4d", "pud", "pmd", "pte" };

	if (!(pr & _PAGE_PRESENT)) {
		/* Not present */
		pt_dump_cont_printf(m, dmsg, "                              ");
	} else {
		if (pr & _PAGE_USER)
			pt_dump_cont_printf(m, dmsg, "USR ");
		else
			pt_dump_cont_printf(m, dmsg, "    ");
		if (pr & _PAGE_RW)
			pt_dump_cont_printf(m, dmsg, "RW ");
		else
			pt_dump_cont_printf(m, dmsg, "ro ");
		if (pr & _PAGE_PWT)
			pt_dump_cont_printf(m, dmsg, "PWT ");
		else
			pt_dump_cont_printf(m, dmsg, "    ");
		if (pr & _PAGE_PCD)
			pt_dump_cont_printf(m, dmsg, "PCD ");
		else
			pt_dump_cont_printf(m, dmsg, "    ");

		/* Bit 7 has a different meaning on level 3 vs 4 */
		if (level <= 4 && pr & _PAGE_PSE)
			pt_dump_cont_printf(m, dmsg, "PSE ");
		else
			pt_dump_cont_printf(m, dmsg, "    ");
		if ((level == 5 && pr & _PAGE_PAT) ||
		    ((level == 4 || level == 3) && pr & _PAGE_PAT_LARGE))
			pt_dump_cont_printf(m, dmsg, "PAT ");
		else
			pt_dump_cont_printf(m, dmsg, "    ");
		if (pr & _PAGE_GLOBAL)
			pt_dump_cont_printf(m, dmsg, "GLB ");
		else
			pt_dump_cont_printf(m, dmsg, "    ");
		if (pr & _PAGE_NX)
			pt_dump_cont_printf(m, dmsg, "NX ");
		else
			pt_dump_cont_printf(m, dmsg, "x  ");
	}
	pt_dump_cont_printf(m, dmsg, "%s\n", level_name[level]);
}
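/*
 * Example of the flag columns above (spacing approximate): a writable,
 * global, non-executable kernel page prints as "RW ... GLB NX pte" and a
 * read-only executable one as "ro ... GLB x  pte", where "..." stands
 * for the unset PWT/PCD/PSE/PAT columns.
 */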
/*
 * On 64 bits, sign-extend the 48 bit address to 64 bit
 */
static unsigned long normalize_addr(unsigned long u)
{
	int shift;

	if (!IS_ENABLED(CONFIG_X86_64))
		return u;

	shift = 64 - (__VIRTUAL_MASK_SHIFT + 1);
	return (signed long)(u << shift) >> shift;
}
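/*
 * Worked example: with 4-level paging __VIRTUAL_MASK_SHIFT is 47, so
 * shift is 16. An index-derived address such as 0x0000800000000000 is
 * shifted left by 16 and arithmetically shifted back, yielding the
 * canonical form 0xffff800000000000.
 */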
/*
 * This function gets called on a break in a continuous series
 * of PTE entries; the next one is different so we need to
 * print what we collected so far.
 */
static void note_page(struct seq_file *m, struct pg_state *st,
		      pgprot_t new_prot, pgprotval_t new_eff, int level)
{
	pgprotval_t prot, cur, eff;
	static const char units[] = "BKMGTPE";

	/*
	 * If we have a "break" in the series, we need to flush the state that
	 * we have now. "break" is either changing perms, levels or
	 * address space marker.
	 */
	prot = pgprot_val(new_prot);
	cur = pgprot_val(st->current_prot);
	eff = st->effective_prot;

	if (!st->level) {
		/* First entry */
		st->current_prot = new_prot;
		st->effective_prot = new_eff;
		st->level = level;
		st->marker = address_markers;
		st->lines = 0;
		pt_dump_seq_printf(m, st->to_dmesg, "---[ %s ]---\n",
				   st->marker->name);
	} else if (prot != cur || new_eff != eff || level != st->level ||
		   st->current_address >= st->marker[1].start_address) {
		const char *unit = units;
		unsigned long delta;
		int width = sizeof(unsigned long) * 2;

		if (st->check_wx && (eff & _PAGE_RW) && !(eff & _PAGE_NX)) {
			WARN_ONCE(1,
				  "x86/mm: Found insecure W+X mapping at address %p/%pS\n",
				  (void *)st->start_address,
				  (void *)st->start_address);
			st->wx_pages += (st->current_address -
					 st->start_address) / PAGE_SIZE;
		}

		/*
		 * Now print the actual finished series
		 */
		if (!st->marker->max_lines ||
		    st->lines < st->marker->max_lines) {
			pt_dump_seq_printf(m, st->to_dmesg,
					   "0x%0*lx-0x%0*lx   ",
					   width, st->start_address,
					   width, st->current_address);

			delta = st->current_address - st->start_address;
			while (!(delta & 1023) && unit[1]) {
				delta >>= 10;
				unit++;
			}
			pt_dump_cont_printf(m, st->to_dmesg, "%9lu%c ",
					    delta, *unit);
			printk_prot(m, st->current_prot, st->level,
				    st->to_dmesg);
		}
		st->lines++;

		/*
		 * We print markers for special areas of address space,
		 * such as the start of vmalloc space etc.
		 * This helps in the interpretation.
		 */
		if (st->current_address >= st->marker[1].start_address) {
			if (st->marker->max_lines &&
			    st->lines > st->marker->max_lines) {
				unsigned long nskip =
					st->lines - st->marker->max_lines;
				pt_dump_seq_printf(m, st->to_dmesg,
						   "... %lu entr%s skipped ... \n",
						   nskip,
						   nskip == 1 ? "y" : "ies");
			}
			st->marker++;
			st->lines = 0;
			pt_dump_seq_printf(m, st->to_dmesg, "---[ %s ]---\n",
					   st->marker->name);
		}

		st->start_address = st->current_address;
		st->current_prot = new_prot;
		st->effective_prot = new_eff;
		st->level = level;
	}
}
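/*
 * Illustrative example (addresses made up): a 2 MiB run of identical pte
 * entries starting at 0xffff880001000000 flushes as one line,
 * "0xffff880001000000-0xffff880001200000"; the units loop reduces the
 * 0x200000-byte delta to "2M" and printk_prot() appends the flags.
 */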
static inline pgprotval_t effective_prot(pgprotval_t prot1, pgprotval_t prot2)
{
	return (prot1 & prot2 & (_PAGE_USER | _PAGE_RW)) |
	       ((prot1 | prot2) & _PAGE_NX);
}
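/*
 * Rationale: USER and RW only take effect when granted at every level of
 * the walk (hence the AND), while NX at any single level is enough to
 * forbid execution (hence the OR). Example: prot1 = USER|RW and
 * prot2 = RW|NX combine to RW|NX: kernel-writable, never executable,
 * not user accessible.
 */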
static void walk_pte_level(struct seq_file *m, struct pg_state *st, pmd_t addr,
			   pgprotval_t eff_in, unsigned long P)
{
	int i;
	pte_t *pte;
	pgprotval_t prot, eff;

	for (i = 0; i < PTRS_PER_PTE; i++) {
		st->current_address = normalize_addr(P + i * PTE_LEVEL_MULT);
		pte = pte_offset_map(&addr, st->current_address);
		prot = pte_flags(*pte);
		eff = effective_prot(eff_in, prot);
		note_page(m, st, __pgprot(prot), eff, 5);
		pte_unmap(pte);
	}
}
#ifdef CONFIG_KASAN

/*
 * This is an optimization for KASAN=y case. Since all kasan page tables
 * eventually point to the kasan_zero_page we could call note_page()
 * right away without walking through lower level page tables. This saves
 * us dozens of seconds (minutes for 5-level config) while checking for
 * W+X mapping or reading kernel_page_tables debugfs file.
 */
static inline bool kasan_page_table(struct seq_file *m, struct pg_state *st,
				void *pt)
{
	if (__pa(pt) == __pa(kasan_zero_pmd) ||
	    (pgtable_l5_enabled() && __pa(pt) == __pa(kasan_zero_p4d)) ||
	    __pa(pt) == __pa(kasan_zero_pud)) {
		pgprotval_t prot = pte_flags(kasan_zero_pte[0]);
		note_page(m, st, __pgprot(prot), 0, 5);
		return true;
	}
	return false;
}
#else
static inline bool kasan_page_table(struct seq_file *m, struct pg_state *st,
				void *pt)
{
	return false;
}
#endif
#if PTRS_PER_PMD > 1

static void walk_pmd_level(struct seq_file *m, struct pg_state *st, pud_t addr,
			   pgprotval_t eff_in, unsigned long P)
{
	int i;
	pmd_t *start, *pmd_start;
	pgprotval_t prot, eff;

	pmd_start = start = (pmd_t *)pud_page_vaddr(addr);
	for (i = 0; i < PTRS_PER_PMD; i++) {
		st->current_address = normalize_addr(P + i * PMD_LEVEL_MULT);
		if (!pmd_none(*start)) {
			prot = pmd_flags(*start);
			eff = effective_prot(eff_in, prot);
			if (pmd_large(*start) || !pmd_present(*start)) {
				note_page(m, st, __pgprot(prot), eff, 4);
			} else if (!kasan_page_table(m, st, pmd_start)) {
				walk_pte_level(m, st, *start, eff,
					       P + i * PMD_LEVEL_MULT);
			}
		} else
			note_page(m, st, __pgprot(0), 0, 4);
		start++;
	}
}

#else
#define walk_pmd_level(m,s,a,e,p) walk_pte_level(m,s,__pmd(pud_val(a)),e,p)
#define pud_large(a) pmd_large(__pmd(pud_val(a)))
#define pud_none(a)  pmd_none(__pmd(pud_val(a)))
#endif
#if PTRS_PER_PUD > 1

static void walk_pud_level(struct seq_file *m, struct pg_state *st, p4d_t addr,
			   pgprotval_t eff_in, unsigned long P)
{
	int i;
	pud_t *start, *pud_start;
	pgprotval_t prot, eff;
	pud_t *prev_pud = NULL;

	pud_start = start = (pud_t *)p4d_page_vaddr(addr);

	for (i = 0; i < PTRS_PER_PUD; i++) {
		st->current_address = normalize_addr(P + i * PUD_LEVEL_MULT);
		if (!pud_none(*start)) {
			prot = pud_flags(*start);
			eff = effective_prot(eff_in, prot);
			if (pud_large(*start) || !pud_present(*start)) {
				note_page(m, st, __pgprot(prot), eff, 3);
			} else if (!kasan_page_table(m, st, pud_start)) {
				walk_pmd_level(m, st, *start, eff,
					       P + i * PUD_LEVEL_MULT);
			}
		} else
			note_page(m, st, __pgprot(0), 0, 3);

		prev_pud = start;
		start++;
	}
}

#else
#define walk_pud_level(m,s,a,e,p) walk_pmd_level(m,s,__pud(p4d_val(a)),e,p)
#define p4d_large(a) pud_large(__pud(p4d_val(a)))
#define p4d_none(a)  pud_none(__pud(p4d_val(a)))
#endif
static void walk_p4d_level(struct seq_file *m, struct pg_state *st, pgd_t addr,
			   pgprotval_t eff_in, unsigned long P)
{
	int i;
	p4d_t *start, *p4d_start;
	pgprotval_t prot, eff;

	if (PTRS_PER_P4D == 1)
		return walk_pud_level(m, st, __p4d(pgd_val(addr)), eff_in, P);

	p4d_start = start = (p4d_t *)pgd_page_vaddr(addr);

	for (i = 0; i < PTRS_PER_P4D; i++) {
		st->current_address = normalize_addr(P + i * P4D_LEVEL_MULT);
		if (!p4d_none(*start)) {
			prot = p4d_flags(*start);
			eff = effective_prot(eff_in, prot);
			if (p4d_large(*start) || !p4d_present(*start)) {
				note_page(m, st, __pgprot(prot), eff, 2);
			} else if (!kasan_page_table(m, st, p4d_start)) {
				walk_pud_level(m, st, *start, eff,
					       P + i * P4D_LEVEL_MULT);
			}
		} else
			note_page(m, st, __pgprot(0), 0, 2);

		start++;
	}
}

#define pgd_large(a) (pgtable_l5_enabled() ? pgd_large(a) : p4d_large(__p4d(pgd_val(a))))
#define pgd_none(a)  (pgtable_l5_enabled() ? pgd_none(a) : p4d_none(__p4d(pgd_val(a))))
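/*
 * With 4-level paging the p4d level is folded into the pgd, so the two
 * wrappers above redirect the pgd predicates to their p4d counterparts;
 * pgtable_l5_enabled() keeps the native behaviour on 5-level hardware.
 */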
static inline bool is_hypervisor_range(int idx)
{
#ifdef CONFIG_X86_64
	/*
	 * ffff800000000000 - ffff87ffffffffff is reserved for
	 * the hypervisor.
	 */
	return	(idx >= pgd_index(__PAGE_OFFSET) - 16) &&
		(idx <  pgd_index(__PAGE_OFFSET));
#else
	return false;
#endif
}
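/*
 * Quick arithmetic check of the constant: with 4-level paging each pgd
 * slot covers PGD_LEVEL_MULT = 512 GiB, so the 16 slots below
 * __PAGE_OFFSET span exactly the 8 TiB hypervisor hole
 * ffff800000000000 - ffff87ffffffffff named in the comment.
 */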
static void ptdump_walk_pgd_level_core(struct seq_file *m, pgd_t *pgd,
				       bool checkwx, bool dmesg)
{
#ifdef CONFIG_X86_64
	pgd_t *start = (pgd_t *) &init_top_pgt;
#else
	pgd_t *start = swapper_pg_dir;
#endif
	pgprotval_t prot, eff;
	int i;
	struct pg_state st = {};

	if (pgd) {
		start = pgd;
		st.to_dmesg = dmesg;
	}

	st.check_wx = checkwx;
	if (checkwx)
		st.wx_pages = 0;

	for (i = 0; i < PTRS_PER_PGD; i++) {
		st.current_address = normalize_addr(i * PGD_LEVEL_MULT);
		if (!pgd_none(*start) && !is_hypervisor_range(i)) {
			prot = pgd_flags(*start);
#ifdef CONFIG_X86_PAE
			eff = _PAGE_USER | _PAGE_RW;
#else
			eff = prot;
#endif
			if (pgd_large(*start) || !pgd_present(*start)) {
				note_page(m, &st, __pgprot(prot), eff, 1);
			} else {
				walk_p4d_level(m, &st, *start, eff,
					       i * PGD_LEVEL_MULT);
			}
		} else
			note_page(m, &st, __pgprot(0), 0, 1);

		cond_resched();
		start++;
	}

	/* Flush out the last page */
	st.current_address = normalize_addr(PTRS_PER_PGD*PGD_LEVEL_MULT);
	note_page(m, &st, __pgprot(0), 0, 0);
	if (!checkwx)
		return;
	if (st.wx_pages)
		pr_info("x86/mm: Checked W+X mappings: FAILED, %lu W+X pages found.\n",
			st.wx_pages);
	else
		pr_info("x86/mm: Checked W+X mappings: passed, no W+X pages found.\n");
}
void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd)
{
	ptdump_walk_pgd_level_core(m, pgd, false, true);
}
void ptdump_walk_pgd_level_debugfs(struct seq_file *m, pgd_t *pgd, bool user)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
	if (user && static_cpu_has(X86_FEATURE_PTI))
		pgd = kernel_to_user_pgdp(pgd);
#endif
	ptdump_walk_pgd_level_core(m, pgd, false, false);
}
EXPORT_SYMBOL_GPL(ptdump_walk_pgd_level_debugfs);
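/*
 * Only the walkers live in this file; the debugfs plumbing that calls
 * ptdump_walk_pgd_level_debugfs() (backing the kernel_page_tables file
 * mentioned in the KASAN comment above) is expected to live elsewhere,
 * in arch/x86/mm/debug_pagetables.c.
 */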
static void ptdump_walk_user_pgd_level_checkwx(void)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
	pgd_t *pgd = (pgd_t *) &init_top_pgt;

	if (!static_cpu_has(X86_FEATURE_PTI))
		return;

	pr_info("x86/mm: Checking user space page tables\n");
	pgd = kernel_to_user_pgdp(pgd);
	ptdump_walk_pgd_level_core(NULL, pgd, true, false);
#endif
}

void ptdump_walk_pgd_level_checkwx(void)
{
	ptdump_walk_pgd_level_core(NULL, NULL, true, false);
	ptdump_walk_user_pgd_level_checkwx();
}
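/*
 * ptdump_walk_pgd_level_checkwx() is the W+X audit entry point; under
 * CONFIG_DEBUG_WX it is presumably invoked once at boot, via
 * debug_checkwx() after mark_rodata_ro(), and reports through the
 * pr_info()/WARN_ONCE() paths above rather than through a seq_file.
 */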
static int __init pt_dump_init(void)
{
	/*
	 * Various markers are not compile-time constants, so assign them
	 * here.
	 */
#ifdef CONFIG_X86_64
	address_markers[LOW_KERNEL_NR].start_address = PAGE_OFFSET;
	address_markers[VMALLOC_START_NR].start_address = VMALLOC_START;
	address_markers[VMEMMAP_START_NR].start_address = VMEMMAP_START;
#ifdef CONFIG_MODIFY_LDT_SYSCALL
	address_markers[LDT_NR].start_address = LDT_BASE_ADDR;
#endif
#ifdef CONFIG_KASAN
	address_markers[KASAN_SHADOW_START_NR].start_address = KASAN_SHADOW_START;
	address_markers[KASAN_SHADOW_END_NR].start_address = KASAN_SHADOW_END;
#endif
#endif
#ifdef CONFIG_X86_32
	address_markers[VMALLOC_START_NR].start_address = VMALLOC_START;
	address_markers[VMALLOC_END_NR].start_address = VMALLOC_END;
# ifdef CONFIG_HIGHMEM
	address_markers[PKMAP_BASE_NR].start_address = PKMAP_BASE;
# endif
	address_markers[FIXADDR_START_NR].start_address = FIXADDR_START;
	address_markers[CPU_ENTRY_AREA_NR].start_address = CPU_ENTRY_AREA_BASE;
#endif
	return 0;
}
__initcall(pt_dump_init);