// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2019 SiFive
 */
7 #include <linux/init.h>
8 #include <linux/debugfs.h>
9 #include <linux/memory_hotplug.h>
10 #include <linux/seq_file.h>
11 #include <linux/ptdump.h>
13 #include <linux/pgtable.h>
14 #include <asm/kasan.h>
/*
 * seq_file print helpers that tolerate a NULL seq_file: the W+X checker
 * (ptdump_check_wx) walks the tables with st->seq == NULL, so every
 * print in the dump path must degrade to a no-op in that case.
 *
 * NOTE(review): the ({ if (m) ... }) wrappers were reconstructed from a
 * damaged source — verify against the original file.
 */
#define pt_dump_seq_printf(m, fmt, args...)	\
({						\
	if (m)					\
		seq_printf(m, fmt, ##args);	\
})

#define pt_dump_seq_puts(m, fmt)	\
({					\
	if (m)				\
		seq_puts(m, fmt);	\
})
/*
 * The page dumper groups page table entries of the same type into a single
 * description. It uses pg_state to track the range information while
 * iterating over the pte entries. When the continuity is broken it then
 * dumps out a description of the range.
 */
35 struct ptdump_state ptdump
;
37 const struct addr_marker
*marker
;
38 unsigned long start_address
;
39 unsigned long start_pa
;
40 unsigned long last_pa
;
44 unsigned long wx_pages
;
/* A named boundary in the dumped address space. */
struct addr_marker {
	unsigned long start_address;
	const char *name;
};
/* Private information for debugfs: which mm to walk, over what range. */
struct ptd_mm_info {
	struct mm_struct *mm;
	const struct addr_marker *markers;
	unsigned long base_addr;
	unsigned long end;
};
/*
 * Index of each entry in address_markers[]; ptdump_init() patches the
 * runtime-computed addresses into the table through these indices, so
 * the two must stay in sync (including the #ifdef structure).
 */
enum address_markers_idx {
	FIXMAP_START_NR,
	FIXMAP_END_NR,
	PCI_IO_START_NR,
	PCI_IO_END_NR,
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	VMEMMAP_START_NR,
	VMEMMAP_END_NR,
#endif
	VMALLOC_START_NR,
	VMALLOC_END_NR,
	PAGE_OFFSET_NR,
#ifdef CONFIG_KASAN
	KASAN_SHADOW_START_NR,
	KASAN_SHADOW_END_NR,
#endif
#ifdef CONFIG_64BIT
	MODULES_MAPPING_NR,
	KERNEL_MAPPING_NR,
#endif
	END_OF_SPACE_NR
};
84 static struct addr_marker address_markers
[] = {
89 #ifdef CONFIG_SPARSEMEM_VMEMMAP
93 {0, "vmalloc() area"},
95 {0, "Linear mapping"},
97 {0, "Kasan shadow start"},
98 {0, "Kasan shadow end"},
101 {0, "Modules/BPF mapping"},
102 {0, "Kernel mapping"},
107 static struct ptd_mm_info kernel_ptd_info
= {
109 .markers
= address_markers
,
115 static struct addr_marker efi_addr_markers
[] = {
116 { 0, "UEFI runtime start" },
117 { SZ_1G
, "UEFI runtime end" },
121 static struct ptd_mm_info efi_ptd_info
= {
123 .markers
= efi_addr_markers
,
129 /* Page Table Entry */
136 static const struct prot_bits pte_bits
[] = {
143 .mask
= _PAGE_MTMASK_SVPBMT
,
156 .mask
= _PAGE_ACCESSED
,
160 .mask
= _PAGE_GLOBAL
,
180 .mask
= _PAGE_PRESENT
,
192 static struct pg_level pg_level
[] = {
196 .name
= (CONFIG_PGTABLE_LEVELS
> 4) ? "P4D" : "PGD",
198 .name
= (CONFIG_PGTABLE_LEVELS
> 3) ? "PUD" : "PGD",
200 .name
= (CONFIG_PGTABLE_LEVELS
> 2) ? "PMD" : "PGD",
206 static void dump_prot(struct pg_state
*st
)
210 for (i
= 0; i
< ARRAY_SIZE(pte_bits
); i
++) {
214 val
= st
->current_prot
& pte_bits
[i
].mask
;
216 if (pte_bits
[i
].mask
== _PAGE_SOFT
)
217 sprintf(s
, pte_bits
[i
].set
, val
>> 8);
219 else if (pte_bits
[i
].mask
== _PAGE_MTMASK_SVPBMT
) {
220 if (val
== _PAGE_NOCACHE_SVPBMT
)
221 sprintf(s
, pte_bits
[i
].set
, "NC");
222 else if (val
== _PAGE_IO_SVPBMT
)
223 sprintf(s
, pte_bits
[i
].set
, "IO");
225 sprintf(s
, pte_bits
[i
].set
, "??");
229 sprintf(s
, "%s", pte_bits
[i
].set
);
231 sprintf(s
, "%s", pte_bits
[i
].clear
);
234 pt_dump_seq_printf(st
->seq
, " %s", s
);
/* Address width for the dump: 16 hex digits on 64-bit, 8 on 32-bit. */
#ifdef CONFIG_64BIT
#define ADDR_FORMAT	"0x%016lx"
#else
#define ADDR_FORMAT	"0x%08lx"
#endif
243 static void dump_addr(struct pg_state
*st
, unsigned long addr
)
245 static const char units
[] = "KMGTPE";
246 const char *unit
= units
;
249 pt_dump_seq_printf(st
->seq
, ADDR_FORMAT
"-" ADDR_FORMAT
" ",
250 st
->start_address
, addr
);
252 pt_dump_seq_printf(st
->seq
, " " ADDR_FORMAT
" ", st
->start_pa
);
253 delta
= (addr
- st
->start_address
) >> 10;
255 while (!(delta
& 1023) && unit
[1]) {
260 pt_dump_seq_printf(st
->seq
, "%9lu%c %s", delta
, *unit
,
261 pg_level
[st
->level
].name
);
264 static void note_prot_wx(struct pg_state
*st
, unsigned long addr
)
269 if ((st
->current_prot
& (_PAGE_WRITE
| _PAGE_EXEC
)) !=
270 (_PAGE_WRITE
| _PAGE_EXEC
))
273 WARN_ONCE(1, "riscv/mm: Found insecure W+X mapping at address %p/%pS\n",
274 (void *)st
->start_address
, (void *)st
->start_address
);
276 st
->wx_pages
+= (addr
- st
->start_address
) / PAGE_SIZE
;
279 static void note_page(struct ptdump_state
*pt_st
, unsigned long addr
,
282 struct pg_state
*st
= container_of(pt_st
, struct pg_state
, ptdump
);
283 u64 pa
= PFN_PHYS(pte_pfn(__pte(val
)));
287 prot
= val
& pg_level
[level
].mask
;
289 if (st
->level
== -1) {
291 st
->current_prot
= prot
;
292 st
->start_address
= addr
;
295 pt_dump_seq_printf(st
->seq
, "---[ %s ]---\n", st
->marker
->name
);
296 } else if (prot
!= st
->current_prot
||
297 level
!= st
->level
|| addr
>= st
->marker
[1].start_address
) {
298 if (st
->current_prot
) {
299 note_prot_wx(st
, addr
);
302 pt_dump_seq_puts(st
->seq
, "\n");
305 while (addr
>= st
->marker
[1].start_address
) {
307 pt_dump_seq_printf(st
->seq
, "---[ %s ]---\n",
311 st
->start_address
= addr
;
314 st
->current_prot
= prot
;
321 static void ptdump_walk(struct seq_file
*s
, struct ptd_mm_info
*pinfo
)
323 struct pg_state st
= {
325 .marker
= pinfo
->markers
,
328 .note_page
= note_page
,
329 .range
= (struct ptdump_range
[]) {
330 {pinfo
->base_addr
, pinfo
->end
},
336 ptdump_walk_pgd(&st
.ptdump
, pinfo
->mm
, NULL
);
339 bool ptdump_check_wx(void)
341 struct pg_state st
= {
343 .marker
= (struct addr_marker
[]) {
350 .note_page
= note_page
,
351 .range
= (struct ptdump_range
[]) {
352 {KERN_VIRT_START
, ULONG_MAX
},
358 ptdump_walk_pgd(&st
.ptdump
, &init_mm
, NULL
);
361 pr_warn("Checked W+X mappings: failed, %lu W+X pages found\n",
366 pr_info("Checked W+X mappings: passed, no W+X pages found\n");
372 static int ptdump_show(struct seq_file
*m
, void *v
)
375 ptdump_walk(m
, m
->private);
381 DEFINE_SHOW_ATTRIBUTE(ptdump
);
383 static int __init
ptdump_init(void)
387 address_markers
[FIXMAP_START_NR
].start_address
= FIXADDR_START
;
388 address_markers
[FIXMAP_END_NR
].start_address
= FIXADDR_TOP
;
389 address_markers
[PCI_IO_START_NR
].start_address
= PCI_IO_START
;
390 address_markers
[PCI_IO_END_NR
].start_address
= PCI_IO_END
;
391 #ifdef CONFIG_SPARSEMEM_VMEMMAP
392 address_markers
[VMEMMAP_START_NR
].start_address
= VMEMMAP_START
;
393 address_markers
[VMEMMAP_END_NR
].start_address
= VMEMMAP_END
;
395 address_markers
[VMALLOC_START_NR
].start_address
= VMALLOC_START
;
396 address_markers
[VMALLOC_END_NR
].start_address
= VMALLOC_END
;
397 address_markers
[PAGE_OFFSET_NR
].start_address
= PAGE_OFFSET
;
399 address_markers
[KASAN_SHADOW_START_NR
].start_address
= KASAN_SHADOW_START
;
400 address_markers
[KASAN_SHADOW_END_NR
].start_address
= KASAN_SHADOW_END
;
403 address_markers
[MODULES_MAPPING_NR
].start_address
= MODULES_VADDR
;
404 address_markers
[KERNEL_MAPPING_NR
].start_address
= kernel_map
.virt_addr
;
407 kernel_ptd_info
.base_addr
= KERN_VIRT_START
;
409 pg_level
[1].name
= pgtable_l5_enabled
? "P4D" : "PGD";
410 pg_level
[2].name
= pgtable_l4_enabled
? "PUD" : "PGD";
412 for (i
= 0; i
< ARRAY_SIZE(pg_level
); i
++)
413 for (j
= 0; j
< ARRAY_SIZE(pte_bits
); j
++)
414 pg_level
[i
].mask
|= pte_bits
[j
].mask
;
416 debugfs_create_file("kernel_page_tables", 0400, NULL
, &kernel_ptd_info
,
419 if (efi_enabled(EFI_RUNTIME_SERVICES
))
420 debugfs_create_file("efi_page_tables", 0400, NULL
, &efi_ptd_info
,
427 device_initcall(ptdump_init
);