/*
 * Debug helper to dump the current kernel pagetables of the system
 * so that we can see what the various memory ranges are set to.
 *
 * (C) Copyright 2008 Intel Corporation
 *
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
15 #include <linux/debugfs.h>
17 #include <linux/module.h>
18 #include <linux/seq_file.h>
20 #include <asm/pgtable.h>
23 * The dumper groups pagetable entries of the same type into one, and for
24 * that it needs to keep some state when walking, and flush this state
25 * when a "break" in the continuity is found.
29 pgprot_t current_prot
;
30 unsigned long start_address
;
31 unsigned long current_address
;
32 const struct addr_marker
*marker
;
36 unsigned long wx_pages
;
40 unsigned long start_address
;
42 unsigned long max_lines
;
/* indices for address_markers; keep sync'd w/ address_markers below */
enum address_markers_idx {
	USER_SPACE_NR = 0,
#ifdef CONFIG_X86_64
	KERNEL_SPACE_NR,
	LOW_KERNEL_NR,
	VMALLOC_START_NR,
	VMEMMAP_START_NR,
# ifdef CONFIG_X86_ESPFIX64
	ESPFIX_START_NR,
# endif
# ifdef CONFIG_EFI
	EFI_END_NR,
# endif
	HIGH_KERNEL_NR,
	MODULES_VADDR_NR,
	MODULES_END_NR,
#else
	KERNEL_SPACE_NR,
	VMALLOC_START_NR,
	VMALLOC_END_NR,
# ifdef CONFIG_HIGHMEM
	PKMAP_BASE_NR,
# endif
	FIXADDR_START_NR,
#endif
};
70 /* Address space markers hints */
71 static struct addr_marker address_markers
[] = {
74 { 0x8000000000000000UL
, "Kernel Space" },
75 { PAGE_OFFSET
, "Low Kernel Mapping" },
76 { VMALLOC_START
, "vmalloc() Area" },
77 { VMEMMAP_START
, "Vmemmap" },
78 # ifdef CONFIG_X86_ESPFIX64
79 { ESPFIX_BASE_ADDR
, "ESPfix Area", 16 },
82 { EFI_VA_END
, "EFI Runtime Services" },
84 { __START_KERNEL_map
, "High Kernel Mapping" },
85 { MODULES_VADDR
, "Modules" },
86 { MODULES_END
, "End Modules" },
88 { PAGE_OFFSET
, "Kernel Mapping" },
89 { 0/* VMALLOC_START */, "vmalloc() Area" },
90 { 0/*VMALLOC_END*/, "vmalloc() End" },
91 # ifdef CONFIG_HIGHMEM
92 { 0/*PKMAP_BASE*/, "Persistent kmap() Area" },
94 { 0/*FIXADDR_START*/, "Fixmap Area" },
96 { -1, NULL
} /* End of list */
/* Multipliers for offsets within the PTEs */
#define PTE_LEVEL_MULT (PAGE_SIZE)
#define PMD_LEVEL_MULT (PTRS_PER_PTE * PTE_LEVEL_MULT)
#define PUD_LEVEL_MULT (PTRS_PER_PMD * PMD_LEVEL_MULT)
#define PGD_LEVEL_MULT (PTRS_PER_PUD * PUD_LEVEL_MULT)

/*
 * Route output either to the kernel log (used by the dmesg-based W+X
 * check, where m may be NULL) or to the seq_file of the debugfs
 * interface; the NULL guard before seq_printf() covers the former case.
 */
#define pt_dump_seq_printf(m, to_dmesg, fmt, args...)		\
({								\
	if (to_dmesg)						\
		printk(KERN_INFO fmt, ##args);			\
	else							\
		if (m)						\
			seq_printf(m, fmt, ##args);		\
})

/* Same as pt_dump_seq_printf() but continues the current output line. */
#define pt_dump_cont_printf(m, to_dmesg, fmt, args...)		\
({								\
	if (to_dmesg)						\
		printk(KERN_CONT fmt, ##args);			\
	else							\
		if (m)						\
			seq_printf(m, fmt, ##args);		\
})
124 * Print a readable form of a pgprot_t to the seq_file
126 static void printk_prot(struct seq_file
*m
, pgprot_t prot
, int level
, bool dmsg
)
128 pgprotval_t pr
= pgprot_val(prot
);
129 static const char * const level_name
[] =
130 { "cr3", "pgd", "pud", "pmd", "pte" };
132 if (!pgprot_val(prot
)) {
134 pt_dump_cont_printf(m
, dmsg
, " ");
137 pt_dump_cont_printf(m
, dmsg
, "USR ");
139 pt_dump_cont_printf(m
, dmsg
, " ");
141 pt_dump_cont_printf(m
, dmsg
, "RW ");
143 pt_dump_cont_printf(m
, dmsg
, "ro ");
145 pt_dump_cont_printf(m
, dmsg
, "PWT ");
147 pt_dump_cont_printf(m
, dmsg
, " ");
149 pt_dump_cont_printf(m
, dmsg
, "PCD ");
151 pt_dump_cont_printf(m
, dmsg
, " ");
153 /* Bit 7 has a different meaning on level 3 vs 4 */
154 if (level
<= 3 && pr
& _PAGE_PSE
)
155 pt_dump_cont_printf(m
, dmsg
, "PSE ");
157 pt_dump_cont_printf(m
, dmsg
, " ");
158 if ((level
== 4 && pr
& _PAGE_PAT
) ||
159 ((level
== 3 || level
== 2) && pr
& _PAGE_PAT_LARGE
))
160 pt_dump_cont_printf(m
, dmsg
, "PAT ");
162 pt_dump_cont_printf(m
, dmsg
, " ");
163 if (pr
& _PAGE_GLOBAL
)
164 pt_dump_cont_printf(m
, dmsg
, "GLB ");
166 pt_dump_cont_printf(m
, dmsg
, " ");
168 pt_dump_cont_printf(m
, dmsg
, "NX ");
170 pt_dump_cont_printf(m
, dmsg
, "x ");
172 pt_dump_cont_printf(m
, dmsg
, "%s\n", level_name
[level
]);
/*
 * On 64 bits, sign-extend the 48 bit address to 64 bit
 */
static unsigned long normalize_addr(unsigned long u)
{
#ifdef CONFIG_X86_64
	/* Shift bit 47 up to bit 63 and back to replicate it into the
	 * upper 16 bits (canonical-address form). */
	return (signed long)(u << 16) >> 16;
#else
	/* 32-bit addresses are already canonical. */
	return u;
#endif
}
188 * This function gets called on a break in a continuous series
189 * of PTE entries; the next one is different so we need to
190 * print what we collected so far.
192 static void note_page(struct seq_file
*m
, struct pg_state
*st
,
193 pgprot_t new_prot
, int level
)
195 pgprotval_t prot
, cur
;
196 static const char units
[] = "BKMGTPE";
199 * If we have a "break" in the series, we need to flush the state that
200 * we have now. "break" is either changing perms, levels or
201 * address space marker.
203 prot
= pgprot_val(new_prot
);
204 cur
= pgprot_val(st
->current_prot
);
208 st
->current_prot
= new_prot
;
210 st
->marker
= address_markers
;
212 pt_dump_seq_printf(m
, st
->to_dmesg
, "---[ %s ]---\n",
214 } else if (prot
!= cur
|| level
!= st
->level
||
215 st
->current_address
>= st
->marker
[1].start_address
) {
216 const char *unit
= units
;
218 int width
= sizeof(unsigned long) * 2;
219 pgprotval_t pr
= pgprot_val(st
->current_prot
);
221 if (st
->check_wx
&& (pr
& _PAGE_RW
) && !(pr
& _PAGE_NX
)) {
223 "x86/mm: Found insecure W+X mapping at address %p/%pS\n",
224 (void *)st
->start_address
,
225 (void *)st
->start_address
);
226 st
->wx_pages
+= (st
->current_address
-
227 st
->start_address
) / PAGE_SIZE
;
231 * Now print the actual finished series
233 if (!st
->marker
->max_lines
||
234 st
->lines
< st
->marker
->max_lines
) {
235 pt_dump_seq_printf(m
, st
->to_dmesg
,
237 width
, st
->start_address
,
238 width
, st
->current_address
);
240 delta
= st
->current_address
- st
->start_address
;
241 while (!(delta
& 1023) && unit
[1]) {
245 pt_dump_cont_printf(m
, st
->to_dmesg
, "%9lu%c ",
247 printk_prot(m
, st
->current_prot
, st
->level
,
253 * We print markers for special areas of address space,
254 * such as the start of vmalloc space etc.
255 * This helps in the interpretation.
257 if (st
->current_address
>= st
->marker
[1].start_address
) {
258 if (st
->marker
->max_lines
&&
259 st
->lines
> st
->marker
->max_lines
) {
260 unsigned long nskip
=
261 st
->lines
- st
->marker
->max_lines
;
262 pt_dump_seq_printf(m
, st
->to_dmesg
,
263 "... %lu entr%s skipped ... \n",
265 nskip
== 1 ? "y" : "ies");
269 pt_dump_seq_printf(m
, st
->to_dmesg
, "---[ %s ]---\n",
273 st
->start_address
= st
->current_address
;
274 st
->current_prot
= new_prot
;
279 static void walk_pte_level(struct seq_file
*m
, struct pg_state
*st
, pmd_t addr
,
286 start
= (pte_t
*) pmd_page_vaddr(addr
);
287 for (i
= 0; i
< PTRS_PER_PTE
; i
++) {
288 prot
= pte_flags(*start
);
289 st
->current_address
= normalize_addr(P
+ i
* PTE_LEVEL_MULT
);
290 note_page(m
, st
, __pgprot(prot
), 4);
#if PTRS_PER_PMD > 1

/*
 * Walk one PMD page (level 3): report large/non-present entries directly,
 * descend into present table entries.
 */
static void walk_pmd_level(struct seq_file *m, struct pg_state *st, pud_t addr,
			   unsigned long P)
{
	int i;
	pmd_t *start;
	pgprotval_t prot;

	start = (pmd_t *) pud_page_vaddr(addr);
	for (i = 0; i < PTRS_PER_PMD; i++) {
		st->current_address = normalize_addr(P + i * PMD_LEVEL_MULT);
		if (!pmd_none(*start)) {
			if (pmd_large(*start) || !pmd_present(*start)) {
				prot = pmd_flags(*start);
				note_page(m, st, __pgprot(prot), 3);
			} else {
				walk_pte_level(m, st, *start,
					       P + i * PMD_LEVEL_MULT);
			}
		} else
			note_page(m, st, __pgprot(0), 3);
		start++;
	}
}

#else
/* Folded PMD level: treat the PUD entry as a PMD directly. */
#define walk_pmd_level(m,s,a,p) walk_pte_level(m,s,__pmd(pud_val(a)),p)
#define pud_large(a) pmd_large(__pmd(pud_val(a)))
#define pud_none(a)  pmd_none(__pmd(pud_val(a)))
#endif
#if PTRS_PER_PUD > 1

/*
 * Walk one PUD page (level 2): report large/non-present entries directly,
 * descend into present table entries.
 */
static void walk_pud_level(struct seq_file *m, struct pg_state *st, pgd_t addr,
			   unsigned long P)
{
	int i;
	pud_t *start;
	pgprotval_t prot;

	start = (pud_t *) pgd_page_vaddr(addr);

	for (i = 0; i < PTRS_PER_PUD; i++) {
		st->current_address = normalize_addr(P + i * PUD_LEVEL_MULT);
		if (!pud_none(*start)) {
			if (pud_large(*start) || !pud_present(*start)) {
				prot = pud_flags(*start);
				note_page(m, st, __pgprot(prot), 2);
			} else {
				walk_pmd_level(m, st, *start,
					       P + i * PUD_LEVEL_MULT);
			}
		} else
			note_page(m, st, __pgprot(0), 2);

		start++;
	}
}

#else
/* Folded PUD level: treat the PGD entry as a PUD directly. */
#define walk_pud_level(m,s,a,p) walk_pmd_level(m,s,__pud(pgd_val(a)),p)
#define pgd_large(a) pud_large(__pud(pgd_val(a)))
#define pgd_none(a)  pud_none(__pud(pgd_val(a)))
#endif
362 static inline bool is_hypervisor_range(int idx
)
365 * ffff800000000000 - ffff87ffffffffff is reserved for
368 return paravirt_enabled() &&
369 (idx
>= pgd_index(__PAGE_OFFSET
) - 16) &&
370 (idx
< pgd_index(__PAGE_OFFSET
));
373 static inline bool is_hypervisor_range(int idx
) { return false; }
376 static void ptdump_walk_pgd_level_core(struct seq_file
*m
, pgd_t
*pgd
,
380 pgd_t
*start
= (pgd_t
*) &init_level4_pgt
;
382 pgd_t
*start
= swapper_pg_dir
;
386 struct pg_state st
= {};
393 st
.check_wx
= checkwx
;
397 for (i
= 0; i
< PTRS_PER_PGD
; i
++) {
398 st
.current_address
= normalize_addr(i
* PGD_LEVEL_MULT
);
399 if (!pgd_none(*start
) && !is_hypervisor_range(i
)) {
400 if (pgd_large(*start
) || !pgd_present(*start
)) {
401 prot
= pgd_flags(*start
);
402 note_page(m
, &st
, __pgprot(prot
), 1);
404 walk_pud_level(m
, &st
, *start
,
408 note_page(m
, &st
, __pgprot(0), 1);
413 /* Flush out the last page */
414 st
.current_address
= normalize_addr(PTRS_PER_PGD
*PGD_LEVEL_MULT
);
415 note_page(m
, &st
, __pgprot(0), 0);
419 pr_info("x86/mm: Checked W+X mappings: FAILED, %lu W+X pages found.\n",
422 pr_info("x86/mm: Checked W+X mappings: passed, no W+X pages found.\n");
425 void ptdump_walk_pgd_level(struct seq_file
*m
, pgd_t
*pgd
)
427 ptdump_walk_pgd_level_core(m
, pgd
, false);
429 EXPORT_SYMBOL_GPL(ptdump_walk_pgd_level
);
431 void ptdump_walk_pgd_level_checkwx(void)
433 ptdump_walk_pgd_level_core(NULL
, NULL
, true);
436 static int __init
pt_dump_init(void)
439 /* Not a compile-time constant on x86-32 */
440 address_markers
[VMALLOC_START_NR
].start_address
= VMALLOC_START
;
441 address_markers
[VMALLOC_END_NR
].start_address
= VMALLOC_END
;
442 # ifdef CONFIG_HIGHMEM
443 address_markers
[PKMAP_BASE_NR
].start_address
= PKMAP_BASE
;
445 address_markers
[FIXADDR_START_NR
].start_address
= FIXADDR_START
;
451 __initcall(pt_dump_init
);
452 MODULE_LICENSE("GPL");
453 MODULE_AUTHOR("Arjan van de Ven <arjan@linux.intel.com>");
454 MODULE_DESCRIPTION("Kernel debugging helper that dumps pagetables");