/*
 * Debug helper to dump the current kernel pagetables of the system
 * so that we can see what the various memory ranges are set to.
 *
 * (C) Copyright 2008 Intel Corporation
 *
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

#include <linux/debugfs.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>

#include <asm/pgtable.h>

/*
 * The dumper groups pagetable entries of the same type into one, and for
 * that it needs to keep some state when walking, and flush this state
 * when a "break" in the continuity is found.
 */
struct pg_state {
        int level;
        pgprot_t current_prot;
        unsigned long start_address;
        unsigned long current_address;
        const struct addr_marker *marker;
        unsigned long lines;
        bool to_dmesg;
};

struct addr_marker {
        unsigned long start_address;
        const char *name;
        unsigned long max_lines;
};

/* indices for address_markers; keep sync'd w/ address_markers below */
enum address_markers_idx {
        USER_SPACE_NR = 0,
#ifdef CONFIG_X86_64
        KERNEL_SPACE_NR,
        LOW_KERNEL_NR,
        VMALLOC_START_NR,
        VMEMMAP_START_NR,
# ifdef CONFIG_X86_ESPFIX64
        ESPFIX_START_NR,
# endif
        HIGH_KERNEL_NR,
        MODULES_VADDR_NR,
        MODULES_END_NR,
#else
        KERNEL_SPACE_NR,
        VMALLOC_START_NR,
        VMALLOC_END_NR,
# ifdef CONFIG_HIGHMEM
        PKMAP_BASE_NR,
# endif
        FIXADDR_START_NR,
#endif
};

/* Address space markers hints */
static struct addr_marker address_markers[] = {
        { 0, "User Space" },
#ifdef CONFIG_X86_64
        { 0x8000000000000000UL, "Kernel Space" },
        { PAGE_OFFSET,          "Low Kernel Mapping" },
        { VMALLOC_START,        "vmalloc() Area" },
        { VMEMMAP_START,        "Vmemmap" },
# ifdef CONFIG_X86_ESPFIX64
        { ESPFIX_BASE_ADDR,     "ESPfix Area", 16 },
# endif
# ifdef CONFIG_EFI
        { EFI_VA_END,           "EFI Runtime Services" },
# endif
        { __START_KERNEL_map,   "High Kernel Mapping" },
        { MODULES_VADDR,        "Modules" },
        { MODULES_END,          "End Modules" },
#else
        { PAGE_OFFSET,          "Kernel Mapping" },
        { 0/* VMALLOC_START */, "vmalloc() Area" },
        { 0/*VMALLOC_END*/,     "vmalloc() End" },
# ifdef CONFIG_HIGHMEM
        { 0/*PKMAP_BASE*/,      "Persistent kmap() Area" },
# endif
        { 0/*FIXADDR_START*/,   "Fixmap Area" },
#endif
        { -1, NULL }            /* End of list */
};
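
/*
 * note_page() only ever moves forward through this table, comparing the
 * current address against marker[1].start_address, so the entries are
 * listed in ascending order.  The zero placeholders in the 32-bit half
 * are patched in pt_dump_init() because those addresses are not
 * compile-time constants there.
 */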

/* Multipliers for offsets within the PTEs */
#define PTE_LEVEL_MULT (PAGE_SIZE)
#define PMD_LEVEL_MULT (PTRS_PER_PTE * PTE_LEVEL_MULT)
#define PUD_LEVEL_MULT (PTRS_PER_PMD * PMD_LEVEL_MULT)
#define PGD_LEVEL_MULT (PTRS_PER_PUD * PUD_LEVEL_MULT)
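
/*
 * Worked example, assuming the usual x86-64 layout of 4 KiB pages and 512
 * entries per table: PTE_LEVEL_MULT = 4 KiB, PMD_LEVEL_MULT = 2 MiB,
 * PUD_LEVEL_MULT = 1 GiB and PGD_LEVEL_MULT = 512 GiB, i.e. the span of
 * virtual address space covered by a single entry at each level.
 */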

#define pt_dump_seq_printf(m, to_dmesg, fmt, args...)           \
({                                                              \
        if (to_dmesg)                                           \
                printk(KERN_INFO fmt, ##args);                  \
        else                                                    \
                if (m)                                          \
                        seq_printf(m, fmt, ##args);             \
})

#define pt_dump_cont_printf(m, to_dmesg, fmt, args...)          \
({                                                              \
        if (to_dmesg)                                           \
                printk(KERN_CONT fmt, ##args);                  \
        else                                                    \
                if (m)                                          \
                        seq_printf(m, fmt, ##args);             \
})
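
/*
 * Both macros route one chunk of formatted output either to the seq_file
 * (the normal debugfs read path) or, when to_dmesg is set, to printk().
 * The printk path is what ptdump_walk_pgd_level() ends up using when it
 * is handed an explicit pgd and has no seq_file to write to.
 */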

/*
 * Print a readable form of a pgprot_t to the seq_file
 */
static void printk_prot(struct seq_file *m, pgprot_t prot, int level, bool dmsg)
{
        pgprotval_t pr = pgprot_val(prot);
        static const char * const level_name[] =
                { "cr3", "pgd", "pud", "pmd", "pte" };

        if (!pgprot_val(prot)) {
                /* Not present */
                pt_dump_cont_printf(m, dmsg, "                              ");
        } else {
                if (pr & _PAGE_USER)
                        pt_dump_cont_printf(m, dmsg, "USR ");
                else
                        pt_dump_cont_printf(m, dmsg, "    ");
                if (pr & _PAGE_RW)
                        pt_dump_cont_printf(m, dmsg, "RW ");
                else
                        pt_dump_cont_printf(m, dmsg, "ro ");
                if (pr & _PAGE_PWT)
                        pt_dump_cont_printf(m, dmsg, "PWT ");
                else
                        pt_dump_cont_printf(m, dmsg, "    ");
                if (pr & _PAGE_PCD)
                        pt_dump_cont_printf(m, dmsg, "PCD ");
                else
                        pt_dump_cont_printf(m, dmsg, "    ");

                /* Bit 7 has a different meaning on level 3 vs 4 */
                if (level <= 3 && pr & _PAGE_PSE)
                        pt_dump_cont_printf(m, dmsg, "PSE ");
                else
                        pt_dump_cont_printf(m, dmsg, "    ");
                if ((level == 4 && pr & _PAGE_PAT) ||
                    ((level == 3 || level == 2) && pr & _PAGE_PAT_LARGE))
                        pt_dump_cont_printf(m, dmsg, "pat ");
                else
                        pt_dump_cont_printf(m, dmsg, "    ");
                if (pr & _PAGE_GLOBAL)
                        pt_dump_cont_printf(m, dmsg, "GLB ");
                else
                        pt_dump_cont_printf(m, dmsg, "    ");
                if (pr & _PAGE_NX)
                        pt_dump_cont_printf(m, dmsg, "NX ");
                else
                        pt_dump_cont_printf(m, dmsg, "x  ");
        }
        pt_dump_cont_printf(m, dmsg, "%s\n", level_name[level]);
}
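
/*
 * Illustrative only: for a writable, global, non-executable kernel pte
 * the calls above emit a flag column roughly like
 *
 *          RW                 GLB NX pte
 *
 * with the blank pad strings keeping the flag columns aligned from one
 * line to the next.
 */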

/*
 * On 64 bits, sign-extend the 48 bit address to 64 bit
 */
static unsigned long normalize_addr(unsigned long u)
{
#ifdef CONFIG_X86_64
        return (signed long)(u << 16) >> 16;
#else
        return u;
#endif
}
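
/*
 * Worked example of the sign extension above: for the 48-bit kernel
 * address 0x0000800000000000, shifting left by 16 moves bit 47 into the
 * sign bit, and the arithmetic shift back down yields the canonical form
 * 0xffff800000000000.  Addresses with bit 47 clear pass through
 * unchanged.
 */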

/*
 * This function gets called on a break in a continuous series
 * of PTE entries; the next one is different so we need to
 * print what we collected so far.
 */
static void note_page(struct seq_file *m, struct pg_state *st,
                      pgprot_t new_prot, int level)
{
        pgprotval_t prot, cur;
        static const char units[] = "BKMGTPE";

        /*
         * If we have a "break" in the series, we need to flush the state that
         * we have now. "break" is either changing perms, levels or
         * address space marker.
         */
        prot = pgprot_val(new_prot) & PTE_FLAGS_MASK;
        cur = pgprot_val(st->current_prot) & PTE_FLAGS_MASK;

        if (!st->level) {
                /* First entry */
                st->current_prot = new_prot;
                st->level = level;
                st->marker = address_markers;
                st->lines = 0;
                pt_dump_seq_printf(m, st->to_dmesg, "---[ %s ]---\n",
                                   st->marker->name);
        } else if (prot != cur || level != st->level ||
                   st->current_address >= st->marker[1].start_address) {
                const char *unit = units;
                unsigned long delta;
                int width = sizeof(unsigned long) * 2;

                /*
                 * Now print the actual finished series
                 */
                if (!st->marker->max_lines ||
                    st->lines < st->marker->max_lines) {
                        pt_dump_seq_printf(m, st->to_dmesg,
                                           "0x%0*lx-0x%0*lx   ",
                                           width, st->start_address,
                                           width, st->current_address);

                        delta = st->current_address - st->start_address;
                        while (!(delta & 1023) && unit[1]) {
                                delta >>= 10;
                                unit++;
                        }
                        pt_dump_cont_printf(m, st->to_dmesg, "%9lu%c ",
                                            delta, *unit);
                        printk_prot(m, st->current_prot, st->level,
                                    st->to_dmesg);
                }
                st->lines++;

                /*
                 * We print markers for special areas of address space,
                 * such as the start of vmalloc space etc.
                 * This helps in the interpretation.
                 */
                if (st->current_address >= st->marker[1].start_address) {
                        if (st->marker->max_lines &&
                            st->lines > st->marker->max_lines) {
                                unsigned long nskip =
                                        st->lines - st->marker->max_lines;
                                pt_dump_seq_printf(m, st->to_dmesg,
                                                   "... %lu entr%s skipped ... \n",
                                                   nskip,
                                                   nskip == 1 ? "y" : "ies");
                        }
                        st->marker++;
                        st->lines = 0;
                        pt_dump_seq_printf(m, st->to_dmesg, "---[ %s ]---\n",
                                           st->marker->name);
                }

                st->start_address = st->current_address;
                st->current_prot = new_prot;
                st->level = level;
        }
}
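
/*
 * Illustrative example of the resulting summary format (addresses and
 * flags are made up): 512 identically-protected 2 MiB pmd mappings
 * collapse into one line such as
 *
 *      0xffff880000000000-0xffff880040000000    1G   RW   PSE   GLB NX pmd
 *
 * and a "---[ ... ]---" header is printed whenever the walk crosses into
 * the next address_markers region.
 */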

static void walk_pte_level(struct seq_file *m, struct pg_state *st, pmd_t addr,
                                                        unsigned long P)
{
        int i;
        pte_t *start;

        start = (pte_t *) pmd_page_vaddr(addr);
        for (i = 0; i < PTRS_PER_PTE; i++) {
                pgprot_t prot = pte_pgprot(*start);

                st->current_address = normalize_addr(P + i * PTE_LEVEL_MULT);
                note_page(m, st, prot, 4);
                start++;
        }
}

#if PTRS_PER_PMD > 1

static void walk_pmd_level(struct seq_file *m, struct pg_state *st, pud_t addr,
                                                        unsigned long P)
{
        int i;
        pmd_t *start;

        start = (pmd_t *) pud_page_vaddr(addr);
        for (i = 0; i < PTRS_PER_PMD; i++) {
                st->current_address = normalize_addr(P + i * PMD_LEVEL_MULT);
                if (!pmd_none(*start)) {
                        pgprotval_t prot = pmd_val(*start) & PTE_FLAGS_MASK;

                        if (pmd_large(*start) || !pmd_present(*start))
                                note_page(m, st, __pgprot(prot), 3);
                        else
                                walk_pte_level(m, st, *start,
                                               P + i * PMD_LEVEL_MULT);
                } else
                        note_page(m, st, __pgprot(0), 3);
                start++;
        }
}

#else
#define walk_pmd_level(m,s,a,p) walk_pte_level(m,s,__pmd(pud_val(a)),p)
#define pud_large(a) pmd_large(__pmd(pud_val(a)))
#define pud_none(a)  pmd_none(__pmd(pud_val(a)))
#endif
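
/*
 * The #else branch above handles 2-level paging (32-bit non-PAE), where
 * PTRS_PER_PMD is 1 and the pmd is folded away: the walk drops straight
 * from the pud to the pte level, and pud_large()/pud_none() are expressed
 * through their pmd counterparts.
 */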

#if PTRS_PER_PUD > 1

static void walk_pud_level(struct seq_file *m, struct pg_state *st, pgd_t addr,
                                                        unsigned long P)
{
        int i;
        pud_t *start;

        start = (pud_t *) pgd_page_vaddr(addr);

        for (i = 0; i < PTRS_PER_PUD; i++) {
                st->current_address = normalize_addr(P + i * PUD_LEVEL_MULT);
                if (!pud_none(*start)) {
                        pgprotval_t prot = pud_val(*start) & PTE_FLAGS_MASK;

                        if (pud_large(*start) || !pud_present(*start))
                                note_page(m, st, __pgprot(prot), 2);
                        else
                                walk_pmd_level(m, st, *start,
                                               P + i * PUD_LEVEL_MULT);
                } else
                        note_page(m, st, __pgprot(0), 2);

                start++;
        }
}

#else
#define walk_pud_level(m,s,a,p) walk_pmd_level(m,s,__pud(pgd_val(a)),p)
#define pgd_large(a) pud_large(__pud(pgd_val(a)))
#define pgd_none(a)  pud_none(__pud(pgd_val(a)))
#endif
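
/*
 * Same idea one level up: on configurations where the pud is folded
 * (PTRS_PER_PUD == 1, i.e. 32-bit), each pgd entry is handed directly to
 * the pmd walker and pgd_large()/pgd_none() are expressed through the pud
 * helpers.
 */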

void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd)
{
#ifdef CONFIG_X86_64
        pgd_t *start = (pgd_t *) &init_level4_pgt;
#else
        pgd_t *start = swapper_pg_dir;
#endif
        int i;
        struct pg_state st = {};

        if (pgd) {
                start = pgd;
                st.to_dmesg = true;
        }

        for (i = 0; i < PTRS_PER_PGD; i++) {
                st.current_address = normalize_addr(i * PGD_LEVEL_MULT);
                if (!pgd_none(*start)) {
                        pgprotval_t prot = pgd_val(*start) & PTE_FLAGS_MASK;

                        if (pgd_large(*start) || !pgd_present(*start))
                                note_page(m, &st, __pgprot(prot), 1);
                        else
                                walk_pud_level(m, &st, *start,
                                               i * PGD_LEVEL_MULT);
                } else
                        note_page(m, &st, __pgprot(0), 1);

                start++;
        }

        /* Flush out the last page */
        st.current_address = normalize_addr(PTRS_PER_PGD*PGD_LEVEL_MULT);
        note_page(m, &st, __pgprot(0), 0);
}
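
/*
 * Typical usage is indirect: with this file built (CONFIG_X86_PTDUMP),
 * reading the debugfs file created below is enough, e.g.
 *
 *      # cat /sys/kernel/debug/kernel_page_tables
 *      ---[ Low Kernel Mapping ]---
 *      0xffff880000000000-0xffff880000099000   612K   RW   GLB NX pte
 *      ...
 *
 * (output illustrative only, assuming debugfs is mounted at the usual
 * place).  Callers that pass a non-NULL pgd and no seq_file get the same
 * dump via printk instead.
 */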

static int ptdump_show(struct seq_file *m, void *v)
{
        ptdump_walk_pgd_level(m, NULL);
        return 0;
}

static int ptdump_open(struct inode *inode, struct file *filp)
{
        return single_open(filp, ptdump_show, NULL);
}

static const struct file_operations ptdump_fops = {
        .open           = ptdump_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int pt_dump_init(void)
{
        struct dentry *pe;

#ifdef CONFIG_X86_32
        /* Not a compile-time constant on x86-32 */
        address_markers[VMALLOC_START_NR].start_address = VMALLOC_START;
        address_markers[VMALLOC_END_NR].start_address = VMALLOC_END;
# ifdef CONFIG_HIGHMEM
        address_markers[PKMAP_BASE_NR].start_address = PKMAP_BASE;
# endif
        address_markers[FIXADDR_START_NR].start_address = FIXADDR_START;
#endif

        pe = debugfs_create_file("kernel_page_tables", 0600, NULL, NULL,
                                 &ptdump_fops);
        if (!pe)
                return -ENOMEM;

        return 0;
}

__initcall(pt_dump_init);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arjan van de Ven <arjan@linux.intel.com>");
MODULE_DESCRIPTION("Kernel debugging helper that dumps pagetables");