#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <asm/sections.h>
#include <asm/pgtable.h>

static unsigned long max_addr;

struct addr_marker {
	unsigned long start_address;
	const char *name;
};

enum address_markers_idx {
	IDENTITY_NR = 0,
	KERNEL_START_NR,
	KERNEL_END_NR,
	VMEMMAP_NR,
	VMALLOC_NR,
#ifdef CONFIG_64BIT
	MODULES_NR,
#endif
};

static struct addr_marker address_markers[] = {
	[IDENTITY_NR]	  = {0, "Identity Mapping"},
	[KERNEL_START_NR] = {(unsigned long)&_stext, "Kernel Image Start"},
	[KERNEL_END_NR]	  = {(unsigned long)&_end, "Kernel Image End"},
	[VMEMMAP_NR]	  = {0, "vmemmap Area"},
	[VMALLOC_NR]	  = {0, "vmalloc Area"},
#ifdef CONFIG_64BIT
	[MODULES_NR]	  = {0, "Modules Area"},
#endif
	{ -1, NULL }
};

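/*
 * The trailing { -1, NULL } sentinel matters: note_page() compares
 * st->current_address against st->marker[1].start_address without any
 * bounds check, and -1 (ULONG_MAX) guarantees that comparison never
 * walks past the end of the table.
 */
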
struct pg_state {
	int level;
	unsigned int current_prot;
	unsigned long start_address;
	unsigned long current_address;
	const struct addr_marker *marker;
};

static void print_prot(struct seq_file *m, unsigned int pr, int level)
{
	static const char * const level_name[] =
		{ "ASCE", "PGD", "PUD", "PMD", "PTE" };

	seq_printf(m, "%s ", level_name[level]);
	if (pr & _PAGE_INVALID) {
		seq_printf(m, "I\n");
		return;
	}
	seq_printf(m, "%s", pr & _PAGE_PROTECT ? "RO " : "RW ");
	seq_printf(m, "%s", pr & _PAGE_CO ? "CO " : "   ");
	seq_putc(m, '\n');
}

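/*
 * The level argument indexes level_name[] above: the walkers below pass
 * 1 (PGD) through 4 (PTE). The "ASCE" slot exists because
 * walk_pgd_level() hands level 0 to note_page() for the final flush.
 */
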
static void note_page(struct seq_file *m, struct pg_state *st,
		      unsigned int new_prot, int level)
{
	static const char units[] = "KMGTPE";
	int width = sizeof(unsigned long) * 2;
	const char *unit = units;
	unsigned int prot, cur;
	unsigned long delta;

	/*
	 * If we have a "break" in the series, we need to flush the state
	 * that we have now. "break" is either changing perms, levels or
	 * address space marker.
	 */
	prot = new_prot;
	cur = st->current_prot;

	if (!st->level) {
		/* First entry */
		st->current_prot = new_prot;
		st->level = level;
		st->marker = address_markers;
		seq_printf(m, "---[ %s ]---\n", st->marker->name);
	} else if (prot != cur || level != st->level ||
		   st->current_address >= st->marker[1].start_address) {
		/* Print the actual finished series */
		seq_printf(m, "0x%0*lx-0x%0*lx",
			   width, st->start_address,
			   width, st->current_address);
		delta = (st->current_address - st->start_address) >> 10;
		while (!(delta & 0x3ff) && unit[1]) {
			delta >>= 10;
			unit++;
		}
		seq_printf(m, "%9lu%c ", delta, *unit);
		print_prot(m, st->current_prot, st->level);
		if (st->current_address >= st->marker[1].start_address) {
			st->marker++;
			seq_printf(m, "---[ %s ]---\n", st->marker->name);
		}
		st->start_address = st->current_address;
		st->current_prot = new_prot;
		st->level = level;
	}
}

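/*
 * A flushed series becomes one output line, roughly of the form
 *
 *	0x0000000000000000-0x0000000000100000	      1M PMD RO
 *
 * (exact spacing depends on the width and unit computed above), with a
 * "---[ ... ]---" header emitted whenever a marker boundary is crossed.
 */
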
/*
 * The actual page table walker functions. In order to keep the
 * implementation of print_prot() short, we only check and pass
 * _PAGE_INVALID and _PAGE_PROTECT flags to note_page() if a region,
 * segment or page table entry is invalid or read-only.
 * After all it's just a hint that the current level being walked
 * contains an invalid or read-only entry.
 */
static void walk_pte_level(struct seq_file *m, struct pg_state *st,
			   pmd_t *pmd, unsigned long addr)
{
	unsigned int prot;
	pte_t *pte;
	int i;

	for (i = 0; i < PTRS_PER_PTE && addr < max_addr; i++) {
		st->current_address = addr;
		pte = pte_offset_kernel(pmd, addr);
		prot = pte_val(*pte) & (_PAGE_PROTECT | _PAGE_INVALID);
		note_page(m, st, prot, 4);
		addr += PAGE_SIZE;
	}
}

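/*
 * Each iteration covers one 4K page; _PAGE_PROTECT and _PAGE_INVALID are
 * the only bits note_page() needs to distinguish RO, RW and invalid
 * ranges at the PTE level.
 */
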
#ifdef CONFIG_64BIT
#define _PMD_PROT_MASK (_SEGMENT_ENTRY_PROTECT | _SEGMENT_ENTRY_CO)
#else
#define _PMD_PROT_MASK 0
#endif

static void walk_pmd_level(struct seq_file *m, struct pg_state *st,
			   pud_t *pud, unsigned long addr)
{
	unsigned int prot;
	pmd_t *pmd;
	int i;

	for (i = 0; i < PTRS_PER_PMD && addr < max_addr; i++) {
		st->current_address = addr;
		pmd = pmd_offset(pud, addr);
		if (!pmd_none(*pmd)) {
			if (pmd_large(*pmd)) {
				prot = pmd_val(*pmd) & _PMD_PROT_MASK;
				note_page(m, st, prot, 3);
			} else
				walk_pte_level(m, st, pmd, addr);
		} else
			note_page(m, st, _PAGE_INVALID, 3);
		addr += PMD_SIZE;
	}
}

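/*
 * pmd_large() is true for a 1 MB segment mapping (EDAT-1): there is no
 * PTE level underneath, so the segment's protection bits are reported
 * directly at level 3.
 */
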
#ifdef CONFIG_64BIT
#define _PUD_PROT_MASK (_REGION3_ENTRY_RO | _REGION3_ENTRY_CO)
#else
#define _PUD_PROT_MASK 0
#endif

static void walk_pud_level(struct seq_file *m, struct pg_state *st,
			   pgd_t *pgd, unsigned long addr)
{
	unsigned int prot;
	pud_t *pud;
	int i;

	for (i = 0; i < PTRS_PER_PUD && addr < max_addr; i++) {
		st->current_address = addr;
		pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			if (pud_large(*pud)) {
				prot = pud_val(*pud) & _PUD_PROT_MASK;
				note_page(m, st, prot, 2);
			} else
				walk_pmd_level(m, st, pud, addr);
		} else
			note_page(m, st, _PAGE_INVALID, 2);
		addr += PUD_SIZE;
	}
}

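/*
 * Analogously, pud_large() covers a 2 GB region-third mapping (EDAT-2);
 * otherwise the walk descends into the segment table.
 */
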
static void walk_pgd_level(struct seq_file *m)
{
	unsigned long addr = 0;
	struct pg_state st;
	pgd_t *pgd;
	int i;

	memset(&st, 0, sizeof(st));
	for (i = 0; i < PTRS_PER_PGD && addr < max_addr; i++) {
		st.current_address = addr;
		pgd = pgd_offset_k(addr);
		if (!pgd_none(*pgd))
			walk_pud_level(m, &st, pgd, addr);
		else
			note_page(m, &st, _PAGE_INVALID, 1);
		addr += PGDIR_SIZE;
	}
	/* Flush out the last page */
	st.current_address = max_addr;
	note_page(m, &st, 0, 0);
}

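/*
 * The final note_page() call passes level 0, which never matches the
 * st->level of a real series, so the last accumulated range is always
 * flushed before the walk finishes.
 */
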
static int ptdump_show(struct seq_file *m, void *v)
{
	walk_pgd_level(m);
	return 0;
}

static int ptdump_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, ptdump_show, NULL);
}

static const struct file_operations ptdump_fops = {
	.open		= ptdump_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

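/*
 * With debugfs mounted at the usual /sys/kernel/debug, the file created
 * below can be read with
 *
 *	cat /sys/kernel/debug/kernel_page_tables
 *
 * Mode 0400 restricts it to root, since the dump reveals the kernel's
 * address space layout.
 */
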
static int pt_dump_init(void)
{
	/*
	 * Figure out the maximum virtual address being accessible with the
	 * kernel ASCE. We need this to keep the page table walker functions
	 * from accessing non-existent entries.
	 */
#ifdef CONFIG_32BIT
	max_addr = 1UL << 31;
#else
	max_addr = (S390_lowcore.kernel_asce & _REGION_ENTRY_TYPE_MASK) >> 2;
	max_addr = 1UL << (max_addr * 11 + 31);
	address_markers[MODULES_NR].start_address = MODULES_VADDR;
#endif
	address_markers[VMEMMAP_NR].start_address = (unsigned long) vmemmap;
	address_markers[VMALLOC_NR].start_address = VMALLOC_START;
	debugfs_create_file("kernel_page_tables", 0400, NULL, NULL, &ptdump_fops);
	return 0;
}
device_initcall(pt_dump_init);