arch/s390/mm/dump_pagetables.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/kasan.h>
#include <asm/kasan.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
static unsigned long max_addr;

struct addr_marker {
	unsigned long start_address;
	const char *name;
};
enum address_markers_idx {
	IDENTITY_NR = 0,
	KERNEL_START_NR,
	KERNEL_END_NR,
#ifdef CONFIG_KASAN
	KASAN_SHADOW_START_NR,
	KASAN_SHADOW_END_NR,
#endif
	VMEMMAP_NR,
	VMALLOC_NR,
	MODULES_NR,
};
static struct addr_marker address_markers[] = {
	[IDENTITY_NR] = {0, "Identity Mapping"},
	[KERNEL_START_NR] = {(unsigned long)_stext, "Kernel Image Start"},
	[KERNEL_END_NR] = {(unsigned long)_end, "Kernel Image End"},
#ifdef CONFIG_KASAN
	[KASAN_SHADOW_START_NR] = {KASAN_SHADOW_START, "Kasan Shadow Start"},
	[KASAN_SHADOW_END_NR] = {KASAN_SHADOW_END, "Kasan Shadow End"},
#endif
	[VMEMMAP_NR] = {0, "vmemmap Area"},
	[VMALLOC_NR] = {0, "vmalloc Area"},
	[MODULES_NR] = {0, "Modules Area"},
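	/*
	 * Sentinel entry: note_page() looks ahead at marker[1].start_address,
	 * so the array must be terminated past the last real marker.
	 */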
	{ -1, NULL }
};
struct pg_state {
	int level;
	unsigned int current_prot;
	unsigned long start_address;
	unsigned long current_address;
	const struct addr_marker *marker;
};
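/*
 * Print the protection of one mapping series: level indexes level_name
 * below (0 is the ASCE at the top of the walk, 4 a page table entry);
 * invalid entries print as "I", valid ones as RO/RW plus NX/X.
 */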
static void print_prot(struct seq_file *m, unsigned int pr, int level)
{
	static const char * const level_name[] =
		{ "ASCE", "PGD", "PUD", "PMD", "PTE" };

	seq_printf(m, "%s ", level_name[level]);
	if (pr & _PAGE_INVALID) {
		seq_printf(m, "I\n");
		return;
	}
	seq_puts(m, (pr & _PAGE_PROTECT) ? "RO " : "RW ");
	seq_puts(m, (pr & _PAGE_NOEXEC) ? "NX\n" : "X\n");
}
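/*
 * note_page() batches consecutive entries with identical protection and
 * level into a single output line. An illustrative (made-up, not from a
 * real dump) line would look like:
 *
 *   0x0000000000100000-0x0000000001000000       15M PMD RW X
 *
 * i.e. address range, human-readable size, mapping level and protection.
 */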
static void note_page(struct seq_file *m, struct pg_state *st,
		      unsigned int new_prot, int level)
{
	static const char units[] = "KMGTPE";
	int width = sizeof(unsigned long) * 2;
	const char *unit = units;
	unsigned int prot, cur;
	unsigned long delta;

	/*
	 * If we have a "break" in the series, we need to flush the state
	 * that we have now. "break" is either changing perms, levels or
	 * address space marker.
	 */
	prot = new_prot;
	cur = st->current_prot;

	if (!st->level) {
		/* First entry */
		st->current_prot = new_prot;
		st->level = level;
		st->marker = address_markers;
		seq_printf(m, "---[ %s ]---\n", st->marker->name);
	} else if (prot != cur || level != st->level ||
		   st->current_address >= st->marker[1].start_address) {
		/* Print the actual finished series */
		seq_printf(m, "0x%0*lx-0x%0*lx ",
			   width, st->start_address,
			   width, st->current_address);
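		/*
		 * Scale the size down by 1024 while it remains a whole
		 * multiple, so that e.g. 0x200000 bytes print as "2M"
		 * rather than "2048K".
		 */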
		delta = (st->current_address - st->start_address) >> 10;
		while (!(delta & 0x3ff) && unit[1]) {
			delta >>= 10;
			unit++;
		}
		seq_printf(m, "%9lu%c ", delta, *unit);
		print_prot(m, st->current_prot, st->level);
		while (st->current_address >= st->marker[1].start_address) {
			st->marker++;
			seq_printf(m, "---[ %s ]---\n", st->marker->name);
		}
		st->start_address = st->current_address;
		st->current_prot = new_prot;
		st->level = level;
	}
}
#ifdef CONFIG_KASAN
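/*
 * KASAN initially backs its shadow area with one shared set of "early
 * shadow" page tables. When a walker function recognizes one of these
 * shared tables it reports the whole range as a single PTE-level entry
 * instead of descending into the same table over and over again.
 */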
static void note_kasan_early_shadow_page(struct seq_file *m,
					 struct pg_state *st)
{
	unsigned int prot;

	prot = pte_val(*kasan_early_shadow_pte) &
		(_PAGE_PROTECT | _PAGE_INVALID | _PAGE_NOEXEC);
	note_page(m, st, prot, 4);
}
#endif
/*
 * The actual page table walker functions. In order to keep the
 * implementation of print_prot() short, we only check and pass
 * _PAGE_INVALID and _PAGE_PROTECT flags to note_page() if a region,
 * segment or page table entry is invalid or read-only.
 * After all it's just a hint that the current level being walked
 * contains an invalid or read-only entry.
 */
static void walk_pte_level(struct seq_file *m, struct pg_state *st,
			   pmd_t *pmd, unsigned long addr)
{
	unsigned int prot;
	pte_t *pte;
	int i;

	for (i = 0; i < PTRS_PER_PTE && addr < max_addr; i++) {
		st->current_address = addr;
		pte = pte_offset_kernel(pmd, addr);
		prot = pte_val(*pte) &
			(_PAGE_PROTECT | _PAGE_INVALID | _PAGE_NOEXEC);
		note_page(m, st, prot, 4);
		addr += PAGE_SIZE;
	}
}
static void walk_pmd_level(struct seq_file *m, struct pg_state *st,
			   pud_t *pud, unsigned long addr)
{
	unsigned int prot;
	pmd_t *pmd;
	int i;

#ifdef CONFIG_KASAN
	if ((pud_val(*pud) & PAGE_MASK) == __pa(kasan_early_shadow_pmd)) {
		note_kasan_early_shadow_page(m, st);
		return;
	}
#endif

	pmd = pmd_offset(pud, addr);
	for (i = 0; i < PTRS_PER_PMD && addr < max_addr; i++, pmd++) {
		st->current_address = addr;
		if (!pmd_none(*pmd)) {
			if (pmd_large(*pmd)) {
				prot = pmd_val(*pmd) &
					(_SEGMENT_ENTRY_PROTECT |
					 _SEGMENT_ENTRY_NOEXEC);
				note_page(m, st, prot, 3);
			} else
				walk_pte_level(m, st, pmd, addr);
		} else
			note_page(m, st, _PAGE_INVALID, 3);
		addr += PMD_SIZE;
	}
}
static void walk_pud_level(struct seq_file *m, struct pg_state *st,
			   p4d_t *p4d, unsigned long addr)
{
	unsigned int prot;
	pud_t *pud;
	int i;

#ifdef CONFIG_KASAN
	if ((p4d_val(*p4d) & PAGE_MASK) == __pa(kasan_early_shadow_pud)) {
		note_kasan_early_shadow_page(m, st);
		return;
	}
#endif

	pud = pud_offset(p4d, addr);
	for (i = 0; i < PTRS_PER_PUD && addr < max_addr; i++, pud++) {
		st->current_address = addr;
		if (!pud_none(*pud))
			if (pud_large(*pud)) {
				prot = pud_val(*pud) &
					(_REGION_ENTRY_PROTECT |
					 _REGION_ENTRY_NOEXEC);
				note_page(m, st, prot, 2);
			} else
				walk_pmd_level(m, st, pud, addr);
		else
			note_page(m, st, _PAGE_INVALID, 2);
		addr += PUD_SIZE;
	}
}
static void walk_p4d_level(struct seq_file *m, struct pg_state *st,
			   pgd_t *pgd, unsigned long addr)
{
	p4d_t *p4d;
	int i;

#ifdef CONFIG_KASAN
	if ((pgd_val(*pgd) & PAGE_MASK) == __pa(kasan_early_shadow_p4d)) {
		note_kasan_early_shadow_page(m, st);
		return;
	}
#endif

	p4d = p4d_offset(pgd, addr);
	for (i = 0; i < PTRS_PER_P4D && addr < max_addr; i++, p4d++) {
		st->current_address = addr;
		if (!p4d_none(*p4d))
			walk_pud_level(m, st, p4d, addr);
		else
			note_page(m, st, _PAGE_INVALID, 2);
		addr += P4D_SIZE;
	}
}
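/*
 * Top level of the walk: iterate over the kernel page table and call
 * cond_resched() between PGD entries, since dumping the whole kernel
 * address space can take a while.
 */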
static void walk_pgd_level(struct seq_file *m)
{
	unsigned long addr = 0;
	struct pg_state st;
	pgd_t *pgd;
	int i;

	memset(&st, 0, sizeof(st));
	for (i = 0; i < PTRS_PER_PGD && addr < max_addr; i++) {
		st.current_address = addr;
		pgd = pgd_offset_k(addr);
		if (!pgd_none(*pgd))
			walk_p4d_level(m, &st, pgd, addr);
		else
			note_page(m, &st, _PAGE_INVALID, 1);
		addr += PGDIR_SIZE;
		cond_resched();
	}
	/* Flush out the last page */
	st.current_address = max_addr;
	note_page(m, &st, 0, 0);
}
static int ptdump_show(struct seq_file *m, void *v)
{
	walk_pgd_level(m);
	return 0;
}
static int ptdump_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, ptdump_show, NULL);
}
static const struct file_operations ptdump_fops = {
	.open		= ptdump_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static int pt_dump_init(void)
{
	/*
	 * Figure out the maximum virtual address being accessible with the
	 * kernel ASCE. We need this to keep the page table walker functions
	 * from accessing non-existent entries.
	 */
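	/*
	 * The ASCE designation-type bits select the top-level table type
	 * (0 = segment table, ..., 3 = region-first table); each higher
	 * type adds 11 address bits on top of the 31-bit base, i.e.
	 * 2^31, 2^42, 2^53 or 2^64 bytes of addressable space.
	 */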
	max_addr = (S390_lowcore.kernel_asce & _REGION_ENTRY_TYPE_MASK) >> 2;
	max_addr = 1UL << (max_addr * 11 + 31);
	address_markers[MODULES_NR].start_address = MODULES_VADDR;
	address_markers[VMEMMAP_NR].start_address = (unsigned long) vmemmap;
	address_markers[VMALLOC_NR].start_address = VMALLOC_START;
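	/*
	 * The dump is readable by root via kernel_page_tables in debugfs,
	 * usually mounted at /sys/kernel/debug.
	 */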
	debugfs_create_file("kernel_page_tables", 0400, NULL, NULL, &ptdump_fops);
	return 0;
}
device_initcall(pt_dump_init);