arch/s390/mm/dump_pagetables.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/kasan.h>
#include <asm/kasan.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
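
/*
 * Highest virtual address reachable through the kernel ASCE; computed
 * in pt_dump_init() and used to stop the walkers before they touch
 * table entries that do not exist.
 */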
static unsigned long max_addr;

struct addr_marker {
	unsigned long start_address;
	const char *name;
};

enum address_markers_idx {
	IDENTITY_NR = 0,
	KERNEL_START_NR,
	KERNEL_END_NR,
#ifdef CONFIG_KASAN
	KASAN_SHADOW_START_NR,
	KASAN_SHADOW_END_NR,
#endif
	VMEMMAP_NR,
	VMALLOC_NR,
	MODULES_NR,
};
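
/*
 * Markers label the regions of the kernel address space in the dump.
 * note_page() advances through this table linearly, so entries must be
 * sorted by ascending start_address. The entries initialized to 0 are
 * only known at boot time and are filled in by pt_dump_init(); the
 * { -1, NULL } sentinel terminates the table.
 */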
static struct addr_marker address_markers[] = {
	[IDENTITY_NR] = {0, "Identity Mapping"},
	[KERNEL_START_NR] = {(unsigned long)_stext, "Kernel Image Start"},
	[KERNEL_END_NR] = {(unsigned long)_end, "Kernel Image End"},
#ifdef CONFIG_KASAN
	[KASAN_SHADOW_START_NR] = {KASAN_SHADOW_START, "Kasan Shadow Start"},
	[KASAN_SHADOW_END_NR] = {KASAN_SHADOW_END, "Kasan Shadow End"},
#endif
	[VMEMMAP_NR] = {0, "vmemmap Area"},
	[VMALLOC_NR] = {0, "vmalloc Area"},
	[MODULES_NR] = {0, "Modules Area"},
	{ -1, NULL }
};

struct pg_state {
	int level;
	unsigned int current_prot;
	unsigned long start_address;
	unsigned long current_address;
	const struct addr_marker *marker;
};

static void print_prot(struct seq_file *m, unsigned int pr, int level)
{
	static const char * const level_name[] =
		{ "ASCE", "PGD", "PUD", "PMD", "PTE" };

	seq_printf(m, "%s ", level_name[level]);
	if (pr & _PAGE_INVALID) {
		seq_printf(m, "I\n");
		return;
	}
	seq_puts(m, (pr & _PAGE_PROTECT) ? "RO " : "RW ");
	seq_puts(m, (pr & _PAGE_NOEXEC) ? "NX\n" : "X\n");
}
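
/*
 * Collapse consecutive entries with identical protection into one line.
 * Each finished series is printed as, e.g. (values illustrative only):
 *
 *   0x0000000001000000-0x0000000080000000      2032M PMD RW NX
 *
 * i.e. address range, size in the largest unit that fits exactly, the
 * table level the mapping was found at, and the protection bits.
 */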
static void note_page(struct seq_file *m, struct pg_state *st,
		      unsigned int new_prot, int level)
{
	static const char units[] = "KMGTPE";
	int width = sizeof(unsigned long) * 2;
	const char *unit = units;
	unsigned int prot, cur;
	unsigned long delta;

	/*
	 * If we have a "break" in the series, we need to flush the state
	 * that we have now. "break" is either changing perms, levels or
	 * address space marker.
	 */
	prot = new_prot;
	cur = st->current_prot;

	if (!st->level) {
		/* First entry */
		st->current_prot = new_prot;
		st->level = level;
		st->marker = address_markers;
		seq_printf(m, "---[ %s ]---\n", st->marker->name);
	} else if (prot != cur || level != st->level ||
		   st->current_address >= st->marker[1].start_address) {
		/* Print the actual finished series */
		seq_printf(m, "0x%0*lx-0x%0*lx ",
			   width, st->start_address,
			   width, st->current_address);
		delta = (st->current_address - st->start_address) >> 10;
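		/*
		 * delta is in KiB at this point; fold it into the largest
		 * unit (K, M, G, T, P, E) that still represents the size
		 * exactly.
		 */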
		while (!(delta & 0x3ff) && unit[1]) {
			delta >>= 10;
			unit++;
		}
		seq_printf(m, "%9lu%c ", delta, *unit);
		print_prot(m, st->current_prot, st->level);
		while (st->current_address >= st->marker[1].start_address) {
			st->marker++;
			seq_printf(m, "---[ %s ]---\n", st->marker->name);
		}
		st->start_address = st->current_address;
		st->current_prot = new_prot;
		st->level = level;
	}
}
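
/*
 * KASAN backs the shadow for large, never-poisoned regions with shared
 * read-only zero tables (kasan_zero_p4d/pud/pmd/pte). The walkers below
 * recognize these shared tables by their physical address and report
 * them via this helper as a single PTE-level entry instead of
 * descending into identical data over and over.
 */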
#ifdef CONFIG_KASAN
static void note_kasan_zero_page(struct seq_file *m, struct pg_state *st)
{
	unsigned int prot;

	prot = pte_val(*kasan_zero_pte) &
		(_PAGE_PROTECT | _PAGE_INVALID | _PAGE_NOEXEC);
	note_page(m, st, prot, 4);
}
#endif

/*
 * The actual page table walker functions. In order to keep the
 * implementation of print_prot() short, we only check and pass
 * _PAGE_INVALID and _PAGE_PROTECT flags to note_page() if a region,
 * segment or page table entry is invalid or read-only.
 * After all it's just a hint that the current level being walked
 * contains an invalid or read-only entry.
 */
static void walk_pte_level(struct seq_file *m, struct pg_state *st,
			   pmd_t *pmd, unsigned long addr)
{
	unsigned int prot;
	pte_t *pte;
	int i;

	for (i = 0; i < PTRS_PER_PTE && addr < max_addr; i++) {
		st->current_address = addr;
		pte = pte_offset_kernel(pmd, addr);
		prot = pte_val(*pte) &
			(_PAGE_PROTECT | _PAGE_INVALID | _PAGE_NOEXEC);
		note_page(m, st, prot, 4);
		addr += PAGE_SIZE;
	}
}

static void walk_pmd_level(struct seq_file *m, struct pg_state *st,
			   pud_t *pud, unsigned long addr)
{
	unsigned int prot;
	pmd_t *pmd;
	int i;

#ifdef CONFIG_KASAN
	if ((pud_val(*pud) & PAGE_MASK) == __pa(kasan_zero_pmd)) {
		note_kasan_zero_page(m, st);
		return;
	}
#endif
	for (i = 0; i < PTRS_PER_PMD && addr < max_addr; i++) {
		st->current_address = addr;
		pmd = pmd_offset(pud, addr);
		if (!pmd_none(*pmd)) {
			if (pmd_large(*pmd)) {
				prot = pmd_val(*pmd) &
				       (_SEGMENT_ENTRY_PROTECT |
					_SEGMENT_ENTRY_NOEXEC);
				note_page(m, st, prot, 3);
			} else
				walk_pte_level(m, st, pmd, addr);
		} else
			note_page(m, st, _PAGE_INVALID, 3);
		addr += PMD_SIZE;
	}
}
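
/*
 * pmd_large()/pud_large() entries map a whole segment (1 MB) or region
 * (2 GB) with a single table entry; note_page() is called for them at
 * the PMD/PUD level directly instead of descending further.
 */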
static void walk_pud_level(struct seq_file *m, struct pg_state *st,
			   p4d_t *p4d, unsigned long addr)
{
	unsigned int prot;
	pud_t *pud;
	int i;

#ifdef CONFIG_KASAN
	if ((p4d_val(*p4d) & PAGE_MASK) == __pa(kasan_zero_pud)) {
		note_kasan_zero_page(m, st);
		return;
	}
#endif
	for (i = 0; i < PTRS_PER_PUD && addr < max_addr; i++) {
		st->current_address = addr;
		pud = pud_offset(p4d, addr);
		if (!pud_none(*pud)) {
			if (pud_large(*pud)) {
				prot = pud_val(*pud) &
				       (_REGION_ENTRY_PROTECT |
					_REGION_ENTRY_NOEXEC);
				note_page(m, st, prot, 2);
			} else
				walk_pmd_level(m, st, pud, addr);
		} else
			note_page(m, st, _PAGE_INVALID, 2);
		addr += PUD_SIZE;
	}
}

static void walk_p4d_level(struct seq_file *m, struct pg_state *st,
			   pgd_t *pgd, unsigned long addr)
{
	p4d_t *p4d;
	int i;

#ifdef CONFIG_KASAN
	if ((pgd_val(*pgd) & PAGE_MASK) == __pa(kasan_zero_p4d)) {
		note_kasan_zero_page(m, st);
		return;
	}
#endif
	for (i = 0; i < PTRS_PER_P4D && addr < max_addr; i++) {
		st->current_address = addr;
		p4d = p4d_offset(pgd, addr);
		if (!p4d_none(*p4d))
			walk_pud_level(m, st, p4d, addr);
		else
			note_page(m, st, _PAGE_INVALID, 2);
		addr += P4D_SIZE;
	}
}

static void walk_pgd_level(struct seq_file *m)
{
	unsigned long addr = 0;
	struct pg_state st;
	pgd_t *pgd;
	int i;

	memset(&st, 0, sizeof(st));
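	/*
	 * st was zeroed above, so st.level == 0 and the first note_page()
	 * call takes the "First entry" branch, printing the initial
	 * "---[ Identity Mapping ]---" marker.
	 */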
	for (i = 0; i < PTRS_PER_PGD && addr < max_addr; i++) {
		st.current_address = addr;
		pgd = pgd_offset_k(addr);
		if (!pgd_none(*pgd))
			walk_p4d_level(m, &st, pgd, addr);
		else
			note_page(m, &st, _PAGE_INVALID, 1);
		addr += PGDIR_SIZE;
		cond_resched();
	}
	/* Flush out the last page */
	st.current_address = max_addr;
	note_page(m, &st, 0, 0);
}
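
/*
 * Standard seq_file plumbing: single_open() binds the seq_file to
 * ptdump_show(), so every read of the debugfs file triggers a fresh
 * walk of the kernel page tables.
 */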
static int ptdump_show(struct seq_file *m, void *v)
{
	walk_pgd_level(m);
	return 0;
}

static int ptdump_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, ptdump_show, NULL);
}

static const struct file_operations ptdump_fops = {
	.open = ptdump_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int pt_dump_init(void)
{
	/*
	 * Figure out the maximum virtual address being accessible with the
	 * kernel ASCE. We need this to keep the page table walker functions
	 * from accessing non-existent entries.
	 */
	max_addr = (S390_lowcore.kernel_asce & _REGION_ENTRY_TYPE_MASK) >> 2;
	max_addr = 1UL << (max_addr * 11 + 31);
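	/*
	 * Each s390 table level indexes 2048 entries, i.e. contributes 11
	 * address bits on top of the 31 bits a segment table (type 0) can
	 * map. A region-second table (type 2), for example, covers
	 * 1UL << (2 * 11 + 31) = 2^53 bytes.
	 */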
	address_markers[MODULES_NR].start_address = MODULES_VADDR;
	address_markers[VMEMMAP_NR].start_address = (unsigned long) vmemmap;
	address_markers[VMALLOC_NR].start_address = VMALLOC_START;
	debugfs_create_file("kernel_page_tables", 0400, NULL, NULL, &ptdump_fops);
	return 0;
}
device_initcall(pt_dump_init);
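
/*
 * Usage (illustrative, assuming debugfs is mounted at the default
 * location):
 *
 *   cat /sys/kernel/debug/kernel_page_tables
 *
 * The file is created with mode 0400, so reading it requires root.
 */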