// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2016, Rashmica Gupta, IBM Corp.
 *
 * This traverses the kernel virtual memory and dumps the pages that are in
 * the hash pagetable, along with their flags to
 * /sys/kernel/debug/kernel_hash_pagetable.
 *
 * If radix is enabled then there is no hash page table and so no debugfs file
 * is generated.
 */
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/const.h>
#include <asm/page.h>
#include <asm/plpar_wrappers.h>
#include <linux/memblock.h>
#include <asm/firmware.h>
#include <asm/pgalloc.h>
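
/*
 * Output sketch (illustrative only; the exact layout follows the
 * seq_printf() calls in dump_hpte_info() below):
 *
 *   ---[ Start of kernel VM ]---
 *   0x<ea>:	AVPN:<avpn> <V-word flags> rpn: <rpn> <R-word flags> base_ps: 64K actual_ps: 64K LP enc: <lp>
 */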

struct pg_state {
	struct seq_file *seq;
	const struct addr_marker *marker;
	unsigned long start_address;
	unsigned int level;
	u64 current_flags;
};

struct addr_marker {
	unsigned long start_address;
	const char *name;
};

static struct addr_marker address_markers[] = {
	{ 0,	"Start of kernel VM" },
	{ 0,	"vmalloc() Area" },
	{ 0,	"vmalloc() End" },
	{ 0,	"isa I/O start" },
	{ 0,	"isa I/O end" },
	{ 0,	"phb I/O start" },
	{ 0,	"phb I/O end" },
	{ 0,	"I/O remap start" },
	{ 0,	"I/O remap end" },
	{ 0,	"vmemmap start" },
};

struct flag_info {
	u64		mask;
	u64		val;
	const char	*set;
	const char	*clear;
	bool		is_val;
	int		shift;
};

static const struct flag_info v_flag_array[] = {
	{
		.mask	= SLB_VSID_B,
		.val	= SLB_VSID_B_256M,
		.set	= "ssize: 256M",
		.clear	= "ssize: 1T ",
	}, {
		.mask	= HPTE_V_SECONDARY,
		.val	= HPTE_V_SECONDARY,
		.set	= "secondary",
		.clear	= "primary",
	}, {
		.mask	= HPTE_V_VALID,
		.val	= HPTE_V_VALID,
		.set	= "valid",
	}, {
		.mask	= HPTE_V_BOLTED,
		.val	= HPTE_V_BOLTED,
		.set	= "bolted",
	}
};
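
/*
 * A hash PTE is a pair of 64-bit doublewords: the first ("V") word holds
 * the abbreviated VPN plus the valid/secondary/bolted bits decoded by
 * v_flag_array above; the second ("R") word holds the real page number,
 * protection, key and WIMG bits decoded by r_flag_array below.
 */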

static const struct flag_info r_flag_array[] = {
	{
		.mask	= HPTE_R_PP0 | HPTE_R_PP,
		.val	= PP_RWXX,
		.set	= "prot:RW--",
	}, {
		.mask	= HPTE_R_PP0 | HPTE_R_PP,
		.val	= PP_RWRX,
		.set	= "prot:RWR-",
	}, {
		.mask	= HPTE_R_PP0 | HPTE_R_PP,
		.val	= PP_RWRW,
		.set	= "prot:RWRW",
	}, {
		.mask	= HPTE_R_PP0 | HPTE_R_PP,
		.val	= PP_RXRX,
		.set	= "prot:R-R-",
	}, {
		.mask	= HPTE_R_PP0 | HPTE_R_PP,
		.val	= PP_RXXX,
		.set	= "prot:R---",
	}, {
		.mask	= HPTE_R_KEY_HI | HPTE_R_KEY_LO,
		.val	= HPTE_R_KEY_HI | HPTE_R_KEY_LO,
		.set	= "key",
		.is_val	= true,
	}
};

static int calculate_pagesize(struct pg_state *st, int ps, char s[])
{
	static const char units[] = "BKMGTPE";
	const char *unit = units;

	while (ps > 9 && unit[1]) {
		ps -= 10;
		unit++;
	}
	seq_printf(st->seq, " %s_ps: %i%c\t", s, 1 << ps, *unit);

	return ps;
}
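
/*
 * Example: calculate_pagesize(st, 16, "base") prints "base_ps: 64K"
 * (16 = 10 + 6, so 1 << 6 = 64 with the 'K' unit) and returns 6, while a
 * 4K shift of 12 returns 2; dump_hpte_info() uses that return value to
 * suppress the LP field for 4K actual pages.
 */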

static void dump_flag_info(struct pg_state *st, const struct flag_info
		*flag, u64 pte, int num)
{
	int i;

	for (i = 0; i < num; i++, flag++) {
		const char *s = NULL;
		u64 val;

		/* flag not defined so don't check it */
		if (flag->mask == 0)
			continue;
		/* Some 'flags' are actually values */
		if (flag->is_val) {
			val = pte & flag->val;
			if (flag->shift)
				val = val >> flag->shift;
			seq_printf(st->seq, " %s:%llx", flag->set, val);
		} else {
			if ((pte & flag->mask) == flag->val)
				s = flag->set;
			else
				s = flag->clear;
			if (s)
				seq_printf(st->seq, " %s", s);
		}
	}
}
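
/*
 * dump_hpte_info() below also steps st->marker through address_markers[],
 * printing a "---[ region name ]---" banner the first time the dump
 * crosses into each region populated by populate_markers().
 */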

static void dump_hpte_info(struct pg_state *st, unsigned long ea, u64 v, u64 r,
		unsigned long rpn, int bps, int aps, unsigned long lp)
{
	int aps_index;

	while (ea >= st->marker[1].start_address) {
		st->marker++;
		seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
	}
	seq_printf(st->seq, "0x%lx:\t", ea);
	seq_printf(st->seq, "AVPN:%llx\t", HPTE_V_AVPN_VAL(v));
	dump_flag_info(st, v_flag_array, v, ARRAY_SIZE(v_flag_array));
	seq_printf(st->seq, " rpn: %lx\t", rpn);
	dump_flag_info(st, r_flag_array, r, ARRAY_SIZE(r_flag_array));

	calculate_pagesize(st, bps, "base");
	aps_index = calculate_pagesize(st, aps, "actual");
	if (aps_index != 2)	/* 4K actual pages carry no LP encoding */
		seq_printf(st->seq, "LP enc: %lx", lp);
	seq_putc(st->seq, '\n');
}

static int native_find(unsigned long ea, int psize, bool primary, u64 *v,
		u64 *r)
{
	struct hash_pte *hptep;
	unsigned long hash, vsid, vpn, hpte_group, want_v, hpte_v;
	int i, ssize = mmu_kernel_ssize;
	unsigned long shift = mmu_psize_defs[psize].shift;

	/* calculate hash */
	vsid = get_kernel_vsid(ea, ssize);
	vpn  = hpt_vpn(ea, vsid, ssize);
	hash = hpt_hash(vpn, shift, ssize);
	want_v = hpte_encode_avpn(vpn, psize, ssize);

	/* to check in the secondary hash table, we invert the hash */
	if (!primary)
		hash = ~hash;

	hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	for (i = 0; i < HPTES_PER_GROUP; i++) {
		hptep = htab_address + hpte_group;
		hpte_v = be64_to_cpu(hptep->v);

		if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) {
			/* HPTE matches */
			*v = be64_to_cpu(hptep->v);
			*r = be64_to_cpu(hptep->r);
			return 0;
		}
		++hpte_group;
	}
	return -1;
}
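
/*
 * Each hash value selects a group of HPTES_PER_GROUP slots, and every
 * mapping has two candidate groups: the primary (hash) and the secondary
 * (~hash). That is why the find functions take a 'primary' flag and
 * hpte_find() below probes both groups in turn.
 */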

static int pseries_find(unsigned long ea, int psize, bool primary, u64 *v, u64 *r)
{
	struct hash_pte ptes[4];
	unsigned long vsid, vpn, hash, hpte_group, want_v;
	int i, j, ssize = mmu_kernel_ssize;
	long lpar_rc = 0;
	unsigned long shift = mmu_psize_defs[psize].shift;

	/* calculate hash */
	vsid = get_kernel_vsid(ea, ssize);
	vpn  = hpt_vpn(ea, vsid, ssize);
	hash = hpt_hash(vpn, shift, ssize);
	want_v = hpte_encode_avpn(vpn, psize, ssize);

	/* to check in the secondary hash table, we invert the hash */
	if (!primary)
		hash = ~hash;

	hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	/* see if we can find an entry in the hpte with this hash */
	for (i = 0; i < HPTES_PER_GROUP; i += 4, hpte_group += 4) {
		lpar_rc = plpar_pte_read_4(0, hpte_group, (void *)ptes);

		if (lpar_rc)
			continue;
		for (j = 0; j < 4; j++) {
			if (HPTE_V_COMPARE(ptes[j].v, want_v) &&
			    (ptes[j].v & HPTE_V_VALID)) {
				/* HPTE matches */
				*v = ptes[j].v;
				*r = ptes[j].r;
				return 0;
			}
		}
	}
	return -1;
}
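
/*
 * On an LPAR the hash table is owned by the hypervisor, so pseries_find()
 * fetches candidate entries with the H_READ hcall (plpar_pte_read_4()
 * reads four HPTEs per call) instead of dereferencing htab_address as
 * native_find() does.
 */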

static void decode_r(int bps, unsigned long r, unsigned long *rpn, int *aps,
		     unsigned long *lp_bits)
{
	struct mmu_psize_def entry;
	unsigned long arpn, mask, lp;
	int penc = -2, idx = 0, shift;

	/*
	 * The LP field has 8 bits. Depending on the actual page size, some of
	 * these bits are concatenated with the ARPN to get the RPN. The rest
	 * of the bits in the LP field are the LP value, an encoding of the
	 * base page size and the actual page size.
	 *
	 *  - find the mmu entry for our base page size
	 *  - go through all page encodings and use the associated mask to
	 *    find an encoding that matches our encoding in the LP field.
	 */
	arpn = (r & HPTE_R_RPN) >> HPTE_R_RPN_SHIFT;
	lp = arpn & 0xff;

	entry = mmu_psize_defs[bps];
	while (idx < MMU_PAGE_COUNT) {
		penc = entry.penc[idx];
		if ((penc != -1) && (mmu_psize_defs[idx].shift)) {
			shift = mmu_psize_defs[idx].shift - HPTE_R_RPN_SHIFT;
			mask = (0x1 << shift) - 1;
			if ((lp & mask) == penc) {
				*aps = mmu_psize_to_shift(idx);
				*lp_bits = lp & mask;
				*rpn = arpn >> shift;
				return;
			}
		}
		idx++;
	}
}
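
/*
 * Worked example of the arithmetic above (HPTE_R_RPN_SHIFT is 12): if the
 * matching actual page size has shift 16 (64K), then shift = 16 - 12 = 4,
 * mask = 0xf, the low 4 bits of LP are compared against the penc value,
 * and rpn = arpn >> 4, i.e. 4 LP bits are given back to the RPN.
 */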

static int base_hpte_find(unsigned long ea, int psize, bool primary, u64 *v,
			  u64 *r)
{
	if (IS_ENABLED(CONFIG_PPC_PSERIES) && firmware_has_feature(FW_FEATURE_LPAR))
		return pseries_find(ea, psize, primary, v, r);

	return native_find(ea, psize, primary, v, r);
}

static unsigned long hpte_find(struct pg_state *st, unsigned long ea, int psize)
{
	unsigned long slot;
	u64 v = 0, r = 0;
	unsigned long rpn, lp_bits;
	int base_psize = 0, actual_psize = 0;

	if (ea < PAGE_OFFSET)
		return -1;

	/* Look in primary table */
	slot = base_hpte_find(ea, psize, true, &v, &r);

	/* Look in secondary table */
	if (slot == -1)
		slot = base_hpte_find(ea, psize, false, &v, &r);

	/* No entry found */
	if (slot == -1)
		return -1;

	/*
	 * We found an entry in the hash page table:
	 *  - check that this has the same base page
	 *  - find the actual page size
	 *  - find the RPN
	 */
	base_psize = mmu_psize_to_shift(psize);

	if ((v & HPTE_V_LARGE) == HPTE_V_LARGE) {
		decode_r(psize, r, &rpn, &actual_psize, &lp_bits);
	} else {
		/* 4K actual page size */
		actual_psize = 12;
		rpn = (r & HPTE_R_RPN) >> HPTE_R_RPN_SHIFT;
		/* In this case there are no LP bits */
		lp_bits = -1;
	}
	/*
	 * We didn't find a matching encoding, so the PTE we found isn't for
	 * this address.
	 */
	if (actual_psize == -1)
		return -1;

	dump_hpte_info(st, ea, v, r, rpn, base_psize, actual_psize, lp_bits);
	return 0;
}

static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start)
{
	pte_t *pte = pte_offset_kernel(pmd, 0);
	unsigned long addr, pteval, psize;
	int i, status;

	for (i = 0; i < PTRS_PER_PTE; i++, pte++) {
		addr = start + i * PAGE_SIZE;
		pteval = pte_val(*pte);

		if (addr < VMALLOC_END)
			psize = mmu_vmalloc_psize;
		else
			psize = mmu_io_psize;

		/* check for secret 4K mappings */
		if (IS_ENABLED(CONFIG_PPC_64K_PAGES) &&
		    ((pteval & H_PAGE_COMBO) == H_PAGE_COMBO ||
		     (pteval & H_PAGE_4K_PFN) == H_PAGE_4K_PFN))
			psize = mmu_io_psize;

		/* check for hashpte */
		status = hpte_find(st, addr, psize);

		if (((pteval & H_PAGE_HASHPTE) != H_PAGE_HASHPTE)
		    && (status != -1)) {
			/* found a hpte that is not in the linux page tables */
			seq_printf(st->seq, "page probably bolted before linux"
				   " pagetables were set: addr:%lx, pteval:%lx\n",
				   addr, pteval);
		}
	}
}
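
/*
 * The check above reports addresses where hpte_find() succeeded although
 * the Linux PTE lacks H_PAGE_HASHPTE: such mappings were most likely
 * bolted into the hash table before the Linux page tables were set up.
 */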

static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
{
	pmd_t *pmd = pmd_offset(pud, 0);
	unsigned long addr;
	unsigned int i;

	for (i = 0; i < PTRS_PER_PMD; i++, pmd++) {
		addr = start + i * PMD_SIZE;
		if (!pmd_none(*pmd))
			/* pmd exists */
			walk_pte(st, pmd, addr);
	}
}

static void walk_pud(struct pg_state *st, p4d_t *p4d, unsigned long start)
{
	pud_t *pud = pud_offset(p4d, 0);
	unsigned long addr;
	unsigned int i;

	for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
		addr = start + i * PUD_SIZE;
		if (!pud_none(*pud))
			/* pud exists */
			walk_pmd(st, pud, addr);
	}
}

static void walk_p4d(struct pg_state *st, pgd_t *pgd, unsigned long start)
{
	p4d_t *p4d = p4d_offset(pgd, 0);
	unsigned long addr;
	unsigned int i;

	for (i = 0; i < PTRS_PER_P4D; i++, p4d++) {
		addr = start + i * P4D_SIZE;
		if (!p4d_none(*p4d))
			/* p4d exists */
			walk_pud(st, p4d, addr);
	}
}

static void walk_pagetables(struct pg_state *st)
{
	pgd_t *pgd = pgd_offset_k(0UL);
	unsigned int i;
	unsigned long addr;

	/*
	 * Traverse the linux pagetable structure and dump pages that are in
	 * the hash pagetable.
	 */
	for (i = 0; i < PTRS_PER_PGD; i++, pgd++) {
		addr = KERN_VIRT_START + i * PGDIR_SIZE;
		if (!pgd_none(*pgd))
			/* pgd exists */
			walk_p4d(st, pgd, addr);
	}
}
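
/*
 * walk_pagetables() descends the five-level Linux table
 * (pgd -> p4d -> pud -> pmd -> pte), visiting only populated entries,
 * and walk_pte() then asks hpte_find() about every leaf address.
 */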

static void walk_linearmapping(struct pg_state *st)
{
	unsigned long addr;

	/*
	 * Traverse the linear mapping section of virtual memory and dump pages
	 * that are in the hash pagetable.
	 */
	unsigned long psize = 1 << mmu_psize_defs[mmu_linear_psize].shift;

	for (addr = PAGE_OFFSET; addr < PAGE_OFFSET +
			memblock_end_of_DRAM(); addr += psize)
		hpte_find(st, addr, mmu_linear_psize);
}

static void walk_vmemmap(struct pg_state *st)
{
	struct vmemmap_backing *ptr = vmemmap_list;

	if (!IS_ENABLED(CONFIG_SPARSEMEM_VMEMMAP))
		return;
	/*
	 * Traverse the vmemmaped memory and dump pages that are in the hash
	 * pagetable.
	 */
	while (ptr->list) {
		hpte_find(st, ptr->virt_addr, mmu_vmemmap_psize);
		ptr = ptr->list;
	}
	seq_puts(st->seq, "---[ vmemmap end ]---\n");
}

static void populate_markers(void)
{
	address_markers[0].start_address = PAGE_OFFSET;
	address_markers[1].start_address = VMALLOC_START;
	address_markers[2].start_address = VMALLOC_END;
	address_markers[3].start_address = ISA_IO_BASE;
	address_markers[4].start_address = ISA_IO_END;
	address_markers[5].start_address = PHB_IO_BASE;
	address_markers[6].start_address = PHB_IO_END;
	address_markers[7].start_address = IOREMAP_BASE;
	address_markers[8].start_address = IOREMAP_END;
	address_markers[9].start_address = H_VMEMMAP_START;
}

static int ptdump_show(struct seq_file *m, void *v)
{
	struct pg_state st = {
		.seq = m,
		.start_address = PAGE_OFFSET,
		.marker = address_markers,
	};
	/*
	 * Traverse the 0xc, 0xd and 0xf areas of the kernel virtual memory and
	 * dump pages that are in the hash pagetable.
	 */
	walk_linearmapping(&st);
	walk_pagetables(&st);
	walk_vmemmap(&st);
	return 0;
}

static int ptdump_open(struct inode *inode, struct file *file)
{
	return single_open(file, ptdump_show, NULL);
}

static const struct file_operations ptdump_fops = {
	.open		= ptdump_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int ptdump_init(void)
{
	if (!radix_enabled()) {
		populate_markers();
		debugfs_create_file("kernel_hash_pagetable", 0400, NULL, NULL,
				    &ptdump_fops);
	}
	return 0;
}
device_initcall(ptdump_init);
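
/*
 * Usage (assuming debugfs is mounted at /sys/kernel/debug and the kernel
 * is running with the hash MMU, i.e. not radix):
 *
 *	cat /sys/kernel/debug/kernel_hash_pagetable
 */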