// SPDX-License-Identifier: GPL-2.0-only
/*
 * This kernel test validates architecture page table helpers and
 * accessors and helps in verifying their continued compliance with
 * expected generic MM semantics.
 *
 * Copyright (C) 2019 ARM Ltd.
 *
 * Author: Anshuman Khandual <anshuman.khandual@arm.com>
 */
#define pr_fmt(fmt) "debug_vm_pgtable: %s: " fmt, __func__

#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/kconfig.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/pfn_t.h>
#include <linux/printk.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/start_kernel.h>
#include <linux/sched/mm.h>
#include <asm/pgalloc.h>

#define VMFLAGS	(VM_READ|VM_WRITE|VM_EXEC)

/*
 * On the s390 platform, the lower 4 bits are used to identify a given
 * page table entry type. But these bits might affect the ability to
 * clear entries with pxx_clear() because of how dynamic page table
 * folding works on s390. So while loading up the entries do not change
 * the lower 4 bits. This does not affect any other platform.
 */
#define S390_MASK_BITS	4
#define RANDOM_ORVALUE	GENMASK(BITS_PER_LONG - 1, S390_MASK_BITS)
#define RANDOM_NZVALUE	GENMASK(7, 0)
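
/*
 * Basic operations
 *
 * The mkyoung/mkold, mkdirty/mkclean and mkwrite/wrprotect helper pairs
 * must round-trip: in a chain like pte_mkyoung(pte_mkold(pte)), the
 * outermost helper determines the final state of the attribute. Note
 * that on a 64-bit platform RANDOM_ORVALUE above works out to
 * 0xfffffffffffffff0.
 */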
static void __init pte_basic_tests(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	WARN_ON(!pte_same(pte, pte));
	WARN_ON(!pte_young(pte_mkyoung(pte_mkold(pte))));
	WARN_ON(!pte_dirty(pte_mkdirty(pte_mkclean(pte))));
	WARN_ON(!pte_write(pte_mkwrite(pte_wrprotect(pte))));
	WARN_ON(pte_young(pte_mkold(pte_mkyoung(pte))));
	WARN_ON(pte_dirty(pte_mkclean(pte_mkdirty(pte))));
	WARN_ON(pte_write(pte_wrprotect(pte_mkwrite(pte))));
}
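
/*
 * The PMD and PUD variants below repeat the same round-trip checks for
 * huge page entries. They bail out early unless the kernel was built
 * with transparent hugepage support at that level and
 * has_transparent_hugepage() reports runtime support.
 */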
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_basic_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd = pfn_pmd(pfn, prot);

	if (!has_transparent_hugepage())
		return;

	WARN_ON(!pmd_same(pmd, pmd));
	WARN_ON(!pmd_young(pmd_mkyoung(pmd_mkold(pmd))));
	WARN_ON(!pmd_dirty(pmd_mkdirty(pmd_mkclean(pmd))));
	WARN_ON(!pmd_write(pmd_mkwrite(pmd_wrprotect(pmd))));
	WARN_ON(pmd_young(pmd_mkold(pmd_mkyoung(pmd))));
	WARN_ON(pmd_dirty(pmd_mkclean(pmd_mkdirty(pmd))));
	WARN_ON(pmd_write(pmd_wrprotect(pmd_mkwrite(pmd))));
	/*
	 * A huge page does not point to next level page table
	 * entry. Hence this must qualify as pmd_bad().
	 */
	WARN_ON(!pmd_bad(pmd_mkhuge(pmd)));
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_basic_tests(unsigned long pfn, pgprot_t prot)
{
	pud_t pud = pfn_pud(pfn, prot);

	if (!has_transparent_hugepage())
		return;

	WARN_ON(!pud_same(pud, pud));
	WARN_ON(!pud_young(pud_mkyoung(pud_mkold(pud))));
	WARN_ON(!pud_write(pud_mkwrite(pud_wrprotect(pud))));
	WARN_ON(pud_write(pud_wrprotect(pud_mkwrite(pud))));
	WARN_ON(pud_young(pud_mkold(pud_mkyoung(pud))));

	/*
	 * There is no mm in scope here, so check for a folded PMD level
	 * at compile time rather than via mm_pmd_folded().
	 */
#ifndef __PAGETABLE_PMD_FOLDED
	/*
	 * A huge page does not point to next level page table
	 * entry. Hence this must qualify as pud_bad().
	 */
	WARN_ON(!pud_bad(pud_mkhuge(pud)));
#endif /* !__PAGETABLE_PMD_FOLDED */
}
#else  /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_basic_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else  /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_basic_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pud_basic_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
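
/*
 * The P4D and PGD levels are folded on many configurations, so only
 * pxx_same() is exercised here, on an entry filled with a non-zero
 * garbage pattern.
 */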
static void __init p4d_basic_tests(unsigned long pfn, pgprot_t prot)
{
	p4d_t p4d;

	memset(&p4d, RANDOM_NZVALUE, sizeof(p4d_t));
	WARN_ON(!p4d_same(p4d, p4d));
}

static void __init pgd_basic_tests(unsigned long pfn, pgprot_t prot)
{
	pgd_t pgd;

	memset(&pgd, RANDOM_NZVALUE, sizeof(pgd_t));
	WARN_ON(!pgd_same(pgd, pgd));
}
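
/*
 * The pxx_clear() tests below load an entry with a random pattern
 * (leaving the lower S390_MASK_BITS untouched), clear it, and verify
 * that the entry then reads back as pxx_none(). The pxx_populate()
 * tests wire a lower level page table page into an entry and verify
 * that the result does not qualify as pxx_bad().
 */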
#ifndef __PAGETABLE_PUD_FOLDED
static void __init pud_clear_tests(struct mm_struct *mm, pud_t *pudp)
{
	pud_t pud = READ_ONCE(*pudp);

	if (mm_pmd_folded(mm))
		return;

	pud = __pud(pud_val(pud) | RANDOM_ORVALUE);
	WRITE_ONCE(*pudp, pud);
	pud_clear(pudp);
	pud = READ_ONCE(*pudp);
	WARN_ON(!pud_none(pud));
}

static void __init pud_populate_tests(struct mm_struct *mm, pud_t *pudp,
				      pmd_t *pmdp)
{
	pud_t pud;

	if (mm_pmd_folded(mm))
		return;
	/*
	 * This entry points to next level page table page.
	 * Hence this must not qualify as pud_bad().
	 */
	pmd_clear(pmdp);
	pud_clear(pudp);
	pud_populate(mm, pudp, pmdp);
	pud = READ_ONCE(*pudp);
	WARN_ON(pud_bad(pud));
}
#else  /* !__PAGETABLE_PUD_FOLDED */
static void __init pud_clear_tests(struct mm_struct *mm, pud_t *pudp) { }
static void __init pud_populate_tests(struct mm_struct *mm, pud_t *pudp,
				      pmd_t *pmdp)
{
}
#endif /* __PAGETABLE_PUD_FOLDED */

#ifndef __PAGETABLE_P4D_FOLDED
static void __init p4d_clear_tests(struct mm_struct *mm, p4d_t *p4dp)
{
	p4d_t p4d = READ_ONCE(*p4dp);

	if (mm_pud_folded(mm))
		return;

	p4d = __p4d(p4d_val(p4d) | RANDOM_ORVALUE);
	WRITE_ONCE(*p4dp, p4d);
	p4d_clear(p4dp);
	p4d = READ_ONCE(*p4dp);
	WARN_ON(!p4d_none(p4d));
}

static void __init p4d_populate_tests(struct mm_struct *mm, p4d_t *p4dp,
				      pud_t *pudp)
{
	p4d_t p4d;

	if (mm_pud_folded(mm))
		return;

	/*
	 * This entry points to next level page table page.
	 * Hence this must not qualify as p4d_bad().
	 */
	pud_clear(pudp);
	p4d_clear(p4dp);
	p4d_populate(mm, p4dp, pudp);
	p4d = READ_ONCE(*p4dp);
	WARN_ON(p4d_bad(p4d));
}

static void __init pgd_clear_tests(struct mm_struct *mm, pgd_t *pgdp)
{
	pgd_t pgd = READ_ONCE(*pgdp);

	if (mm_p4d_folded(mm))
		return;

	pgd = __pgd(pgd_val(pgd) | RANDOM_ORVALUE);
	WRITE_ONCE(*pgdp, pgd);
	pgd_clear(pgdp);
	pgd = READ_ONCE(*pgdp);
	WARN_ON(!pgd_none(pgd));
}

static void __init pgd_populate_tests(struct mm_struct *mm, pgd_t *pgdp,
				      p4d_t *p4dp)
{
	pgd_t pgd;

	if (mm_p4d_folded(mm))
		return;

	/*
	 * This entry points to next level page table page.
	 * Hence this must not qualify as pgd_bad().
	 */
	p4d_clear(p4dp);
	pgd_clear(pgdp);
	pgd_populate(mm, pgdp, p4dp);
	pgd = READ_ONCE(*pgdp);
	WARN_ON(pgd_bad(pgd));
}
#else  /* !__PAGETABLE_P4D_FOLDED */
static void __init p4d_clear_tests(struct mm_struct *mm, p4d_t *p4dp) { }
static void __init pgd_clear_tests(struct mm_struct *mm, pgd_t *pgdp) { }
static void __init p4d_populate_tests(struct mm_struct *mm, p4d_t *p4dp,
				      pud_t *pudp)
{
}
static void __init pgd_populate_tests(struct mm_struct *mm, pgd_t *pgdp,
				      p4d_t *p4dp)
{
}
#endif /* __PAGETABLE_P4D_FOLDED */
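
/*
 * Unlike the upper levels, the PTE entry is installed and cleared via
 * set_pte_at() and pte_clear(), and read back via ptep_get(), so that
 * architectures with special PTE update requirements are exercised
 * through their real interfaces.
 */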
static void __init pte_clear_tests(struct mm_struct *mm, pte_t *ptep,
				   unsigned long vaddr)
{
	pte_t pte = ptep_get(ptep);

	pte = __pte(pte_val(pte) | RANDOM_ORVALUE);
	set_pte_at(mm, vaddr, ptep, pte);
	barrier();
	pte_clear(mm, vaddr, ptep);
	pte = ptep_get(ptep);
	WARN_ON(!pte_none(pte));
}

static void __init pmd_clear_tests(struct mm_struct *mm, pmd_t *pmdp)
{
	pmd_t pmd = READ_ONCE(*pmdp);

	pmd = __pmd(pmd_val(pmd) | RANDOM_ORVALUE);
	WRITE_ONCE(*pmdp, pmd);
	pmd_clear(pmdp);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(!pmd_none(pmd));
}

static void __init pmd_populate_tests(struct mm_struct *mm, pmd_t *pmdp,
				      pgtable_t pgtable)
{
	pmd_t pmd;

	/*
	 * This entry points to next level page table page.
	 * Hence this must not qualify as pmd_bad().
	 */
	pmd_clear(pmdp);
	pmd_populate(mm, pmdp, pgtable);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(pmd_bad(pmd));
}
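
/*
 * Pick a random page aligned user space address in
 * [FIRST_USER_ADDRESS, TASK_SIZE) at which the test page table tree
 * will be instantiated.
 */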
static unsigned long __init get_random_vaddr(void)
{
	unsigned long random_vaddr, random_pages, total_user_pages;

	total_user_pages = (TASK_SIZE - FIRST_USER_ADDRESS) / PAGE_SIZE;

	random_pages = get_random_long() % total_user_pages;
	random_vaddr = FIRST_USER_ADDRESS + random_pages * PAGE_SIZE;

	return random_vaddr;
}
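
/*
 * Test driver: allocate a dedicated mm, build a page table tree for a
 * single random user virtual address, run the basic, clear and
 * populate tests at every level, then tear everything down again.
 */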
static int __init debug_vm_pgtable(void)
{
	struct mm_struct *mm;
	pgd_t *pgdp;
	p4d_t *p4dp, *saved_p4dp;
	pud_t *pudp, *saved_pudp;
	pmd_t *pmdp, *saved_pmdp, pmd;
	pte_t *ptep;
	pgtable_t saved_ptep;
	pgprot_t prot;
	phys_addr_t paddr;
	unsigned long vaddr, pte_aligned, pmd_aligned;
	unsigned long pud_aligned, p4d_aligned, pgd_aligned;
	spinlock_t *uninitialized_var(ptl);

	pr_info("Validating architecture page table helpers\n");
	prot = vm_get_page_prot(VMFLAGS);
	vaddr = get_random_vaddr();
	mm = mm_alloc();
	if (!mm) {
		pr_err("mm_struct allocation failed\n");
		return 1;
	}

	/*
	 * PFN for mapping at PTE level is determined from a standard kernel
	 * text symbol. But pfns for higher page table levels are derived by
	 * masking lower bits of this real pfn. These derived pfns might not
	 * exist on the platform but that does not really matter as pfn_pxx()
	 * helpers will still create appropriate entries for the test. This
	 * helps avoid large memory block allocations to be used for mapping
	 * at higher page table levels.
	 */
	paddr = __pa_symbol(&start_kernel);

	pte_aligned = (paddr & PAGE_MASK) >> PAGE_SHIFT;
	pmd_aligned = (paddr & PMD_MASK) >> PAGE_SHIFT;
	pud_aligned = (paddr & PUD_MASK) >> PAGE_SHIFT;
	p4d_aligned = (paddr & P4D_MASK) >> PAGE_SHIFT;
	pgd_aligned = (paddr & PGDIR_MASK) >> PAGE_SHIFT;
	WARN_ON(!pfn_valid(pte_aligned));
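	/*
	 * For example, with 4K base pages and 2M PMD entries, pmd_aligned
	 * is simply pte_aligned with its lower 9 bits cleared; only
	 * pte_aligned has to be a valid pfn.
	 */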

	pgdp = pgd_offset(mm, vaddr);
	p4dp = p4d_alloc(mm, pgdp, vaddr);
	pudp = pud_alloc(mm, p4dp, vaddr);
	pmdp = pmd_alloc(mm, pudp, vaddr);
	ptep = pte_alloc_map_lock(mm, pmdp, vaddr, &ptl);
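
	/*
	 * pte_alloc_map_lock() leaves the PTE page table mapped with its
	 * page table lock held; both are released via pte_unmap_unlock()
	 * once the clear tests below are done with the PTE level.
	 */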

	/*
	 * Save all the page table page addresses as the page table
	 * entries will be used for testing with random or garbage
	 * values. These saved addresses will be used for freeing
	 * page table pages.
	 */
	pmd = READ_ONCE(*pmdp);
	saved_p4dp = p4d_offset(pgdp, 0UL);
	saved_pudp = pud_offset(p4dp, 0UL);
	saved_pmdp = pmd_offset(pudp, 0UL);
	saved_ptep = pmd_pgtable(pmd);

	pte_basic_tests(pte_aligned, prot);
	pmd_basic_tests(pmd_aligned, prot);
	pud_basic_tests(pud_aligned, prot);
	p4d_basic_tests(p4d_aligned, prot);
	pgd_basic_tests(pgd_aligned, prot);

	pte_clear_tests(mm, ptep, vaddr);
	pmd_clear_tests(mm, pmdp);
	pud_clear_tests(mm, pudp);
	p4d_clear_tests(mm, p4dp);
	pgd_clear_tests(mm, pgdp);

	pte_unmap_unlock(ptep, ptl);
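
	/*
	 * The populate tests reattach the saved lower level page table
	 * pages to the entries that the clear tests above scribbled on,
	 * leaving a sane tree behind for the teardown below.
	 */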
	pmd_populate_tests(mm, pmdp, saved_ptep);
	pud_populate_tests(mm, pudp, saved_pmdp);
	p4d_populate_tests(mm, p4dp, saved_pudp);
	pgd_populate_tests(mm, pgdp, saved_p4dp);

	p4d_free(mm, saved_p4dp);
	pud_free(mm, saved_pudp);
	pmd_free(mm, saved_pmdp);
	pte_free(mm, saved_ptep);

	mm_dec_nr_puds(mm);
	mm_dec_nr_pmds(mm);
	mm_dec_nr_ptes(mm);
	mmdrop(mm);
	return 0;
}
late_initcall(debug_vm_pgtable);