arch/x86/mm/pat/cpa-test.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Self test for change_page_attr.
 *
 * Clears a test pte bit on random pages in the direct mapping,
 * then reverts the change and compares the page tables before and after.
 */
#include <linux/memblock.h>
#include <linux/kthread.h>
#include <linux/random.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/kdebug.h>

/*
 * Only print the results of the first pass:
 */
static __read_mostly int print = 1;

enum {
	NTEST	= 3 * 100,
	NPAGES	= 100,
#ifdef CONFIG_X86_64
	LPS	= (1 << PMD_SHIFT),
#elif defined(CONFIG_X86_PAE)
	LPS	= (1 << PMD_SHIFT),
#else
	LPS	= (1 << 22),
#endif
	GPS	= (1 << 30)
};

#define PAGE_CPA_TEST	__pgprot(_PAGE_CPA_TEST)
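
/*
 * The test attribute, _PAGE_CPA_TEST, corresponds to the software-defined
 * PTE bit checked below (_PAGE_SOFTW1); pte_testbit() reports whether that
 * bit is currently set in a given pte.
 */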
static int pte_testbit(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SOFTW1;
}

struct split_state {
	long lpg, gpg, spg, exec;
	long min_exec, max_exec;
};
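
/*
 * Walk the whole direct mapping and count how much of it is mapped with
 * 4k, 2M and 1G pages and how many pages are executable.  The per-size
 * counts are cross-checked against the number of pfns walked, so a mapping
 * lost or miscounted by CPA shows up as a mismatch.
 */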
static int print_split(struct split_state *s)
{
	long i, expected, missed = 0;
	int err = 0;

	s->lpg = s->gpg = s->spg = s->exec = 0;
	s->min_exec = ~0UL;
	s->max_exec = 0;
	for (i = 0; i < max_pfn_mapped; ) {
		unsigned long addr = (unsigned long)__va(i << PAGE_SHIFT);
		unsigned int level;
		pte_t *pte;

		pte = lookup_address(addr, &level);
		if (!pte) {
			missed++;
			i++;
			continue;
		}

		if (level == PG_LEVEL_1G && sizeof(long) == 8) {
			s->gpg++;
			i += GPS/PAGE_SIZE;
		} else if (level == PG_LEVEL_2M) {
			if ((pte_val(*pte) & _PAGE_PRESENT) && !(pte_val(*pte) & _PAGE_PSE)) {
				printk(KERN_ERR
					"%lx level %d but not PSE %Lx\n",
					addr, level, (u64)pte_val(*pte));
				err = 1;
			}
			s->lpg++;
			i += LPS/PAGE_SIZE;
		} else {
			s->spg++;
			i++;
		}
		if (!(pte_val(*pte) & _PAGE_NX)) {
			s->exec++;
			if (addr < s->min_exec)
				s->min_exec = addr;
			if (addr > s->max_exec)
				s->max_exec = addr;
		}
	}
	if (print) {
		printk(KERN_INFO
			" 4k %lu large %lu gb %lu x %lu[%lx-%lx] miss %lu\n",
			s->spg, s->lpg, s->gpg, s->exec,
			s->min_exec != ~0UL ? s->min_exec : 0,
			s->max_exec, missed);
	}

	expected = (s->gpg*GPS + s->lpg*LPS)/PAGE_SIZE + s->spg + missed;
	if (expected != i) {
		printk(KERN_ERR "CPA max_pfn_mapped %lu but expected %lu\n",
			max_pfn_mapped, expected);
		return 1;
	}
	return err;
}

static unsigned long addr[NTEST];
static unsigned int len[NTEST];

static struct page *pages[NPAGES];
static unsigned long addrs[NPAGES];

/* Set and clear a test pte bit on random pages in the direct mapping */
static int pageattr_test(void)
{
	struct split_state sa, sb, sc;
	unsigned long *bm;
	pte_t *pte, pte0;
	int failed = 0;
	unsigned int level;
	int i, k;
	int err;

	if (print)
		printk(KERN_INFO "CPA self-test:\n");

	bm = vzalloc((max_pfn_mapped + 7) / 8);
	if (!bm) {
		printk(KERN_ERR "CPA Cannot vmalloc bitmap\n");
		return -ENOMEM;
	}

	failed += print_split(&sa);

	for (i = 0; i < NTEST; i++) {
		unsigned long pfn = get_random_u32_below(max_pfn_mapped);

		addr[i] = (unsigned long)__va(pfn << PAGE_SHIFT);
		len[i] = get_random_u32_below(NPAGES);
		len[i] = min_t(unsigned long, len[i], max_pfn_mapped - pfn - 1);

		if (len[i] == 0)
			len[i] = 1;

		pte = NULL;
		pte0 = pfn_pte(0, __pgprot(0)); /* shut gcc up */

		for (k = 0; k < len[i]; k++) {
			pte = lookup_address(addr[i] + k*PAGE_SIZE, &level);
			if (!pte || pgprot_val(pte_pgprot(*pte)) == 0 ||
			    !(pte_val(*pte) & _PAGE_PRESENT)) {
				addr[i] = 0;
				break;
			}

			if (k == 0) {
				pte0 = *pte;
			} else {
				if (pgprot_val(pte_pgprot(*pte)) !=
				    pgprot_val(pte_pgprot(pte0))) {
					len[i] = k;
					break;
				}
			}

			if (test_bit(pfn + k, bm)) {
				len[i] = k;
				break;
			}

			__set_bit(pfn + k, bm);
			addrs[k] = addr[i] + k*PAGE_SIZE;
			pages[k] = pfn_to_page(pfn + k);
		}
		if (!addr[i] || !pte || !k) {
			addr[i] = 0;
			continue;
		}
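
		/*
		 * Rotate through the three CPA interfaces: a single address
		 * range, an array of addresses, and an array of struct page
		 * pointers, so each variant gets exercised.
		 */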
		switch (i % 3) {
		case 0:
			err = change_page_attr_set(&addr[i], len[i], PAGE_CPA_TEST, 0);
			break;

		case 1:
			err = change_page_attr_set(addrs, len[1], PAGE_CPA_TEST, 1);
			break;

		case 2:
			err = cpa_set_pages_array(pages, len[i], PAGE_CPA_TEST);
			break;
		}

		if (err < 0) {
			printk(KERN_ERR "CPA %d failed %d\n", i, err);
			failed++;
		}

		pte = lookup_address(addr[i], &level);
		if (!pte || !pte_testbit(*pte) || pte_huge(*pte)) {
			printk(KERN_ERR "CPA %lx: bad pte %Lx\n", addr[i],
				pte ? (u64)pte_val(*pte) : 0ULL);
			failed++;
		}
		if (level != PG_LEVEL_4K) {
			printk(KERN_ERR "CPA %lx: unexpected level %d\n",
				addr[i], level);
			failed++;
		}
	}
	vfree(bm);

	failed += print_split(&sb);
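
	/*
	 * Second pass: clear the test bit on every range that was set above
	 * and verify that the revert actually took effect.
	 */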
	for (i = 0; i < NTEST; i++) {
		if (!addr[i])
			continue;
		pte = lookup_address(addr[i], &level);
		if (!pte) {
			printk(KERN_ERR "CPA lookup of %lx failed\n", addr[i]);
			failed++;
			continue;
		}
		err = change_page_attr_clear(&addr[i], len[i], PAGE_CPA_TEST, 0);
		if (err < 0) {
			printk(KERN_ERR "CPA reverting failed: %d\n", err);
			failed++;
		}
		pte = lookup_address(addr[i], &level);
		if (!pte || pte_testbit(*pte)) {
			printk(KERN_ERR "CPA %lx: bad pte after revert %Lx\n",
				addr[i], pte ? (u64)pte_val(*pte) : 0ULL);
			failed++;
		}
	}

	failed += print_split(&sc);

	if (failed) {
		WARN(1, KERN_ERR "NOT PASSED. Please report.\n");
		return -EINVAL;
	} else {
		if (print)
			printk(KERN_INFO "ok.\n");
	}

	return 0;
}
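
/*
 * Kthread body: re-run the self-test every 30 seconds until the thread is
 * stopped or a run fails.  'print' starts at 1, so only the first run
 * prints its statistics.
 */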
static int do_pageattr_test(void *__unused)
{
	while (!kthread_should_stop()) {
		schedule_timeout_interruptible(HZ*30);
		if (pageattr_test() < 0)
			break;
		if (print)
			print--;
	}
	return 0;
}
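
/*
 * Create and wake the test thread at device_initcall() time.  This self-test
 * is normally only built when CPA debugging is enabled in the kernel
 * configuration (typically CONFIG_CPA_DEBUG).
 */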
static int start_pageattr_test(void)
{
	struct task_struct *p;

	p = kthread_create(do_pageattr_test, NULL, "pageattr-test");
	if (!IS_ERR(p))
		wake_up_process(p);
	else
		WARN_ON(1);

	return 0;
}
device_initcall(start_pageattr_test);