/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include "gt/intel_engine_pm.h"
#include "i915_selftest.h"

#include "gem/selftests/mock_context.h"
#include "selftests/igt_reset.h"
#include "selftests/igt_spinner.h"

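/* State shared between the subtests: the expected table and a scratch vma */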
struct live_mocs {
	struct drm_i915_mocs_table table;
	struct i915_vma *scratch;
	void *vaddr;
};

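/*
 * Submit the request and wait up to 200ms (HZ / 5) for it to complete,
 * folding a timeout into any error passed in by the caller.
 */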
static int request_add_sync(struct i915_request *rq, int err)
{
	i915_request_get(rq);
	i915_request_add(rq);
	if (i915_request_wait(rq, 0, HZ / 5) < 0)
		err = -ETIME;
	i915_request_put(rq);

	return err;
}

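/* Submit the request and wait until the spinner is confirmed to be running */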
static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin)
{
	int err = 0;

	i915_request_get(rq);
	i915_request_add(rq);
	if (spin && !igt_wait_for_spinner(spin, rq))
		err = -ETIME;
	i915_request_put(rq);

	return err;
}

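/*
 * A single page of internal memory, pinned in the GGTT so that the CS
 * can write register values into it with MI_STORE_REGISTER_MEM.
 */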
static struct i915_vma *create_scratch(struct intel_gt *gt)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int err;

	obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	i915_gem_object_set_cache_coherency(obj, I915_CACHING_CACHED);

	vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		i915_gem_object_put(obj);
		return vma;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
	if (err) {
		i915_gem_object_put(obj);
		return ERR_PTR(err);
	}

	return vma;
}

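/* Look up the platform's expected MOCS table and map the scratch page */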
static int live_mocs_init(struct live_mocs *arg, struct intel_gt *gt)
{
	int err;

	if (!get_mocs_settings(gt->i915, &arg->table))
		return -EINVAL;

	arg->scratch = create_scratch(gt);
	if (IS_ERR(arg->scratch))
		return PTR_ERR(arg->scratch);

	arg->vaddr = i915_gem_object_pin_map(arg->scratch->obj, I915_MAP_WB);
	if (IS_ERR(arg->vaddr)) {
		err = PTR_ERR(arg->vaddr);
		goto err_scratch;
	}

	return 0;

err_scratch:
	i915_vma_unpin_and_release(&arg->scratch, 0);
	return err;
}

static void live_mocs_fini(struct live_mocs *arg)
{
	i915_vma_unpin_and_release(&arg->scratch, I915_VMA_RELEASE_MAP);
}

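/*
 * Emit one SRM (4 dwords) per register, storing each value into the
 * scratch page at *offset and advancing both addresses as we go.
 */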
static int read_regs(struct i915_request *rq,
		     u32 addr, unsigned int count,
		     u32 *offset)
{
	unsigned int i;
	u32 *cs;

	GEM_BUG_ON(!IS_ALIGNED(*offset, sizeof(u32)));

	cs = intel_ring_begin(rq, 4 * count);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	for (i = 0; i < count; i++) {
		*cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
		*cs++ = addr;
		*cs++ = *offset;
		*cs++ = 0;

		addr += sizeof(u32);
		*offset += sizeof(u32);
	}

	intel_ring_advance(rq, cs);

	return 0;
}

static int read_mocs_table(struct i915_request *rq,
			   const struct drm_i915_mocs_table *table,
			   u32 *offset)
{
	u32 addr;

	if (HAS_GLOBAL_MOCS_REGISTERS(rq->i915))
		addr = global_mocs_offset();
	else
		addr = mocs_offset(rq->engine);

	return read_regs(rq, addr, table->n_entries, offset);
}

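/* Store the L3CC registers; each register packs two 16bit table entries */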
static int read_l3cc_table(struct i915_request *rq,
			   const struct drm_i915_mocs_table *table,
			   u32 *offset)
{
	u32 addr = i915_mmio_reg_offset(GEN9_LNCFCMOCS(0));

	return read_regs(rq, addr, (table->n_entries + 1) / 2, offset);
}

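/* Walk the stored values, comparing each against the expected MOCS entry */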
static int check_mocs_table(struct intel_engine_cs *engine,
			    const struct drm_i915_mocs_table *table,
			    u32 **vaddr)
{
	unsigned int i;
	u32 expect;

	for_each_mocs(expect, table, i) {
		if (**vaddr != expect) {
			pr_err("%s: Invalid MOCS[%d] entry, found %08x, expected %08x\n",
			       engine->name, i, **vaddr, expect);
			return -EINVAL;
		}
		++*vaddr;
	}

	return 0;
}

static bool mcr_range(struct drm_i915_private *i915, u32 offset)
{
	/*
	 * Registers in this range are affected by the MCR selector
	 * which only controls CPU initiated MMIO. Routing does not
	 * work for CS access so we cannot verify them on this path.
	 */
	return INTEL_GEN(i915) >= 8 && offset >= 0xb000 && offset <= 0xb4ff;
}

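/* As for the MOCS table, but skipping registers the CS cannot read back */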
static int check_l3cc_table(struct intel_engine_cs *engine,
			    const struct drm_i915_mocs_table *table,
			    u32 **vaddr)
{
	/* Can we read the MCR range 0xb00 directly? See intel_workarounds! */
	u32 reg = i915_mmio_reg_offset(GEN9_LNCFCMOCS(0));
	unsigned int i;
	u32 expect;

	for_each_l3cc(expect, table, i) {
		if (!mcr_range(engine->i915, reg) && **vaddr != expect) {
			pr_err("%s: Invalid L3CC[%d] entry, found %08x, expected %08x\n",
			       engine->name, i, **vaddr, expect);
			return -EINVAL;
		}
		++*vaddr;
		reg += 4;
	}

	return 0;
}

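/*
 * Poison the scratch page, ask the GPU to store its view of the MOCS and
 * L3CC tables into it, then verify what was written back.
 */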
static int check_mocs_engine(struct live_mocs *arg,
			     struct intel_context *ce)
{
	struct i915_vma *vma = arg->scratch;
	struct i915_request *rq;
	u32 offset;
	u32 *vaddr;
	int err;

	memset32(arg->vaddr, STACK_MAGIC, PAGE_SIZE / sizeof(u32));

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	i915_vma_lock(vma);
	err = i915_request_await_object(rq, vma->obj, true);
	if (!err)
		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	i915_vma_unlock(vma);

	/* Read the mocs tables back using SRM */
	offset = i915_ggtt_offset(vma);
	if (!err)
		err = read_mocs_table(rq, &arg->table, &offset);
	if (!err && ce->engine->class == RENDER_CLASS)
		err = read_l3cc_table(rq, &arg->table, &offset);
	offset -= i915_ggtt_offset(vma);
	GEM_BUG_ON(offset > PAGE_SIZE);

	err = request_add_sync(rq, err);
	if (err)
		return err;

	/* Compare the results against the expected tables */
	vaddr = arg->vaddr;
	if (!err)
		err = check_mocs_table(ce->engine, &arg->table, &vaddr);
	if (!err && ce->engine->class == RENDER_CLASS)
		err = check_l3cc_table(ce->engine, &arg->table, &vaddr);
	if (err)
		return err;

	GEM_BUG_ON(arg->vaddr + offset != vaddr);
	return 0;
}

static int live_mocs_kernel(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct live_mocs mocs;
	int err;

	/* Basic check the system is configured with the expected mocs table */

	err = live_mocs_init(&mocs, gt);
	if (err)
		return err;

	for_each_engine(engine, gt, id) {
		intel_engine_pm_get(engine);
		err = check_mocs_engine(&mocs, engine->kernel_context);
		intel_engine_pm_put(engine);
		if (err)
			break;
	}

	live_mocs_fini(&mocs);

	return err;
}

static int live_mocs_clean(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct live_mocs mocs;
	int err;

	/* Every new context should see the same mocs table */

	err = live_mocs_init(&mocs, gt);
	if (err)
		return err;

	for_each_engine(engine, gt, id) {
		struct intel_context *ce;

		ce = intel_context_create(engine);
		if (IS_ERR(ce)) {
			err = PTR_ERR(ce);
			break;
		}

		err = check_mocs_engine(&mocs, ce);
		intel_context_put(ce);
		if (err)
			break;
	}

	live_mocs_fini(&mocs);

	return err;
}

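/* Reset the engine while it is busy running a spinner, i.e. mid-batch */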
static int active_engine_reset(struct intel_context *ce,
			       const char *reason)
{
	struct igt_spinner spin;
	struct i915_request *rq;
	int err;

	err = igt_spinner_init(&spin, ce->engine->gt);
	if (err)
		return err;

	rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
	if (IS_ERR(rq)) {
		igt_spinner_fini(&spin);
		return PTR_ERR(rq);
	}

	err = request_add_spin(rq, &spin);
	if (err == 0)
		err = intel_engine_reset(ce->engine, reason);

	igt_spinner_end(&spin);
	igt_spinner_fini(&spin);

	return err;
}

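/* Verify the MOCS setup survives idle, active and whole-device resets */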
static int __live_mocs_reset(struct live_mocs *mocs,
			     struct intel_context *ce)
{
	int err;

	err = intel_engine_reset(ce->engine, "mocs");
	if (err)
		return err;

	err = check_mocs_engine(mocs, ce);
	if (err)
		return err;

	err = active_engine_reset(ce, "mocs");
	if (err)
		return err;

	err = check_mocs_engine(mocs, ce);
	if (err)
		return err;

	intel_gt_reset(ce->engine->gt, ce->engine->mask, "mocs");

	err = check_mocs_engine(mocs, ce);
	if (err)
		return err;

	return 0;
}

static int live_mocs_reset(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct live_mocs mocs;
	int err = 0;

	/* Check the mocs setup is retained over per-engine and global resets */

	if (!intel_has_reset_engine(gt))
		return 0;

	err = live_mocs_init(&mocs, gt);
	if (err)
		return err;

	igt_global_reset_lock(gt);
	for_each_engine(engine, gt, id) {
		struct intel_context *ce;

		ce = intel_context_create(engine);
		if (IS_ERR(ce)) {
			err = PTR_ERR(ce);
			break;
		}

		intel_engine_pm_get(engine);
		err = __live_mocs_reset(&mocs, ce);
		intel_engine_pm_put(engine);

		intel_context_put(ce);
		if (err)
			break;
	}
	igt_global_reset_unlock(gt);

	live_mocs_fini(&mocs);

	return err;
}

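/*
 * Note that this file is built by inclusion from gt/intel_mocs.c, which
 * is how it can use the static get_mocs_settings() and mocs_offset().
 */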
int intel_mocs_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_mocs_kernel),
		SUBTEST(live_mocs_clean),
		SUBTEST(live_mocs_reset),
	};
	struct drm_i915_mocs_table table;

	/* Skip platforms without a MOCS table to verify */
	if (!get_mocs_settings(i915, &table))
		return 0;

	return intel_gt_live_subtests(tests, &i915->gt);
}