/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include "gt/intel_engine_pm.h"
#include "i915_selftest.h"

#include "gem/selftests/mock_context.h"
#include "selftests/igt_reset.h"
#include "selftests/igt_spinner.h"
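
/*
 * Bookkeeping for a single selftest run: the expected MOCS and L3CC
 * control tables for this platform, plus a scratch buffer (and its CPU
 * mapping) into which the values read back from the hardware are stored.
 */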

struct live_mocs {
	struct drm_i915_mocs_table mocs;
	struct drm_i915_mocs_table l3cc;
	struct i915_vma *scratch;
	void *vaddr;
};

static struct intel_context *mocs_context_create(struct intel_engine_cs *engine)
{
	struct intel_context *ce;

	ce = intel_context_create(engine);
	if (IS_ERR(ce))
		return ce;

	/* We build large requests to read the registers from the ring */
	ce->ring = __intel_context_ring_size(SZ_16K);

	return ce;
}
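
/*
 * Submit the request and wait briefly for it to complete, folding a
 * timeout into the error code propagated back to the caller.
 */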

static int request_add_sync(struct i915_request *rq, int err)
{
	i915_request_get(rq);
	i915_request_add(rq);
	if (i915_request_wait(rq, 0, HZ / 5) < 0)
		err = -ETIME;
	i915_request_put(rq);

	return err;
}

static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin)
{
	int err = 0;

	i915_request_get(rq);
	i915_request_add(rq);
	if (spin && !igt_wait_for_spinner(spin, rq))
		err = -ETIME;
	i915_request_put(rq);

	return err;
}
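
/*
 * Allocate a page of internal memory, pinned into the global GTT, for the
 * SRM commands emitted later to use as their destination buffer.
 */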

static struct i915_vma *create_scratch(struct intel_gt *gt)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int err;

	obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	i915_gem_object_set_cache_coherency(obj, I915_CACHING_CACHED);

	vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		i915_gem_object_put(obj);
		return vma;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
	if (err) {
		i915_gem_object_put(obj);
		return ERR_PTR(err);
	}

	return vma;
}
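
/*
 * Capture the MOCS/L3CC tables we expect the hardware to contain and
 * prepare the scratch buffer used to read the live values back.
 */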

static int live_mocs_init(struct live_mocs *arg, struct intel_gt *gt)
{
	struct drm_i915_mocs_table table;
	unsigned int flags;
	int err;

	memset(arg, 0, sizeof(*arg));

	flags = get_mocs_settings(gt->i915, &table);
	if (!flags)
		return -EINVAL;

	if (flags & HAS_RENDER_L3CC)
		arg->l3cc = table;

	if (flags & (HAS_GLOBAL_MOCS | HAS_ENGINE_MOCS))
		arg->mocs = table;

	arg->scratch = create_scratch(gt);
	if (IS_ERR(arg->scratch))
		return PTR_ERR(arg->scratch);

	arg->vaddr = i915_gem_object_pin_map(arg->scratch->obj, I915_MAP_WB);
	if (IS_ERR(arg->vaddr)) {
		err = PTR_ERR(arg->vaddr);
		goto err_scratch;
	}

	return 0;

err_scratch:
	i915_vma_unpin_and_release(&arg->scratch, 0);
	return err;
}

static void live_mocs_fini(struct live_mocs *arg)
{
	i915_vma_unpin_and_release(&arg->scratch, I915_VMA_RELEASE_MAP);
}
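
/*
 * Emit one MI_STORE_REGISTER_MEM (SRM) per register: each command makes
 * the command streamer copy the current register value into the scratch
 * page at *offset, advancing both the register address and the offset by
 * one dword per iteration.
 */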

static int read_regs(struct i915_request *rq,
		     u32 addr, unsigned int count,
		     uint32_t *offset)
{
	unsigned int i;
	u32 *cs;

	GEM_BUG_ON(!IS_ALIGNED(*offset, sizeof(u32)));

	cs = intel_ring_begin(rq, 4 * count);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	for (i = 0; i < count; i++) {
		*cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
		*cs++ = addr;
		*cs++ = *offset;
		*cs++ = 0;

		addr += sizeof(u32);
		*offset += sizeof(u32);
	}

	intel_ring_advance(rq, cs);

	return 0;
}

static int read_mocs_table(struct i915_request *rq,
			   const struct drm_i915_mocs_table *table,
			   uint32_t *offset)
{
	u32 addr;

	if (HAS_GLOBAL_MOCS_REGISTERS(rq->engine->i915))
		addr = global_mocs_offset();
	else
		addr = mocs_offset(rq->engine);

	return read_regs(rq, addr, table->n_entries, offset);
}
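
/*
 * The L3CC values are packed two 16-bit entries per LNCFCMOCS register,
 * hence only (n_entries + 1) / 2 registers need to be read back.
 */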

static int read_l3cc_table(struct i915_request *rq,
			   const struct drm_i915_mocs_table *table,
			   uint32_t *offset)
{
	u32 addr = i915_mmio_reg_offset(GEN9_LNCFCMOCS(0));

	return read_regs(rq, addr, (table->n_entries + 1) / 2, offset);
}

static int check_mocs_table(struct intel_engine_cs *engine,
			    const struct drm_i915_mocs_table *table,
			    uint32_t **vaddr)
{
	unsigned int i;
	u32 expect;

	for_each_mocs(expect, table, i) {
		if (**vaddr != expect) {
			pr_err("%s: Invalid MOCS[%d] entry, found %08x, expected %08x\n",
			       engine->name, i, **vaddr, expect);
			return -EINVAL;
		}
		++*vaddr;
	}

	return 0;
}

static bool mcr_range(struct drm_i915_private *i915, u32 offset)
{
	/*
	 * Registers in this range are affected by the MCR selector
	 * which only controls CPU initiated MMIO. Routing does not
	 * work for CS access so we cannot verify them on this path.
	 */
	return INTEL_GEN(i915) >= 8 && offset >= 0xb000 && offset <= 0xb4ff;
}

static int check_l3cc_table(struct intel_engine_cs *engine,
			    const struct drm_i915_mocs_table *table,
			    uint32_t **vaddr)
{
	/* Can we read the MCR range 0xb00 directly? See intel_workarounds! */
	u32 reg = i915_mmio_reg_offset(GEN9_LNCFCMOCS(0));
	unsigned int i;
	u32 expect;

	for_each_l3cc(expect, table, i) {
		if (!mcr_range(engine->i915, reg) && **vaddr != expect) {
			pr_err("%s: Invalid L3CC[%d] entry, found %08x, expected %08x\n",
			       engine->name, i, **vaddr, expect);
			return -EINVAL;
		}
		++*vaddr;
		reg += 4;
	}

	return 0;
}
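
/*
 * Poison the scratch page, ask the context to SRM its MOCS (and, for the
 * render engine, L3CC) registers into it, then compare what came back
 * against the tables we expect the hardware to be programmed with.
 */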

static int check_mocs_engine(struct live_mocs *arg,
			     struct intel_context *ce)
{
	struct i915_vma *vma = arg->scratch;
	struct i915_request *rq;
	u32 offset;
	u32 *vaddr;
	int err;

	memset32(arg->vaddr, STACK_MAGIC, PAGE_SIZE / sizeof(u32));

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	i915_vma_lock(vma);
	err = i915_request_await_object(rq, vma->obj, true);
	if (!err)
		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	i915_vma_unlock(vma);

	/* Read the mocs tables back using SRM */
	offset = i915_ggtt_offset(vma);
	if (!err)
		err = read_mocs_table(rq, &arg->mocs, &offset);
	if (!err && ce->engine->class == RENDER_CLASS)
		err = read_l3cc_table(rq, &arg->l3cc, &offset);
	offset -= i915_ggtt_offset(vma);
	GEM_BUG_ON(offset > PAGE_SIZE);

	err = request_add_sync(rq, err);
	if (err)
		return err;

	/* Compare the results against the expected tables */
	vaddr = arg->vaddr;
	if (!err)
		err = check_mocs_table(ce->engine, &arg->mocs, &vaddr);
	if (!err && ce->engine->class == RENDER_CLASS)
		err = check_l3cc_table(ce->engine, &arg->l3cc, &vaddr);
	if (err)
		return err;

	GEM_BUG_ON(arg->vaddr + offset != vaddr);
	return 0;
}

static int live_mocs_kernel(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct live_mocs mocs;
	int err;

	/* Basic check the system is configured with the expected mocs table */

	err = live_mocs_init(&mocs, gt);
	if (err)
		return err;

	for_each_engine(engine, gt, id) {
		intel_engine_pm_get(engine);
		err = check_mocs_engine(&mocs, engine->kernel_context);
		intel_engine_pm_put(engine);
		if (err)
			break;
	}

	live_mocs_fini(&mocs);
	return err;
}

static int live_mocs_clean(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct live_mocs mocs;
	int err;

	/* Every new context should see the same mocs table */

	err = live_mocs_init(&mocs, gt);
	if (err)
		return err;

	for_each_engine(engine, gt, id) {
		struct intel_context *ce;

		ce = mocs_context_create(engine);
		if (IS_ERR(ce)) {
			err = PTR_ERR(ce);
			break;
		}

		err = check_mocs_engine(&mocs, ce);
		intel_context_put(ce);
		if (err)
			break;
	}

	live_mocs_fini(&mocs);

	return err;
}
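
/*
 * Reset the engine while it is busy executing a spinning request, so the
 * table reload is exercised on the active (as opposed to idle) path.
 */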

static int active_engine_reset(struct intel_context *ce,
			       const char *reason)
{
	struct igt_spinner spin;
	struct i915_request *rq;
	int err;

	err = igt_spinner_init(&spin, ce->engine->gt);
	if (err)
		return err;

	rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
	if (IS_ERR(rq)) {
		igt_spinner_fini(&spin);
		return PTR_ERR(rq);
	}

	err = request_add_spin(rq, &spin);
	if (err == 0)
		err = intel_engine_reset(ce->engine, reason);

	igt_spinner_end(&spin);
	igt_spinner_fini(&spin);

	return err;
}
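
/*
 * Check that the MOCS tables are retained across an idle engine reset,
 * an engine reset with an active context, and finally a full GT reset.
 */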

static int __live_mocs_reset(struct live_mocs *mocs,
			     struct intel_context *ce)
{
	int err;

	err = intel_engine_reset(ce->engine, "mocs");
	if (err)
		return err;

	err = check_mocs_engine(mocs, ce);
	if (err)
		return err;

	err = active_engine_reset(ce, "mocs");
	if (err)
		return err;

	err = check_mocs_engine(mocs, ce);
	if (err)
		return err;

	intel_gt_reset(ce->engine->gt, ce->engine->mask, "mocs");

	err = check_mocs_engine(mocs, ce);
	if (err)
		return err;

	return 0;
}

static int live_mocs_reset(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct live_mocs mocs;
	int err = 0;

	/* Check the mocs setup is retained over per-engine and global resets */

	if (!intel_has_reset_engine(gt))
		return 0;

	err = live_mocs_init(&mocs, gt);
	if (err)
		return err;

	igt_global_reset_lock(gt);
	for_each_engine(engine, gt, id) {
		struct intel_context *ce;

		ce = mocs_context_create(engine);
		if (IS_ERR(ce)) {
			err = PTR_ERR(ce);
			break;
		}

		intel_engine_pm_get(engine);
		err = __live_mocs_reset(&mocs, ce);
		intel_engine_pm_put(engine);

		intel_context_put(ce);
		if (err)
			break;
	}
	igt_global_reset_unlock(gt);

	live_mocs_fini(&mocs);

	return err;
}
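
/* Entry point: only run the subtests on platforms that have a MOCS table. */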

int intel_mocs_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_mocs_kernel),
		SUBTEST(live_mocs_clean),
		SUBTEST(live_mocs_reset),
	};
	struct drm_i915_mocs_table table;

	if (!get_mocs_settings(i915, &table))
		return 0;

	return intel_gt_live_subtests(tests, &i915->gt);
}