/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include "gem/i915_gem_pm.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "i915_selftest.h"
#include "intel_reset.h"

#include "selftests/igt_flush_test.h"
#include "selftests/igt_reset.h"
#include "selftests/igt_spinner.h"
#include "selftests/mock_drm.h"

#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"

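/*
 * Registers known to be write-only on a given platform. check_dirty_whitelist()
 * skips these (alongside any whitelist entry already flagged as write-only
 * access), since their contents cannot be read back for verification.
 */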
static const struct wo_register {
        enum intel_platform platform;
        u32 reg;
} wo_registers[] = {
        { INTEL_GEMINILAKE, 0x731c }
};

struct wa_lists {
        struct i915_wa_list gt_wa_list;
        struct {
                struct i915_wa_list wa_list;
                struct i915_wa_list ctx_wa_list;
        } engine[I915_NUM_ENGINES];
};

static int request_add_sync(struct i915_request *rq, int err)
{
        i915_request_get(rq);
        i915_request_add(rq);
        if (i915_request_wait(rq, 0, HZ / 5) < 0)
                err = -EIO;
        i915_request_put(rq);

        return err;
}

static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin)
{
        int err = 0;

        i915_request_get(rq);
        i915_request_add(rq);
        if (spin && !igt_wait_for_spinner(spin, rq))
                err = -ETIMEDOUT;
        i915_request_put(rq);

        return err;
}

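/*
 * Build an independent copy of the GT, engine and context workaround lists so
 * that the state programmed by the driver can later be compared against a
 * known-good reference by verify_wa_lists().
 */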
static void
reference_lists_init(struct intel_gt *gt, struct wa_lists *lists)
{
        struct intel_engine_cs *engine;
        enum intel_engine_id id;

        memset(lists, 0, sizeof(*lists));

        wa_init_start(&lists->gt_wa_list, "GT_REF", "global");
        gt_init_workarounds(gt->i915, &lists->gt_wa_list);
        wa_init_finish(&lists->gt_wa_list);

        for_each_engine(engine, gt, id) {
                struct i915_wa_list *wal = &lists->engine[id].wa_list;

                wa_init_start(wal, "REF", engine->name);
                engine_init_workarounds(engine, wal);
                wa_init_finish(wal);

                __intel_engine_init_ctx_wa(engine,
                                           &lists->engine[id].ctx_wa_list,
                                           "CTX_REF");
        }
}

static void
reference_lists_fini(struct intel_gt *gt, struct wa_lists *lists)
{
        struct intel_engine_cs *engine;
        enum intel_engine_id id;

        for_each_engine(engine, gt, id)
                intel_wa_list_free(&lists->engine[id].wa_list);

        intel_wa_list_free(&lists->gt_wa_list);
}

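/*
 * Submit a request on @engine that uses MI_STORE_REGISTER_MEM to copy every
 * RING_FORCE_TO_NONPRIV slot into a freshly allocated internal object, and
 * return that object so the CPU can inspect the programmed whitelist slots.
 */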
static struct drm_i915_gem_object *
read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
{
        const u32 base = engine->mmio_base;
        struct drm_i915_gem_object *result;
        struct i915_request *rq;
        struct i915_vma *vma;
        u32 srm, *cs;
        int err;
        int i;

        result = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
        if (IS_ERR(result))
                return result;

        i915_gem_object_set_cache_coherency(result, I915_CACHE_LLC);

        cs = i915_gem_object_pin_map(result, I915_MAP_WB);
        if (IS_ERR(cs)) {
                err = PTR_ERR(cs);
                goto err_obj;
        }
        memset(cs, 0xc5, PAGE_SIZE);
        i915_gem_object_flush_map(result);
        i915_gem_object_unpin_map(result);

        vma = i915_vma_instance(result, &engine->gt->ggtt->vm, NULL);
        if (IS_ERR(vma)) {
                err = PTR_ERR(vma);
                goto err_obj;
        }

        err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
        if (err)
                goto err_obj;

        rq = igt_request_alloc(ctx, engine);
        if (IS_ERR(rq)) {
                err = PTR_ERR(rq);
                goto err_pin;
        }

        i915_vma_lock(vma);
        err = i915_request_await_object(rq, vma->obj, true);
        if (err == 0)
                err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
        i915_vma_unlock(vma);
        if (err)
                goto err_req;

        srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
        if (INTEL_GEN(ctx->i915) >= 8)
                srm++;

        cs = intel_ring_begin(rq, 4 * RING_MAX_NONPRIV_SLOTS);
        if (IS_ERR(cs)) {
                err = PTR_ERR(cs);
                goto err_req;
        }

        for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
                *cs++ = srm;
                *cs++ = i915_mmio_reg_offset(RING_FORCE_TO_NONPRIV(base, i));
                *cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i;
                *cs++ = 0;
        }
        intel_ring_advance(rq, cs);

        i915_request_add(rq);
        i915_vma_unpin(vma);

        return result;

err_req:
        i915_request_add(rq);
err_pin:
        i915_vma_unpin(vma);
err_obj:
        i915_gem_object_put(result);
        return ERR_PTR(err);
}

static u32
get_whitelist_reg(const struct intel_engine_cs *engine, unsigned int i)
{
        i915_reg_t reg = i < engine->whitelist.count ?
                         engine->whitelist.list[i].reg :
                         RING_NOPID(engine->mmio_base);

        return i915_mmio_reg_offset(reg);
}

static void
print_results(const struct intel_engine_cs *engine, const u32 *results)
{
        unsigned int i;

        for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
                u32 expected = get_whitelist_reg(engine, i);
                u32 actual = results[i];

                pr_info("RING_NONPRIV[%d]: expected 0x%08x, found 0x%08x\n",
                        i, expected, actual);
        }
}

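/*
 * Read back the RING_NONPRIV slots for @engine (via read_nonprivs()) and
 * compare each one against the register the whitelist says should be
 * programmed there; unused slots are expected to hold RING_NOPID.
 */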
static int check_whitelist(struct i915_gem_context *ctx,
                           struct intel_engine_cs *engine)
{
        struct drm_i915_gem_object *results;
        struct intel_wedge_me wedge;
        u32 *vaddr;
        int err;
        int i;

        results = read_nonprivs(ctx, engine);
        if (IS_ERR(results))
                return PTR_ERR(results);

        err = 0;
        i915_gem_object_lock(results, NULL);
        intel_wedge_on_timeout(&wedge, engine->gt, HZ / 5) /* safety net! */
                err = i915_gem_object_set_to_cpu_domain(results, false);
        i915_gem_object_unlock(results);
        if (intel_gt_is_wedged(engine->gt))
                err = -EIO;
        if (err)
                goto out_put;

        vaddr = i915_gem_object_pin_map(results, I915_MAP_WB);
        if (IS_ERR(vaddr)) {
                err = PTR_ERR(vaddr);
                goto out_put;
        }

        for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
                u32 expected = get_whitelist_reg(engine, i);
                u32 actual = vaddr[i];

                if (expected != actual) {
                        print_results(engine, vaddr);
                        pr_err("Invalid RING_NONPRIV[%d], expected 0x%08x, found 0x%08x\n",
                               i, expected, actual);

                        err = -EINVAL;
                        break;
                }
        }

        i915_gem_object_unpin_map(results);
out_put:
        i915_gem_object_put(results);
        return err;
}

static int do_device_reset(struct intel_engine_cs *engine)
{
        intel_gt_reset(engine->gt, engine->mask, "live_workarounds");
        return 0;
}

static int do_engine_reset(struct intel_engine_cs *engine)
{
        return intel_engine_reset(engine, "live_workarounds");
}

static int
switch_to_scratch_context(struct intel_engine_cs *engine,
                          struct igt_spinner *spin)
{
        struct intel_context *ce;
        struct i915_request *rq;
        int err = 0;

        ce = intel_context_create(engine);
        if (IS_ERR(ce))
                return PTR_ERR(ce);

        rq = igt_spinner_create_request(spin, ce, MI_NOOP);
        intel_context_put(ce);

        if (IS_ERR(rq)) {
                spin = NULL;
                err = PTR_ERR(rq);
                goto err;
        }

        err = request_add_spin(rq, spin);
err:
        if (err && spin)
                igt_spinner_end(spin);

        return err;
}

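/*
 * Core of live_reset_whitelist: verify the whitelist before a reset, keep a
 * spinner running on a scratch context while the supplied reset function is
 * applied, then confirm the whitelist is intact both in the original context
 * and in a freshly created one.
 */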
static int check_whitelist_across_reset(struct intel_engine_cs *engine,
                                        int (*reset)(struct intel_engine_cs *),
                                        const char *name)
{
        struct drm_i915_private *i915 = engine->i915;
        struct i915_gem_context *ctx, *tmp;
        struct igt_spinner spin;
        intel_wakeref_t wakeref;
        int err;

        pr_info("Checking %d whitelisted registers on %s (RING_NONPRIV) [%s]\n",
                engine->whitelist.count, engine->name, name);

        ctx = kernel_context(i915);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        err = igt_spinner_init(&spin, engine->gt);
        if (err)
                goto out_ctx;

        err = check_whitelist(ctx, engine);
        if (err) {
                pr_err("Invalid whitelist *before* %s reset!\n", name);
                goto out_spin;
        }

        err = switch_to_scratch_context(engine, &spin);
        if (err)
                goto out_spin;

        with_intel_runtime_pm(engine->uncore->rpm, wakeref)
                err = reset(engine);

        igt_spinner_end(&spin);

        if (err) {
                pr_err("%s reset failed\n", name);
                goto out_spin;
        }

        err = check_whitelist(ctx, engine);
        if (err) {
                pr_err("Whitelist not preserved in context across %s reset!\n",
                       name);
                goto out_spin;
        }

        tmp = kernel_context(i915);
        if (IS_ERR(tmp)) {
                err = PTR_ERR(tmp);
                goto out_spin;
        }
        kernel_context_close(ctx);
        ctx = tmp;

        err = check_whitelist(ctx, engine);
        if (err) {
                pr_err("Invalid whitelist *after* %s reset in fresh context!\n",
                       name);
                goto out_spin;
        }

out_spin:
        igt_spinner_fini(&spin);
out_ctx:
        kernel_context_close(ctx);
        return err;
}

static struct i915_vma *create_batch(struct i915_address_space *vm)
{
        struct drm_i915_gem_object *obj;
        struct i915_vma *vma;
        int err;

        obj = i915_gem_object_create_internal(vm->i915, 16 * PAGE_SIZE);
        if (IS_ERR(obj))
                return ERR_CAST(obj);

        vma = i915_vma_instance(obj, vm, NULL);
        if (IS_ERR(vma)) {
                err = PTR_ERR(vma);
                goto err_obj;
        }

        err = i915_vma_pin(vma, 0, 0, PIN_USER);
        if (err)
                goto err_obj;

        return vma;

err_obj:
        i915_gem_object_put(obj);
        return ERR_PTR(err);
}

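/*
 * Model how a register accepts a write: if the readback after writing
 * 0xffffffff exposed only the low 16 bits (rsvd == 0x0000ffff), the register
 * is treated as masked, with the upper half of the written value acting as a
 * per-bit write enable; otherwise rsvd marks which bits are writable at all.
 */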
static u32 reg_write(u32 old, u32 new, u32 rsvd)
{
        if (rsvd == 0x0000ffff) {
                old &= ~(new >> 16);
                old |= new & (new >> 16);
        } else {
                old &= ~rsvd;
                old |= new & rsvd;
        }

        return old;
}

static bool wo_register(struct intel_engine_cs *engine, u32 reg)
{
        enum intel_platform platform = INTEL_INFO(engine->i915)->platform;
        int i;

        if ((reg & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
             RING_FORCE_TO_NONPRIV_ACCESS_WR)
                return true;

        for (i = 0; i < ARRAY_SIZE(wo_registers); i++) {
                if (wo_registers[i].platform == platform &&
                    wo_registers[i].reg == reg)
                        return true;
        }

        return false;
}

static bool timestamp(const struct intel_engine_cs *engine, u32 reg)
{
        reg = (reg - engine->mmio_base) & ~RING_FORCE_TO_NONPRIV_ACCESS_MASK;
        switch (reg) {
        case 0x358:
        case 0x35c:
        case 0x3a8:
                return true;

        default:
                return false;
        }
}

static bool ro_register(u32 reg)
{
        if ((reg & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
             RING_FORCE_TO_NONPRIV_ACCESS_RD)
                return true;

        return false;
}

static int whitelist_writable_count(struct intel_engine_cs *engine)
{
        int count = engine->whitelist.count;
        int i;

        for (i = 0; i < engine->whitelist.count; i++) {
                u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

                if (ro_register(reg))
                        count--;
        }

        return count;
}

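/*
 * For each whitelisted register, build a batch that saves the original value,
 * writes a series of garbage patterns via MI_LOAD_REGISTER_IMM, stores each
 * readback to scratch, and finally restores the original value. The readbacks
 * are then checked against the write-masking behaviour modelled by reg_write().
 */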
static int check_dirty_whitelist(struct intel_context *ce)
{
        const u32 values[] = {
                0x00000000,
                0x01010101,
                0x10100101,
                0x03030303,
                0x30300303,
                0x05050505,
                0x50500505,
                0x0f0f0f0f,
                0xf00ff00f,
                0x10101010,
                0xf0f01010,
                0x30303030,
                0xa0a03030,
                0x50505050,
                0xc0c05050,
                0xf0f0f0f0,
                0x11111111,
                0x33333333,
                0x55555555,
                0x0000ffff,
                0x00ff00ff,
                0xff0000ff,
                0xffff00ff,
                0xffffffff,
        };
        struct intel_engine_cs *engine = ce->engine;
        struct i915_vma *scratch;
        struct i915_vma *batch;
        int err = 0, i, v;
        u32 *cs, *results;

        scratch = create_scratch(ce->vm, 2 * ARRAY_SIZE(values) + 1);
        if (IS_ERR(scratch))
                return PTR_ERR(scratch);

        batch = create_batch(ce->vm);
        if (IS_ERR(batch)) {
                err = PTR_ERR(batch);
                goto out_scratch;
        }

        for (i = 0; i < engine->whitelist.count; i++) {
                u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
                u64 addr = scratch->node.start;
                struct i915_request *rq;
                u32 srm, lrm, rsvd;
                u32 expect;
                int idx;
                bool ro_reg;

                if (wo_register(engine, reg))
                        continue;

                if (timestamp(engine, reg))
                        continue; /* timestamps are expected to autoincrement */

                ro_reg = ro_register(reg);

                /* Clear non priv flags */
                reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;

                srm = MI_STORE_REGISTER_MEM;
                lrm = MI_LOAD_REGISTER_MEM;
                if (INTEL_GEN(engine->i915) >= 8)
                        lrm++, srm++;

                pr_debug("%s: Writing garbage to %x\n",
                         engine->name, reg);

                cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
                if (IS_ERR(cs)) {
                        err = PTR_ERR(cs);
                        goto out_batch;
                }

                /* SRM original */
                *cs++ = srm;
                *cs++ = reg;
                *cs++ = lower_32_bits(addr);
                *cs++ = upper_32_bits(addr);

                idx = 1;
                for (v = 0; v < ARRAY_SIZE(values); v++) {
                        /* LRI garbage */
                        *cs++ = MI_LOAD_REGISTER_IMM(1);
                        *cs++ = reg;
                        *cs++ = values[v];

                        /* SRM result */
                        *cs++ = srm;
                        *cs++ = reg;
                        *cs++ = lower_32_bits(addr + sizeof(u32) * idx);
                        *cs++ = upper_32_bits(addr + sizeof(u32) * idx);
                        idx++;
                }
                for (v = 0; v < ARRAY_SIZE(values); v++) {
                        /* LRI garbage */
                        *cs++ = MI_LOAD_REGISTER_IMM(1);
                        *cs++ = reg;
                        *cs++ = ~values[v];

                        /* SRM result */
                        *cs++ = srm;
                        *cs++ = reg;
                        *cs++ = lower_32_bits(addr + sizeof(u32) * idx);
                        *cs++ = upper_32_bits(addr + sizeof(u32) * idx);
                        idx++;
                }
                GEM_BUG_ON(idx * sizeof(u32) > scratch->size);

                /* LRM original -- don't leave garbage in the context! */
                *cs++ = lrm;
                *cs++ = reg;
                *cs++ = lower_32_bits(addr);
                *cs++ = upper_32_bits(addr);

                *cs++ = MI_BATCH_BUFFER_END;

                i915_gem_object_flush_map(batch->obj);
                i915_gem_object_unpin_map(batch->obj);
                intel_gt_chipset_flush(engine->gt);

                rq = intel_context_create_request(ce);
                if (IS_ERR(rq)) {
                        err = PTR_ERR(rq);
                        goto out_batch;
                }

                if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
                        err = engine->emit_init_breadcrumb(rq);
                        if (err)
                                goto err_request;
                }

                i915_vma_lock(batch);
                err = i915_request_await_object(rq, batch->obj, false);
                if (err == 0)
                        err = i915_vma_move_to_active(batch, rq, 0);
                i915_vma_unlock(batch);
                if (err)
                        goto err_request;

                i915_vma_lock(scratch);
                err = i915_request_await_object(rq, scratch->obj, true);
                if (err == 0)
                        err = i915_vma_move_to_active(scratch, rq,
                                                      EXEC_OBJECT_WRITE);
                i915_vma_unlock(scratch);
                if (err)
                        goto err_request;

                err = engine->emit_bb_start(rq,
                                            batch->node.start, PAGE_SIZE,
                                            0);
                if (err)
                        goto err_request;

err_request:
                err = request_add_sync(rq, err);
                if (err) {
                        pr_err("%s: Futzing %x timed out; cancelling test\n",
                               engine->name, reg);
                        intel_gt_set_wedged(engine->gt);
                        goto out_batch;
                }

                results = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
                if (IS_ERR(results)) {
                        err = PTR_ERR(results);
                        goto out_batch;
                }

                GEM_BUG_ON(values[ARRAY_SIZE(values) - 1] != 0xffffffff);
                if (!ro_reg) {
                        /* detect write masking */
                        rsvd = results[ARRAY_SIZE(values)];
                        if (!rsvd) {
                                pr_err("%s: Unable to write to whitelisted register %x\n",
                                       engine->name, reg);
                                err = -EINVAL;
                                goto out_unpin;
                        }
                } else {
                        rsvd = 0;
                }

                expect = results[0];
                idx = 1;
                for (v = 0; v < ARRAY_SIZE(values); v++) {
                        if (ro_reg)
                                expect = results[0];
                        else
                                expect = reg_write(expect, values[v], rsvd);

                        if (results[idx] != expect)
                                err++;
                        idx++;
                }
                for (v = 0; v < ARRAY_SIZE(values); v++) {
                        if (ro_reg)
                                expect = results[0];
                        else
                                expect = reg_write(expect, ~values[v], rsvd);

                        if (results[idx] != expect)
                                err++;
                        idx++;
                }
                if (err) {
                        pr_err("%s: %d mismatch between values written to whitelisted register [%x], and values read back!\n",
                               engine->name, err, reg);

                        if (ro_reg)
                                pr_info("%s: Whitelisted read-only register: %x, original value %08x\n",
                                        engine->name, reg, results[0]);
                        else
                                pr_info("%s: Whitelisted register: %x, original value %08x, rsvd %08x\n",
                                        engine->name, reg, results[0], rsvd);

                        expect = results[0];
                        idx = 1;
                        for (v = 0; v < ARRAY_SIZE(values); v++) {
                                u32 w = values[v];

                                if (ro_reg)
                                        expect = results[0];
                                else
                                        expect = reg_write(expect, w, rsvd);
                                pr_info("Wrote %08x, read %08x, expect %08x\n",
                                        w, results[idx], expect);
                                idx++;
                        }
                        for (v = 0; v < ARRAY_SIZE(values); v++) {
                                u32 w = ~values[v];

                                if (ro_reg)
                                        expect = results[0];
                                else
                                        expect = reg_write(expect, w, rsvd);
                                pr_info("Wrote %08x, read %08x, expect %08x\n",
                                        w, results[idx], expect);
                                idx++;
                        }

                        err = -EINVAL;
                }
out_unpin:
                i915_gem_object_unpin_map(scratch->obj);
                if (err)
                        break;
        }

        if (igt_flush_test(engine->i915))
                err = -EIO;
out_batch:
        i915_vma_unpin_and_release(&batch, 0);
out_scratch:
        i915_vma_unpin_and_release(&scratch, 0);
        return err;
}

static int live_dirty_whitelist(void *arg)
{
        struct intel_gt *gt = arg;
        struct intel_engine_cs *engine;
        enum intel_engine_id id;

        /* Can the user write to the whitelisted registers? */

        if (INTEL_GEN(gt->i915) < 7) /* minimum requirement for LRI, SRM, LRM */
                return 0;

        for_each_engine(engine, gt, id) {
                struct intel_context *ce;
                int err;

                if (engine->whitelist.count == 0)
                        continue;

                ce = intel_context_create(engine);
                if (IS_ERR(ce))
                        return PTR_ERR(ce);

                err = check_dirty_whitelist(ce);
                intel_context_put(ce);
                if (err)
                        return err;
        }

        return 0;
}

static int live_reset_whitelist(void *arg)
{
        struct intel_gt *gt = arg;
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        int err = 0;

        /* If we reset the gpu, we should not lose the RING_NONPRIV */
        igt_global_reset_lock(gt);

        for_each_engine(engine, gt, id) {
                if (engine->whitelist.count == 0)
                        continue;

                if (intel_has_reset_engine(gt)) {
                        err = check_whitelist_across_reset(engine,
                                                           do_engine_reset,
                                                           "engine");
                        if (err)
                                goto out;
                }

                if (intel_has_gpu_reset(gt)) {
                        err = check_whitelist_across_reset(engine,
                                                           do_device_reset,
                                                           "device");
                        if (err)
                                goto out;
                }
        }

out:
        igt_global_reset_unlock(gt);
        return err;
}

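/*
 * Emit one MI_STORE_REGISTER_MEM per whitelisted register so that the current
 * register values, as seen from @ctx, land in the @results buffer.
 */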
static int read_whitelisted_registers(struct i915_gem_context *ctx,
                                      struct intel_engine_cs *engine,
                                      struct i915_vma *results)
{
        struct i915_request *rq;
        int i, err = 0;
        u32 srm, *cs;

        rq = igt_request_alloc(ctx, engine);
        if (IS_ERR(rq))
                return PTR_ERR(rq);

        i915_vma_lock(results);
        err = i915_request_await_object(rq, results->obj, true);
        if (err == 0)
                err = i915_vma_move_to_active(results, rq, EXEC_OBJECT_WRITE);
        i915_vma_unlock(results);
        if (err)
                goto err_req;

        srm = MI_STORE_REGISTER_MEM;
        if (INTEL_GEN(ctx->i915) >= 8)
                srm++;

        cs = intel_ring_begin(rq, 4 * engine->whitelist.count);
        if (IS_ERR(cs)) {
                err = PTR_ERR(cs);
                goto err_req;
        }

        for (i = 0; i < engine->whitelist.count; i++) {
                u64 offset = results->node.start + sizeof(u32) * i;
                u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

                /* Clear non priv flags */
                reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;

                *cs++ = srm;
                *cs++ = reg;
                *cs++ = lower_32_bits(offset);
                *cs++ = upper_32_bits(offset);
        }
        intel_ring_advance(rq, cs);

err_req:
        return request_add_sync(rq, err);
}

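/*
 * From an unprivileged batch owned by @ctx, write 0xffffffff to every writable
 * whitelisted register. live_isolated_whitelist() uses this to dirty one
 * context without touching the other.
 */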
static int scrub_whitelisted_registers(struct i915_gem_context *ctx,
                                       struct intel_engine_cs *engine)
{
        struct i915_address_space *vm;
        struct i915_request *rq;
        struct i915_vma *batch;
        int i, err = 0;
        u32 *cs;

        vm = i915_gem_context_get_vm_rcu(ctx);
        batch = create_batch(vm);
        i915_vm_put(vm);
        if (IS_ERR(batch))
                return PTR_ERR(batch);

        cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
        if (IS_ERR(cs)) {
                err = PTR_ERR(cs);
                goto err_batch;
        }

        *cs++ = MI_LOAD_REGISTER_IMM(whitelist_writable_count(engine));
        for (i = 0; i < engine->whitelist.count; i++) {
                u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

                if (ro_register(reg))
                        continue;

                /* Clear non priv flags */
                reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;

                *cs++ = reg;
                *cs++ = 0xffffffff;
        }
        *cs++ = MI_BATCH_BUFFER_END;

        i915_gem_object_flush_map(batch->obj);
        intel_gt_chipset_flush(engine->gt);

        rq = igt_request_alloc(ctx, engine);
        if (IS_ERR(rq)) {
                err = PTR_ERR(rq);
                goto err_unpin;
        }

        if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
                err = engine->emit_init_breadcrumb(rq);
                if (err)
                        goto err_request;
        }

        i915_vma_lock(batch);
        err = i915_request_await_object(rq, batch->obj, false);
        if (err == 0)
                err = i915_vma_move_to_active(batch, rq, 0);
        i915_vma_unlock(batch);
        if (err)
                goto err_request;

        /* Perform the writes from an unprivileged "user" batch */
        err = engine->emit_bb_start(rq, batch->node.start, 0, 0);

err_request:
        err = request_add_sync(rq, err);

err_unpin:
        i915_gem_object_unpin_map(batch->obj);
err_batch:
        i915_vma_unpin_and_release(&batch, 0);
        return err;
}

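/*
 * Per-generation exemption table: pardon_reg() and writeonly_reg() below use
 * find_reg() to excuse registers that are known not to behave during the
 * context-isolation checks.
 */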
struct regmask {
        i915_reg_t reg;
        unsigned long gen_mask;
};

static bool find_reg(struct drm_i915_private *i915,
                     i915_reg_t reg,
                     const struct regmask *tbl,
                     unsigned long count)
{
        u32 offset = i915_mmio_reg_offset(reg);

        while (count--) {
                if (INTEL_INFO(i915)->gen_mask & tbl->gen_mask &&
                    i915_mmio_reg_offset(tbl->reg) == offset)
                        return true;
                tbl++;
        }

        return false;
}

static bool pardon_reg(struct drm_i915_private *i915, i915_reg_t reg)
{
        /* Alas, we must pardon some whitelists. Mistakes already made */
        static const struct regmask pardon[] = {
                { GEN9_CTX_PREEMPT_REG, INTEL_GEN_MASK(9, 9) },
                { GEN8_L3SQCREG4, INTEL_GEN_MASK(9, 9) },
        };

        return find_reg(i915, reg, pardon, ARRAY_SIZE(pardon));
}

static bool result_eq(struct intel_engine_cs *engine,
                      u32 a, u32 b, i915_reg_t reg)
{
        if (a != b && !pardon_reg(engine->i915, reg)) {
                pr_err("Whitelisted register 0x%4x not context saved: A=%08x, B=%08x\n",
                       i915_mmio_reg_offset(reg), a, b);
                return false;
        }

        return true;
}

static bool writeonly_reg(struct drm_i915_private *i915, i915_reg_t reg)
{
        /* Some registers do not seem to behave, and our writes are unreadable */
        static const struct regmask wo[] = {
                { GEN9_SLICE_COMMON_ECO_CHICKEN1, INTEL_GEN_MASK(9, 9) },
        };

        return find_reg(i915, reg, wo, ARRAY_SIZE(wo));
}

static bool result_neq(struct intel_engine_cs *engine,
                       u32 a, u32 b, i915_reg_t reg)
{
        if (a == b && !writeonly_reg(engine->i915, reg)) {
                pr_err("Whitelist register 0x%4x:%08x was unwritable\n",
                       i915_mmio_reg_offset(reg), a);
                return false;
        }

        return true;
}

static int
check_whitelisted_registers(struct intel_engine_cs *engine,
                            struct i915_vma *A,
                            struct i915_vma *B,
                            bool (*fn)(struct intel_engine_cs *engine,
                                       u32 a, u32 b,
                                       i915_reg_t reg))
{
        u32 *a, *b;
        int i, err;

        a = i915_gem_object_pin_map(A->obj, I915_MAP_WB);
        if (IS_ERR(a))
                return PTR_ERR(a);

        b = i915_gem_object_pin_map(B->obj, I915_MAP_WB);
        if (IS_ERR(b)) {
                err = PTR_ERR(b);
                goto err_a;
        }

        err = 0;
        for (i = 0; i < engine->whitelist.count; i++) {
                const struct i915_wa *wa = &engine->whitelist.list[i];

                if (i915_mmio_reg_offset(wa->reg) &
                    RING_FORCE_TO_NONPRIV_ACCESS_RD)
                        continue;

                if (!fn(engine, a[i], b[i], wa->reg))
                        err = -EINVAL;
        }

        i915_gem_object_unpin_map(B->obj);
err_a:
        i915_gem_object_unpin_map(A->obj);
        return err;
}

static int live_isolated_whitelist(void *arg)
{
        struct intel_gt *gt = arg;
        struct {
                struct i915_gem_context *ctx;
                struct i915_vma *scratch[2];
        } client[2] = {};
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        int i, err = 0;

        /*
         * Check that a write into a whitelisted register works, but is
         * invisible to a second context.
         */

        if (!intel_engines_has_context_isolation(gt->i915))
                return 0;

        for (i = 0; i < ARRAY_SIZE(client); i++) {
                struct i915_address_space *vm;
                struct i915_gem_context *c;

                c = kernel_context(gt->i915);
                if (IS_ERR(c)) {
                        err = PTR_ERR(c);
                        goto err;
                }

                vm = i915_gem_context_get_vm_rcu(c);

                client[i].scratch[0] = create_scratch(vm, 1024);
                if (IS_ERR(client[i].scratch[0])) {
                        err = PTR_ERR(client[i].scratch[0]);
                        i915_vm_put(vm);
                        kernel_context_close(c);
                        goto err;
                }

                client[i].scratch[1] = create_scratch(vm, 1024);
                if (IS_ERR(client[i].scratch[1])) {
                        err = PTR_ERR(client[i].scratch[1]);
                        i915_vma_unpin_and_release(&client[i].scratch[0], 0);
                        i915_vm_put(vm);
                        kernel_context_close(c);
                        goto err;
                }

                client[i].ctx = c;
                i915_vm_put(vm);
        }

        for_each_engine(engine, gt, id) {
                if (!engine->kernel_context->vm)
                        continue;

                if (!whitelist_writable_count(engine))
                        continue;

                /* Read default values */
                err = read_whitelisted_registers(client[0].ctx, engine,
                                                 client[0].scratch[0]);
                if (err)
                        goto err;

                /* Try to overwrite registers (should only affect ctx0) */
                err = scrub_whitelisted_registers(client[0].ctx, engine);
                if (err)
                        goto err;

                /* Read values from ctx1, we expect these to be defaults */
                err = read_whitelisted_registers(client[1].ctx, engine,
                                                 client[1].scratch[0]);
                if (err)
                        goto err;

                /* Verify that both reads return the same default values */
                err = check_whitelisted_registers(engine,
                                                  client[0].scratch[0],
                                                  client[1].scratch[0],
                                                  result_eq);
                if (err)
                        goto err;

                /* Read back the updated values in ctx0 */
                err = read_whitelisted_registers(client[0].ctx, engine,
                                                 client[0].scratch[1]);
                if (err)
                        goto err;

                /* User should be granted privilege to overwrite regs */
                err = check_whitelisted_registers(engine,
                                                  client[0].scratch[0],
                                                  client[0].scratch[1],
                                                  result_neq);
                if (err)
                        goto err;
        }

err:
        for (i = 0; i < ARRAY_SIZE(client); i++) {
                if (!client[i].ctx)
                        break;

                i915_vma_unpin_and_release(&client[i].scratch[1], 0);
                i915_vma_unpin_and_release(&client[i].scratch[0], 0);
                kernel_context_close(client[i].ctx);
        }

        if (igt_flush_test(gt->i915))
                err = -EIO;

        return err;
}

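/*
 * Verify the currently applied GT, engine and context workarounds against the
 * reference lists captured by reference_lists_init().
 */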
static bool
verify_wa_lists(struct i915_gem_context *ctx, struct wa_lists *lists,
                const char *str)
{
        struct drm_i915_private *i915 = ctx->i915;
        struct i915_gem_engines_iter it;
        struct intel_context *ce;
        bool ok = true;

        ok &= wa_list_verify(&i915->uncore, &lists->gt_wa_list, str);

        for_each_gem_engine(ce, i915_gem_context_engines(ctx), it) {
                enum intel_engine_id id = ce->engine->id;

                ok &= engine_wa_list_verify(ce,
                                            &lists->engine[id].wa_list,
                                            str) == 0;

                ok &= engine_wa_list_verify(ce,
                                            &lists->engine[id].ctx_wa_list,
                                            str) == 0;
        }

        return ok;
}

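/*
 * Check that the workaround lists survive a full-chip (GPU) reset.
 */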
static int
live_gpu_reset_workarounds(void *arg)
{
        struct intel_gt *gt = arg;
        struct i915_gem_context *ctx;
        intel_wakeref_t wakeref;
        struct wa_lists lists;
        bool ok;

        if (!intel_has_gpu_reset(gt))
                return 0;

        ctx = kernel_context(gt->i915);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        i915_gem_context_lock_engines(ctx);

        pr_info("Verifying after GPU reset...\n");

        igt_global_reset_lock(gt);
        wakeref = intel_runtime_pm_get(gt->uncore->rpm);

        reference_lists_init(gt, &lists);

        ok = verify_wa_lists(ctx, &lists, "before reset");
        if (!ok)
                goto out;

        intel_gt_reset(gt, ALL_ENGINES, "live_workarounds");

        ok = verify_wa_lists(ctx, &lists, "after reset");

out:
        i915_gem_context_unlock_engines(ctx);
        kernel_context_close(ctx);
        reference_lists_fini(gt, &lists);
        intel_runtime_pm_put(gt->uncore->rpm, wakeref);
        igt_global_reset_unlock(gt);

        return ok ? 0 : -ESRCH;
}

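/*
 * Check that the workaround lists survive a per-engine reset, both while the
 * engine is idle and while it is busy running a spinner.
 */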
static int
live_engine_reset_workarounds(void *arg)
{
        struct intel_gt *gt = arg;
        struct i915_gem_engines_iter it;
        struct i915_gem_context *ctx;
        struct intel_context *ce;
        struct igt_spinner spin;
        struct i915_request *rq;
        intel_wakeref_t wakeref;
        struct wa_lists lists;
        int ret = 0;

        if (!intel_has_reset_engine(gt))
                return 0;

        ctx = kernel_context(gt->i915);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        igt_global_reset_lock(gt);
        wakeref = intel_runtime_pm_get(gt->uncore->rpm);

        reference_lists_init(gt, &lists);

        for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
                struct intel_engine_cs *engine = ce->engine;
                bool ok;

                pr_info("Verifying after %s reset...\n", engine->name);

                ok = verify_wa_lists(ctx, &lists, "before reset");
                if (!ok) {
                        ret = -ESRCH;
                        goto err;
                }

                intel_engine_reset(engine, "live_workarounds");

                ok = verify_wa_lists(ctx, &lists, "after idle reset");
                if (!ok) {
                        ret = -ESRCH;
                        goto err;
                }

                ret = igt_spinner_init(&spin, engine->gt);
                if (ret)
                        goto err;

                rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
                if (IS_ERR(rq)) {
                        ret = PTR_ERR(rq);
                        igt_spinner_fini(&spin);
                        goto err;
                }

                ret = request_add_spin(rq, &spin);
                if (ret) {
                        pr_err("Spinner failed to start\n");
                        igt_spinner_fini(&spin);
                        goto err;
                }

                intel_engine_reset(engine, "live_workarounds");

                igt_spinner_end(&spin);
                igt_spinner_fini(&spin);

                ok = verify_wa_lists(ctx, &lists, "after busy reset");
                if (!ok) {
                        ret = -ESRCH;
                        goto err;
                }
        }
err:
        i915_gem_context_unlock_engines(ctx);
        reference_lists_fini(gt, &lists);
        intel_runtime_pm_put(gt->uncore->rpm, wakeref);
        igt_global_reset_unlock(gt);
        kernel_context_close(ctx);

        igt_flush_test(gt->i915);

        return ret;
}

int intel_workarounds_live_selftests(struct drm_i915_private *i915)
{
        static const struct i915_subtest tests[] = {
                SUBTEST(live_dirty_whitelist),
                SUBTEST(live_reset_whitelist),
                SUBTEST(live_isolated_whitelist),
                SUBTEST(live_gpu_reset_workarounds),
                SUBTEST(live_engine_reset_workarounds),
        };

        if (intel_gt_is_wedged(&i915->gt))
                return 0;

        return intel_gt_live_subtests(tests, &i915->gt);
}