/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2017 Intel Corporation
 */

#include <linux/prime_numbers.h>

#include "gem/i915_gem_pm.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"
#include "gt/intel_reset.h"
#include "i915_selftest.h"

#include "gem/selftests/igt_gem_utils.h"
#include "selftests/i915_random.h"
#include "selftests/igt_flush_test.h"
#include "selftests/igt_live_test.h"
#include "selftests/igt_reset.h"
#include "selftests/igt_spinner.h"
#include "selftests/mock_drm.h"
#include "selftests/mock_gem_device.h"

#include "huge_gem_object.h"
#include "igt_gem_utils.h"

#define DW_PER_PAGE (PAGE_SIZE / sizeof(u32))

static inline struct i915_address_space *ctx_vm(struct i915_gem_context *ctx)
{
	/* single threaded, private ctx */
	return rcu_dereference_protected(ctx->vm, true);
}

static int live_nop_switch(void *arg)
{
	const unsigned int nctx = 1024;
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct i915_gem_context **ctx;
	struct igt_live_test t;
	struct file *file;
	unsigned long n;
	int err = -ENODEV;

	/*
	 * Create as many contexts as we can feasibly get away with
	 * and check we can switch between them rapidly.
	 *
	 * Serves as very simple stress test for submission and HW switching
	 * between contexts.
	 */

	if (!DRIVER_CAPS(i915)->has_logical_contexts)
		return 0;

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	ctx = kcalloc(nctx, sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		err = -ENOMEM;
		goto out_file;
	}

	for (n = 0; n < nctx; n++) {
		ctx[n] = live_context(i915, file);
		if (IS_ERR(ctx[n])) {
			err = PTR_ERR(ctx[n]);
			goto out_file;
		}
	}

	for_each_uabi_engine(engine, i915) {
		struct i915_request *rq = NULL;
		unsigned long end_time, prime;
		ktime_t times[2] = {};

		times[0] = ktime_get_raw();
		for (n = 0; n < nctx; n++) {
			struct i915_request *this;

			this = igt_request_alloc(ctx[n], engine);
			if (IS_ERR(this)) {
				err = PTR_ERR(this);
				goto out_file;
			}
			if (rq) {
				i915_request_await_dma_fence(this, &rq->fence);
				i915_request_put(rq);
			}
			rq = i915_request_get(this);
			i915_request_add(this);
		}
		if (i915_request_wait(rq, 0, HZ / 5) < 0) {
			pr_err("Failed to populate %d contexts\n", nctx);
			intel_gt_set_wedged(&i915->gt);
			i915_request_put(rq);
			err = -EIO;
			goto out_file;
		}
		i915_request_put(rq);

		times[1] = ktime_get_raw();

		pr_info("Populated %d contexts on %s in %lluns\n",
			nctx, engine->name, ktime_to_ns(times[1] - times[0]));

		err = igt_live_test_begin(&t, i915, __func__, engine->name);
		if (err)
			goto out_file;

		end_time = jiffies + i915_selftest.timeout_jiffies;
		for_each_prime_number_from(prime, 2, 8192) {
			times[1] = ktime_get_raw();

			rq = NULL;
			for (n = 0; n < prime; n++) {
				struct i915_request *this;

				this = igt_request_alloc(ctx[n % nctx], engine);
				if (IS_ERR(this)) {
					err = PTR_ERR(this);
					goto out_file;
				}

				if (rq) { /* Force submission order */
					i915_request_await_dma_fence(this, &rq->fence);
					i915_request_put(rq);
				}

				/*
				 * This space is left intentionally blank.
				 *
				 * We do not actually want to perform any
				 * action with this request, we just want
				 * to measure the latency in allocation
				 * and submission of our breadcrumbs -
				 * ensuring that the bare request is sufficient
				 * for the system to work (i.e. proper HEAD
				 * tracking of the rings, interrupt handling,
				 * etc). It also gives us the lowest bounds
				 * for latency.
				 */

				rq = i915_request_get(this);
				i915_request_add(this);
			}
			GEM_BUG_ON(!rq);
			if (i915_request_wait(rq, 0, HZ / 5) < 0) {
				pr_err("Switching between %ld contexts timed out\n",
				       prime);
				intel_gt_set_wedged(&i915->gt);
				i915_request_put(rq);
				break;
			}
			i915_request_put(rq);

			times[1] = ktime_sub(ktime_get_raw(), times[1]);
			if (prime == 2)
				times[0] = times[1];

			if (__igt_timeout(end_time, NULL))
				break;
		}

		err = igt_live_test_end(&t);
		if (err)
			goto out_file;

		pr_info("Switch latencies on %s: 1 = %lluns, %lu = %lluns\n",
			engine->name,
			ktime_to_ns(times[0]),
			prime - 1, div64_u64(ktime_to_ns(times[1]), prime - 1));
	}

out_file:
	fput(file);
	return err;
}

struct parallel_switch {
	struct task_struct *tsk;
	struct intel_context *ce[2];
};
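
/*
 * Worker for live_parallel_switch: ping-pong between the two contexts on
 * one engine, waiting for each chained pair of requests to complete
 * before submitting the next pair (the fully synchronous variant).
 */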
static int __live_parallel_switch1(void *data)
{
	struct parallel_switch *arg = data;
	IGT_TIMEOUT(end_time);
	unsigned long count;

	count = 0;
	do {
		struct i915_request *rq = NULL;
		int err, n;

		err = 0;
		for (n = 0; !err && n < ARRAY_SIZE(arg->ce); n++) {
			struct i915_request *prev = rq;

			rq = i915_request_create(arg->ce[n]);
			if (IS_ERR(rq)) {
				i915_request_put(prev);
				return PTR_ERR(rq);
			}

			i915_request_get(rq);
			if (prev) {
				err = i915_request_await_dma_fence(rq, &prev->fence);
				i915_request_put(prev);
			}

			i915_request_add(rq);
		}
		if (i915_request_wait(rq, 0, HZ / 5) < 0)
			err = -ETIME;
		i915_request_put(rq);
		if (err)
			return err;

		count++;
	} while (!__igt_timeout(end_time, NULL));

	pr_info("%s: %lu switches (sync)\n", arg->ce[0]->engine->name, count);
	return 0;
}

static int __live_parallel_switchN(void *data)
{
	struct parallel_switch *arg = data;
	struct i915_request *rq = NULL;
	IGT_TIMEOUT(end_time);
	unsigned long count;
	int n;

	count = 0;
	do {
		for (n = 0; n < ARRAY_SIZE(arg->ce); n++) {
			struct i915_request *prev = rq;
			int err = 0;

			rq = i915_request_create(arg->ce[n]);
			if (IS_ERR(rq)) {
				i915_request_put(prev);
				return PTR_ERR(rq);
			}

			i915_request_get(rq);
			if (prev) {
				err = i915_request_await_dma_fence(rq, &prev->fence);
				i915_request_put(prev);
			}

			i915_request_add(rq);
			if (err) {
				i915_request_put(rq);
				return err;
			}
		}

		count++;
	} while (!__igt_timeout(end_time, NULL));
	i915_request_put(rq);

	pr_info("%s: %lu switches (many)\n", arg->ce[0]->engine->name, count);
	return 0;
}

static int live_parallel_switch(void *arg)
{
	struct drm_i915_private *i915 = arg;
	static int (* const func[])(void *arg) = {
		__live_parallel_switch1,
		__live_parallel_switchN,
		NULL,
	};
	struct parallel_switch *data = NULL;
	struct i915_gem_engines *engines;
	struct i915_gem_engines_iter it;
	int (* const *fn)(void *arg);
	struct i915_gem_context *ctx;
	struct intel_context *ce;
	struct file *file;
	int n, m, count;
	int err = 0;

	/*
	 * Check we can process switches on all engines simultaneously.
	 */

	if (!DRIVER_CAPS(i915)->has_logical_contexts)
		return 0;

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	ctx = live_context(i915, file);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto out_file;
	}

	engines = i915_gem_context_lock_engines(ctx);
	count = engines->num_engines;

	data = kcalloc(count, sizeof(*data), GFP_KERNEL);
	if (!data) {
		i915_gem_context_unlock_engines(ctx);
		err = -ENOMEM;
		goto out_file;
	}

	m = 0; /* Use the first context as our template for the engines */
	for_each_gem_engine(ce, engines, it) {
		err = intel_context_pin(ce);
		if (err) {
			i915_gem_context_unlock_engines(ctx);
			goto out;
		}
		data[m++].ce[0] = intel_context_get(ce);
	}
	i915_gem_context_unlock_engines(ctx);

	/* Clone the same set of engines into the other contexts */
	for (n = 1; n < ARRAY_SIZE(data->ce); n++) {
		ctx = live_context(i915, file);
		if (IS_ERR(ctx)) {
			err = PTR_ERR(ctx);
			goto out;
		}

		for (m = 0; m < count; m++) {
			if (!data[m].ce[0])
				continue;

			ce = intel_context_create(data[m].ce[0]->engine);
			if (IS_ERR(ce)) {
				err = PTR_ERR(ce);
				goto out;
			}

			err = intel_context_pin(ce);
			if (err) {
				intel_context_put(ce);
				goto out;
			}

			data[m].ce[n] = ce;
		}
	}

	for (fn = func; !err && *fn; fn++) {
		struct igt_live_test t;
		int n;

		err = igt_live_test_begin(&t, i915, __func__, "");
		if (err)
			break;

		for (n = 0; n < count; n++) {
			if (!data[n].ce[0])
				continue;

			data[n].tsk = kthread_run(*fn, &data[n],
						  "igt/parallel:%s",
						  data[n].ce[0]->engine->name);
			if (IS_ERR(data[n].tsk)) {
				err = PTR_ERR(data[n].tsk);
				break;
			}
			get_task_struct(data[n].tsk);
		}

		yield(); /* start all threads before we kthread_stop() */

		for (n = 0; n < count; n++) {
			int status;

			if (IS_ERR_OR_NULL(data[n].tsk))
				continue;

			status = kthread_stop(data[n].tsk);
			if (status && !err)
				err = status;

			put_task_struct(data[n].tsk);
			data[n].tsk = NULL;
		}

		if (igt_live_test_end(&t))
			err = -EIO;
	}

out:
	for (n = 0; n < count; n++) {
		for (m = 0; m < ARRAY_SIZE(data->ce); m++) {
			if (!data[n].ce[m])
				continue;

			intel_context_unpin(data[n].ce[m]);
			intel_context_put(data[n].ce[m]);
		}
	}
	kfree(data);
out_file:
	fput(file);
	return err;
}

static unsigned long real_page_count(struct drm_i915_gem_object *obj)
{
	return huge_gem_object_phys_size(obj) >> PAGE_SHIFT;
}

static unsigned long fake_page_count(struct drm_i915_gem_object *obj)
{
	return huge_gem_object_dma_size(obj) >> PAGE_SHIFT;
}
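
/*
 * Bind the object into the context's vm and, from the GPU, write the value
 * dw into the dw'th dword slot of each (real) page. dw thus selects both
 * the value written and the slot, giving each pass a distinct pattern
 * that cpu_check() can later verify.
 */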
static int gpu_fill(struct intel_context *ce,
		    struct drm_i915_gem_object *obj,
		    unsigned int dw)
{
	struct i915_vma *vma;
	int err;

	GEM_BUG_ON(obj->base.size > ce->vm->total);
	GEM_BUG_ON(!intel_engine_can_store_dword(ce->engine));

	vma = i915_vma_instance(obj, ce->vm, NULL);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	err = i915_vma_pin(vma, 0, 0, PIN_HIGH | PIN_USER);
	if (err)
		return err;

	/*
	 * Within the GTT the huge object maps every page onto
	 * its 1024 real pages (using phys_pfn = dma_pfn % 1024).
	 * We set the nth dword within the page using the nth
	 * mapping via the GTT - this should exercise the GTT mapping
	 * whilst checking that each context provides a unique view
	 * into the object.
	 */
	err = igt_gpu_fill_dw(ce, vma,
			      (dw * real_page_count(obj)) << PAGE_SHIFT |
			      (dw * sizeof(u32)),
			      real_page_count(obj),
			      dw);
	i915_vma_unpin(vma);

	return err;
}
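
/*
 * Initialise every dword of the object from the CPU, flushing the cache
 * lines to memory on non-LLC platforms so that the GPU observes the values.
 */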
static int cpu_fill(struct drm_i915_gem_object *obj, u32 value)
{
	const bool has_llc = HAS_LLC(to_i915(obj->base.dev));
	unsigned int n, m, need_flush;
	int err;

	err = i915_gem_object_prepare_write(obj, &need_flush);
	if (err)
		return err;

	for (n = 0; n < real_page_count(obj); n++) {
		u32 *map;

		map = kmap_atomic(i915_gem_object_get_page(obj, n));
		for (m = 0; m < DW_PER_PAGE; m++)
			map[m] = value;
		if (!has_llc)
			drm_clflush_virt_range(map, PAGE_SIZE);
		kunmap_atomic(map);
	}

	i915_gem_object_finish_access(obj);
	obj->read_domains = I915_GEM_DOMAIN_GTT | I915_GEM_DOMAIN_CPU;
	obj->write_domain = 0;
	return 0;
}
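
/*
 * Verify from the CPU that the first max dwords of each page hold the
 * ramp written by gpu_fill() (slot m == value m), and that the remainder
 * still hold the STACK_MAGIC poison from cpu_fill(), i.e. were never
 * touched by the GPU.
 */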
static noinline int cpu_check(struct drm_i915_gem_object *obj,
			      unsigned int idx, unsigned int max)
{
	unsigned int n, m, needs_flush;
	int err;

	err = i915_gem_object_prepare_read(obj, &needs_flush);
	if (err)
		return err;

	for (n = 0; n < real_page_count(obj); n++) {
		u32 *map;

		map = kmap_atomic(i915_gem_object_get_page(obj, n));
		if (needs_flush & CLFLUSH_BEFORE)
			drm_clflush_virt_range(map, PAGE_SIZE);

		for (m = 0; m < max; m++) {
			if (map[m] != m) {
				pr_err("%pS: Invalid value at object %d page %d/%ld, offset %d/%d: found %x expected %x\n",
				       __builtin_return_address(0), idx,
				       n, real_page_count(obj), m, max,
				       map[m], m);
				err = -EINVAL;
				goto out_unmap;
			}
		}

		for (; m < DW_PER_PAGE; m++) {
			if (map[m] != STACK_MAGIC) {
				pr_err("%pS: Invalid value at object %d page %d, offset %d: found %x expected %x (uninitialised)\n",
				       __builtin_return_address(0), idx, n, m,
				       map[m], STACK_MAGIC);
				err = -EINVAL;
				goto out_unmap;
			}
		}

out_unmap:
		kunmap_atomic(map);
		if (err)
			break;
	}

	i915_gem_object_finish_access(obj);
	return err;
}

static int file_add_object(struct file *file, struct drm_i915_gem_object *obj)
{
	int err;

	GEM_BUG_ON(obj->base.handle_count);

	/* tie the object to the drm_file for easy reaping */
	err = idr_alloc(&to_drm_file(file)->object_idr,
			&obj->base, 1, 0, GFP_KERNEL);
	if (err < 0)
		return err;

	i915_gem_object_get(obj);
	obj->base.handle_count++;
	return 0;
}
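
/*
 * Create a huge_gem_object (a small set of physical pages aliased many
 * times over a large virtual size) sized to fit the vm, poison it from
 * the CPU with STACK_MAGIC, and track it on the objects list so the
 * caller can validate it later with cpu_check().
 */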
static struct drm_i915_gem_object *
create_test_object(struct i915_address_space *vm,
		   struct file *file,
		   struct list_head *objects)
{
	struct drm_i915_gem_object *obj;
	u64 size;
	int err;

	/* Keep in GEM's good graces */
	intel_gt_retire_requests(vm->gt);

	size = min(vm->total / 2, 1024ull * DW_PER_PAGE * PAGE_SIZE);
	size = round_down(size, DW_PER_PAGE * PAGE_SIZE);

	obj = huge_gem_object(vm->i915, DW_PER_PAGE * PAGE_SIZE, size);
	if (IS_ERR(obj))
		return obj;

	err = file_add_object(file, obj);
	i915_gem_object_put(obj);
	if (err)
		return ERR_PTR(err);

	err = cpu_fill(obj, STACK_MAGIC);
	if (err) {
		pr_err("Failed to fill object with cpu, err=%d\n",
		       err);
		return ERR_PTR(err);
	}

	list_add_tail(&obj->st_link, objects);
	return obj;
}

static unsigned long max_dwords(struct drm_i915_gem_object *obj)
{
	unsigned long npages = fake_page_count(obj);

	GEM_BUG_ON(!IS_ALIGNED(npages, DW_PER_PAGE));
	return npages / DW_PER_PAGE;
}
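
/*
 * Rate-limit our submissions: throttle() keeps a small FIFO of in-flight
 * requests per context, waiting on the oldest entry before queuing a new
 * one, so the selftests cannot run unboundedly far ahead of the HW.
 * throttle_release() drops any references still held in the FIFO.
 */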
static void throttle_release(struct i915_request **q, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		if (IS_ERR_OR_NULL(q[i]))
			continue;

		i915_request_put(fetch_and_zero(&q[i]));
	}
}

static int throttle(struct intel_context *ce,
		    struct i915_request **q, int count)
{
	int i;

	if (!IS_ERR_OR_NULL(q[0])) {
		if (i915_request_wait(q[0],
				      I915_WAIT_INTERRUPTIBLE,
				      MAX_SCHEDULE_TIMEOUT) < 0)
			return -EINTR;

		i915_request_put(q[0]);
	}

	for (i = 0; i < count - 1; i++)
		q[i] = q[i + 1];

	q[i] = intel_context_create_request(ce);
	if (IS_ERR(q[i]))
		return PTR_ERR(q[i]);

	i915_request_get(q[i]);
	i915_request_add(q[i]);

	return 0;
}

static int igt_ctx_exec(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	int err = -ENODEV;

	/*
	 * Create a few different contexts (with different mm) and write
	 * through each ctx/mm using the GPU making sure those writes end
	 * up in the expected pages of our obj.
	 */

	if (!DRIVER_CAPS(i915)->has_logical_contexts)
		return 0;

	for_each_uabi_engine(engine, i915) {
		struct drm_i915_gem_object *obj = NULL;
		unsigned long ncontexts, ndwords, dw;
		struct i915_request *tq[5] = {};
		struct igt_live_test t;
		IGT_TIMEOUT(end_time);
		LIST_HEAD(objects);
		struct file *file;

		if (!intel_engine_can_store_dword(engine))
			continue;

		if (!engine->context_size)
			continue; /* No logical context support in HW */

		file = mock_file(i915);
		if (IS_ERR(file))
			return PTR_ERR(file);

		err = igt_live_test_begin(&t, i915, __func__, engine->name);
		if (err)
			goto out_file;

		ncontexts = 0;
		ndwords = 0;
		dw = 0;
		while (!time_after(jiffies, end_time)) {
			struct i915_gem_context *ctx;
			struct intel_context *ce;

			ctx = kernel_context(i915);
			if (IS_ERR(ctx)) {
				err = PTR_ERR(ctx);
				goto out_file;
			}

			ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
			GEM_BUG_ON(IS_ERR(ce));

			if (!obj) {
				obj = create_test_object(ce->vm, file, &objects);
				if (IS_ERR(obj)) {
					err = PTR_ERR(obj);
					intel_context_put(ce);
					kernel_context_close(ctx);
					goto out_file;
				}
			}

			err = gpu_fill(ce, obj, dw);
			if (err) {
				pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) [full-ppgtt? %s], err=%d\n",
				       ndwords, dw, max_dwords(obj),
				       engine->name,
				       yesno(!!rcu_access_pointer(ctx->vm)),
				       err);
				intel_context_put(ce);
				kernel_context_close(ctx);
				goto out_file;
			}

			err = throttle(ce, tq, ARRAY_SIZE(tq));
			if (err) {
				intel_context_put(ce);
				kernel_context_close(ctx);
				goto out_file;
			}

			if (++dw == max_dwords(obj)) {
				obj = NULL;
				dw = 0;
			}

			ndwords++;
			ncontexts++;

			intel_context_put(ce);
			kernel_context_close(ctx);
		}

		pr_info("Submitted %lu contexts to %s, filling %lu dwords\n",
			ncontexts, engine->name, ndwords);

		ncontexts = dw = 0;
		list_for_each_entry(obj, &objects, st_link) {
			unsigned int rem =
				min_t(unsigned int, ndwords - dw, max_dwords(obj));

			err = cpu_check(obj, ncontexts++, rem);
			if (err)
				break;

			dw += rem;
		}

out_file:
		throttle_release(tq, ARRAY_SIZE(tq));
		if (igt_live_test_end(&t))
			err = -EIO;
		fput(file);
		if (err)
			return err;

		i915_gem_drain_freed_objects(i915);
	}

	return 0;
}

static int igt_shared_ctx_exec(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_request *tq[5] = {};
	struct i915_gem_context *parent;
	struct intel_engine_cs *engine;
	struct igt_live_test t;
	struct file *file;
	int err = 0;

	/*
	 * Create a few different contexts with the same mm and write
	 * through each ctx using the GPU making sure those writes end
	 * up in the expected pages of our obj.
	 */

	if (!DRIVER_CAPS(i915)->has_logical_contexts)
		return 0;

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	parent = live_context(i915, file);
	if (IS_ERR(parent)) {
		err = PTR_ERR(parent);
		goto out_file;
	}

	if (!parent->vm) { /* not full-ppgtt; nothing to share */
		err = 0;
		goto out_file;
	}

	err = igt_live_test_begin(&t, i915, __func__, "");
	if (err)
		goto out_file;

	for_each_uabi_engine(engine, i915) {
		unsigned long ncontexts, ndwords, dw;
		struct drm_i915_gem_object *obj = NULL;
		IGT_TIMEOUT(end_time);
		LIST_HEAD(objects);

		if (!intel_engine_can_store_dword(engine))
			continue;

		dw = 0;
		ndwords = 0;
		ncontexts = 0;
		while (!time_after(jiffies, end_time)) {
			struct i915_gem_context *ctx;
			struct intel_context *ce;

			ctx = kernel_context(i915);
			if (IS_ERR(ctx)) {
				err = PTR_ERR(ctx);
				goto out_test;
			}

			mutex_lock(&ctx->mutex);
			__assign_ppgtt(ctx, ctx_vm(parent));
			mutex_unlock(&ctx->mutex);

			ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
			GEM_BUG_ON(IS_ERR(ce));

			if (!obj) {
				obj = create_test_object(ctx_vm(parent),
							 file, &objects);
				if (IS_ERR(obj)) {
					err = PTR_ERR(obj);
					intel_context_put(ce);
					kernel_context_close(ctx);
					goto out_test;
				}
			}

			err = gpu_fill(ce, obj, dw);
			if (err) {
				pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) [full-ppgtt? %s], err=%d\n",
				       ndwords, dw, max_dwords(obj),
				       engine->name,
				       yesno(!!rcu_access_pointer(ctx->vm)),
				       err);
				intel_context_put(ce);
				kernel_context_close(ctx);
				goto out_test;
			}

			err = throttle(ce, tq, ARRAY_SIZE(tq));
			if (err) {
				intel_context_put(ce);
				kernel_context_close(ctx);
				goto out_test;
			}

			if (++dw == max_dwords(obj)) {
				obj = NULL;
				dw = 0;
			}

			ndwords++;
			ncontexts++;

			intel_context_put(ce);
			kernel_context_close(ctx);
		}
		pr_info("Submitted %lu contexts to %s, filling %lu dwords\n",
			ncontexts, engine->name, ndwords);

		ncontexts = dw = 0;
		list_for_each_entry(obj, &objects, st_link) {
			unsigned int rem =
				min_t(unsigned int, ndwords - dw, max_dwords(obj));

			err = cpu_check(obj, ncontexts++, rem);
			if (err)
				goto out_test;

			dw += rem;
		}

		i915_gem_drain_freed_objects(i915);
	}
out_test:
	throttle_release(tq, ARRAY_SIZE(tq));
	if (igt_live_test_end(&t))
		err = -EIO;
out_file:
	fput(file);
	return err;
}
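
/*
 * Build and pin a small batch that samples the render power clock state
 * register (R_PWR_CLK_STATE) with MI_STORE_REGISTER_MEM into the first
 * dword of the target vma; gen8+ only.
 */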
static struct i915_vma *rpcs_query_batch(struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj;
	u32 *cmd;
	int err;

	if (INTEL_GEN(vma->vm->i915) < 8)
		return ERR_PTR(-EINVAL);

	obj = i915_gem_object_create_internal(vma->vm->i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto err;
	}

	*cmd++ = MI_STORE_REGISTER_MEM_GEN8;
	*cmd++ = i915_mmio_reg_offset(GEN8_R_PWR_CLK_STATE);
	*cmd++ = lower_32_bits(vma->node.start);
	*cmd++ = upper_32_bits(vma->node.start);
	*cmd = MI_BATCH_BUFFER_END;

	__i915_gem_object_flush_map(obj, 0, 64);
	i915_gem_object_unpin_map(obj);

	intel_gt_chipset_flush(vma->vm->gt);

	vma = i915_vma_instance(obj, vma->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto err;

	return vma;

err:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

static int
emit_rpcs_query(struct drm_i915_gem_object *obj,
		struct intel_context *ce,
		struct i915_request **rq_out)
{
	struct i915_request *rq;
	struct i915_vma *batch;
	struct i915_vma *vma;
	int err;

	GEM_BUG_ON(!intel_engine_can_store_dword(ce->engine));

	vma = i915_vma_instance(obj, ce->vm, NULL);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	i915_gem_object_lock(obj);
	err = i915_gem_object_set_to_gtt_domain(obj, false);
	i915_gem_object_unlock(obj);
	if (err)
		return err;

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		return err;

	batch = rpcs_query_batch(vma);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto err_vma;
	}

	rq = i915_request_create(ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_batch;
	}

	err = rq->engine->emit_bb_start(rq,
					batch->node.start, batch->node.size,
					0);
	if (err)
		goto err_request;

	i915_vma_lock(batch);
	err = i915_request_await_object(rq, batch->obj, false);
	if (err == 0)
		err = i915_vma_move_to_active(batch, rq, 0);
	i915_vma_unlock(batch);
	if (err)
		goto skip_request;

	i915_vma_lock(vma);
	err = i915_request_await_object(rq, vma->obj, true);
	if (err == 0)
		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	i915_vma_unlock(vma);
	if (err)
		goto skip_request;

	i915_vma_unpin_and_release(&batch, 0);
	i915_vma_unpin(vma);

	*rq_out = i915_request_get(rq);

	i915_request_add(rq);

	return 0;

skip_request:
	i915_request_skip(rq, err);
err_request:
	i915_request_add(rq);
err_batch:
	i915_vma_unpin_and_release(&batch, 0);
err_vma:
	i915_vma_unpin(vma);

	return err;
}

#define TEST_IDLE	BIT(0)
#define TEST_BUSY	BIT(1)
#define TEST_RESET	BIT(2)

static int
__sseu_prepare(const char *name,
	       unsigned int flags,
	       struct intel_context *ce,
	       struct igt_spinner **spin)
{
	struct i915_request *rq;
	int ret;

	*spin = NULL;
	if (!(flags & (TEST_BUSY | TEST_RESET)))
		return 0;

	*spin = kzalloc(sizeof(**spin), GFP_KERNEL);
	if (!*spin)
		return -ENOMEM;

	ret = igt_spinner_init(*spin, ce->engine->gt);
	if (ret)
		goto err_free;

	rq = igt_spinner_create_request(*spin, ce, MI_NOOP);
	if (IS_ERR(rq)) {
		ret = PTR_ERR(rq);
		goto err_fini;
	}

	i915_request_add(rq);

	if (!igt_wait_for_spinner(*spin, rq)) {
		pr_err("%s: Spinner failed to start!\n", name);
		ret = -ETIMEDOUT;
		goto err_end;
	}

	return 0;

err_end:
	igt_spinner_end(*spin);
err_fini:
	igt_spinner_fini(*spin);
err_free:
	kfree(fetch_and_zero(spin));
	return ret;
}
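
/*
 * Run an RPCS query on ce and decode the slice count from the register
 * value the GPU wrote back, returning that count (or a negative error);
 * the raw register value is also reported through *rpcs.
 */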
static int
__read_slice_count(struct intel_context *ce,
		   struct drm_i915_gem_object *obj,
		   struct igt_spinner *spin,
		   u32 *rpcs)
{
	struct i915_request *rq = NULL;
	u32 s_mask, s_shift;
	unsigned int cnt;
	u32 *buf, val;
	long ret;

	ret = emit_rpcs_query(obj, ce, &rq);
	if (ret)
		return ret;

	if (spin)
		igt_spinner_end(spin);

	ret = i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
	i915_request_put(rq);
	if (ret < 0)
		return ret;

	buf = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(buf)) {
		ret = PTR_ERR(buf);
		return ret;
	}

	if (INTEL_GEN(ce->engine->i915) >= 11) {
		s_mask = GEN11_RPCS_S_CNT_MASK;
		s_shift = GEN11_RPCS_S_CNT_SHIFT;
	} else {
		s_mask = GEN8_RPCS_S_CNT_MASK;
		s_shift = GEN8_RPCS_S_CNT_SHIFT;
	}

	val = *buf;
	cnt = (val & s_mask) >> s_shift;
	*rpcs = val;

	i915_gem_object_unpin_map(obj);

	return cnt;
}

static int
__check_rpcs(const char *name, u32 rpcs, int slices, unsigned int expected,
	     const char *prefix, const char *suffix)
{
	if (slices == expected)
		return 0;

	if (slices < 0) {
		pr_err("%s: %s read slice count failed with %d%s\n",
		       name, prefix, slices, suffix);
		return slices;
	}

	pr_err("%s: %s slice count %d is not %u%s\n",
	       name, prefix, slices, expected, suffix);

	pr_info("RPCS=0x%x; %u%sx%u%s\n",
		rpcs, slices,
		(rpcs & GEN8_RPCS_S_CNT_ENABLE) ? "*" : "",
		(rpcs & GEN8_RPCS_SS_CNT_MASK) >> GEN8_RPCS_SS_CNT_SHIFT,
		(rpcs & GEN8_RPCS_SS_CNT_ENABLE) ? "*" : "");

	return -EINVAL;
}

static int
__sseu_finish(const char *name,
	      unsigned int flags,
	      struct intel_context *ce,
	      struct drm_i915_gem_object *obj,
	      unsigned int expected,
	      struct igt_spinner *spin)
{
	unsigned int slices = hweight32(ce->engine->sseu.slice_mask);
	u32 rpcs = 0;
	int ret = 0;

	if (flags & TEST_RESET) {
		ret = intel_engine_reset(ce->engine, "sseu");
		if (ret)
			goto out;
	}

	ret = __read_slice_count(ce, obj,
				 flags & TEST_RESET ? NULL : spin, &rpcs);
	ret = __check_rpcs(name, rpcs, ret, expected, "Context", "!");
	if (ret)
		goto out;

	ret = __read_slice_count(ce->engine->kernel_context, obj, NULL, &rpcs);
	ret = __check_rpcs(name, rpcs, ret, slices, "Kernel context", "!");

out:
	if (spin)
		igt_spinner_end(spin);

	if ((flags & TEST_IDLE) && ret == 0) {
		ret = igt_flush_test(ce->engine->i915);
		if (ret)
			return ret;

		ret = __read_slice_count(ce, obj, NULL, &rpcs);
		ret = __check_rpcs(name, rpcs, ret, expected,
				   "Context", " after idle!");
	}

	return ret;
}

static int
__sseu_test(const char *name,
	    unsigned int flags,
	    struct intel_context *ce,
	    struct drm_i915_gem_object *obj,
	    struct intel_sseu sseu)
{
	struct igt_spinner *spin = NULL;
	int ret;

	intel_engine_pm_get(ce->engine);

	ret = __sseu_prepare(name, flags, ce, &spin);
	if (ret)
		goto out_pm;

	ret = intel_context_reconfigure_sseu(ce, sseu);
	if (ret)
		goto out_spin;

	ret = __sseu_finish(name, flags, ce, obj,
			    hweight32(sseu.slice_mask), spin);

out_spin:
	if (spin) {
		igt_spinner_end(spin);
		igt_spinner_fini(spin);
		kfree(spin);
	}
out_pm:
	intel_engine_pm_put(ce->engine);
	return ret;
}

static int
__igt_ctx_sseu(struct drm_i915_private *i915,
	       const char *name,
	       unsigned int flags)
{
	struct drm_i915_gem_object *obj;
	int inst = 0;
	int ret = 0;

	if (INTEL_GEN(i915) < 9 || !RUNTIME_INFO(i915)->sseu.has_slice_pg)
		return 0;

	if (flags & TEST_RESET)
		igt_global_reset_lock(&i915->gt);

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto out_unlock;
	}

	do {
		struct intel_engine_cs *engine;
		struct intel_context *ce;
		struct intel_sseu pg_sseu;

		engine = intel_engine_lookup_user(i915,
						  I915_ENGINE_CLASS_RENDER,
						  inst++);
		if (!engine)
			break;

		if (hweight32(engine->sseu.slice_mask) < 2)
			continue;

		/*
		 * Gen11 VME friendly power-gated configuration with
		 * half enabled sub-slices.
		 */
		pg_sseu = engine->sseu;
		pg_sseu.slice_mask = 1;
		pg_sseu.subslice_mask =
			~(~0 << (hweight32(engine->sseu.subslice_mask) / 2));

		pr_info("%s: SSEU subtest '%s', flags=%x, def_slices=%u, pg_slices=%u\n",
			engine->name, name, flags,
			hweight32(engine->sseu.slice_mask),
			hweight32(pg_sseu.slice_mask));

		ce = intel_context_create(engine);
		if (IS_ERR(ce)) {
			ret = PTR_ERR(ce);
			goto out_put;
		}

		ret = intel_context_pin(ce);
		if (ret)
			goto out_ce;

		/* First set the default mask. */
		ret = __sseu_test(name, flags, ce, obj, engine->sseu);
		if (ret)
			goto out_unpin;

		/* Then set a power-gated configuration. */
		ret = __sseu_test(name, flags, ce, obj, pg_sseu);
		if (ret)
			goto out_unpin;

		/* Back to defaults. */
		ret = __sseu_test(name, flags, ce, obj, engine->sseu);
		if (ret)
			goto out_unpin;

		/* One last power-gated configuration for the road. */
		ret = __sseu_test(name, flags, ce, obj, pg_sseu);
		if (ret)
			goto out_unpin;

out_unpin:
		intel_context_unpin(ce);
out_ce:
		intel_context_put(ce);
	} while (!ret);

	if (igt_flush_test(i915))
		ret = -EIO;

out_put:
	i915_gem_object_put(obj);

out_unlock:
	if (flags & TEST_RESET)
		igt_global_reset_unlock(&i915->gt);

	if (ret)
		pr_err("%s: Failed with %d!\n", name, ret);

	return ret;
}

static int igt_ctx_sseu(void *arg)
{
	struct {
		const char *name;
		unsigned int flags;
	} *phase, phases[] = {
		{ .name = "basic", .flags = 0 },
		{ .name = "idle", .flags = TEST_IDLE },
		{ .name = "busy", .flags = TEST_BUSY },
		{ .name = "busy-reset", .flags = TEST_BUSY | TEST_RESET },
		{ .name = "busy-idle", .flags = TEST_BUSY | TEST_IDLE },
		{ .name = "reset-idle", .flags = TEST_RESET | TEST_IDLE },
	};
	unsigned int i;
	int ret = 0;

	for (i = 0, phase = phases; ret == 0 && i < ARRAY_SIZE(phases);
	     i++, phase++)
		ret = __igt_ctx_sseu(arg, phase->name, phase->flags);

	return ret;
}

static int igt_ctx_readonly(void *arg)
{
	struct drm_i915_private *i915 = arg;
	unsigned long idx, ndwords, dw, num_engines;
	struct drm_i915_gem_object *obj = NULL;
	struct i915_request *tq[5] = {};
	struct i915_gem_engines_iter it;
	struct i915_address_space *vm;
	struct i915_gem_context *ctx;
	struct intel_context *ce;
	struct igt_live_test t;
	I915_RND_STATE(prng);
	IGT_TIMEOUT(end_time);
	LIST_HEAD(objects);
	struct file *file;
	int err = -ENODEV;

	/*
	 * Create a few read-only objects (with the occasional writable object)
	 * and try to write into these objects, checking that the GPU discards
	 * any write to a read-only object.
	 */

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	err = igt_live_test_begin(&t, i915, __func__, "");
	if (err)
		goto out_file;

	ctx = live_context(i915, file);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto out_file;
	}

	vm = ctx_vm(ctx) ?: &i915->ggtt.alias->vm;
	if (!vm || !vm->has_read_only) {
		err = 0;
		goto out_file;
	}

	num_engines = 0;
	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it)
		if (intel_engine_can_store_dword(ce->engine))
			num_engines++;
	i915_gem_context_unlock_engines(ctx);

	ndwords = 0;
	dw = 0;
	while (!time_after(jiffies, end_time)) {
		for_each_gem_engine(ce,
				    i915_gem_context_lock_engines(ctx), it) {
			if (!intel_engine_can_store_dword(ce->engine))
				continue;

			if (!obj) {
				obj = create_test_object(ce->vm, file, &objects);
				if (IS_ERR(obj)) {
					err = PTR_ERR(obj);
					i915_gem_context_unlock_engines(ctx);
					goto out_file;
				}

				if (prandom_u32_state(&prng) & 1)
					i915_gem_object_set_readonly(obj);
			}

			err = gpu_fill(ce, obj, dw);
			if (err) {
				pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) [full-ppgtt? %s], err=%d\n",
				       ndwords, dw, max_dwords(obj),
				       ce->engine->name,
				       yesno(!!ctx_vm(ctx)),
				       err);
				i915_gem_context_unlock_engines(ctx);
				goto out_file;
			}

			err = throttle(ce, tq, ARRAY_SIZE(tq));
			if (err) {
				i915_gem_context_unlock_engines(ctx);
				goto out_file;
			}

			if (++dw == max_dwords(obj)) {
				obj = NULL;
				dw = 0;
			}
			ndwords++;
		}
		i915_gem_context_unlock_engines(ctx);
	}
	pr_info("Submitted %lu dwords (across %lu engines)\n",
		ndwords, num_engines);

	dw = 0;
	idx = 0;
	list_for_each_entry(obj, &objects, st_link) {
		unsigned int rem =
			min_t(unsigned int, ndwords - dw, max_dwords(obj));
		unsigned int num_writes;

		num_writes = rem;
		if (i915_gem_object_is_readonly(obj))
			num_writes = 0;

		err = cpu_check(obj, idx++, num_writes);
		if (err)
			break;

		dw += rem;
	}

out_file:
	throttle_release(tq, ARRAY_SIZE(tq));
	if (igt_live_test_end(&t))
		err = -EIO;
	fput(file);
	return err;
}
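
/*
 * Confirm the randomly chosen scratch offset does not land inside any
 * object already bound into the vm; hitting a live binding would make
 * the isolation read-back meaningless.
 */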
static int check_scratch(struct i915_address_space *vm, u64 offset)
{
	struct drm_mm_node *node =
		__drm_mm_interval_first(&vm->mm,
					offset, offset + sizeof(u32) - 1);
	if (!node || node->start > offset)
		return 0;

	GEM_BUG_ON(offset >= node->start + node->size);

	pr_err("Target offset 0x%08x_%08x overlaps with a node in the mm!\n",
	       upper_32_bits(offset), lower_32_bits(offset));
	return -EINVAL;
}
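
/*
 * From the GPU, store a single dword at an arbitrary unused address in the
 * context's vm. The batch itself is pinned at a fixed offset (zero, as no
 * offset bits accompany PIN_OFFSET_FIXED), below the minimum scratch
 * offset enforced by the GEM_BUG_ON, so it cannot overlap the target.
 */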
static int write_to_scratch(struct i915_gem_context *ctx,
			    struct intel_engine_cs *engine,
			    u64 offset, u32 value)
{
	struct drm_i915_private *i915 = ctx->i915;
	struct drm_i915_gem_object *obj;
	struct i915_address_space *vm;
	struct i915_request *rq;
	struct i915_vma *vma;
	u32 *cmd;
	int err;

	GEM_BUG_ON(offset < I915_GTT_PAGE_SIZE);

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto out;
	}

	*cmd++ = MI_STORE_DWORD_IMM_GEN4;
	if (INTEL_GEN(i915) >= 8) {
		*cmd++ = lower_32_bits(offset);
		*cmd++ = upper_32_bits(offset);
	} else {
		*cmd++ = 0;
		*cmd++ = offset;
	}
	*cmd++ = value;
	*cmd = MI_BATCH_BUFFER_END;
	__i915_gem_object_flush_map(obj, 0, 64);
	i915_gem_object_unpin_map(obj);

	intel_gt_chipset_flush(engine->gt);

	vm = i915_gem_context_get_vm_rcu(ctx);
	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto out_vm;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED);
	if (err)
		goto out_vm;

	err = check_scratch(vm, offset);
	if (err)
		goto err_unpin;

	rq = igt_request_alloc(ctx, engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_unpin;
	}

	err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, 0);
	if (err)
		goto err_request;

	i915_vma_lock(vma);
	err = i915_request_await_object(rq, vma->obj, false);
	if (err == 0)
		err = i915_vma_move_to_active(vma, rq, 0);
	i915_vma_unlock(vma);
	if (err)
		goto skip_request;

	i915_vma_unpin(vma);

	i915_request_add(rq);

	goto out_vm;
skip_request:
	i915_request_skip(rq, err);
err_request:
	i915_request_add(rq);
err_unpin:
	i915_vma_unpin(vma);
out_vm:
	i915_vm_put(vm);
out:
	i915_gem_object_put(obj);
	return err;
}
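
/*
 * The inverse of write_to_scratch: load the dword at offset into a GPR
 * (RCS_GPR0) with MI_LOAD_REGISTER_MEM, store it back into the batch
 * object with MI_STORE_REGISTER_MEM, then read the result from the CPU
 * side into *value.
 */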
static int read_from_scratch(struct i915_gem_context *ctx,
			     struct intel_engine_cs *engine,
			     u64 offset, u32 *value)
{
	struct drm_i915_private *i915 = ctx->i915;
	struct drm_i915_gem_object *obj;
	struct i915_address_space *vm;
	const u32 RCS_GPR0 = 0x2600; /* not all engines have their own GPR! */
	const u32 result = 0x100;
	struct i915_request *rq;
	struct i915_vma *vma;
	u32 *cmd;
	int err;

	GEM_BUG_ON(offset < I915_GTT_PAGE_SIZE);

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto out;
	}

	memset(cmd, POISON_INUSE, PAGE_SIZE);
	if (INTEL_GEN(i915) >= 8) {
		*cmd++ = MI_LOAD_REGISTER_MEM_GEN8;
		*cmd++ = RCS_GPR0;
		*cmd++ = lower_32_bits(offset);
		*cmd++ = upper_32_bits(offset);
		*cmd++ = MI_STORE_REGISTER_MEM_GEN8;
		*cmd++ = RCS_GPR0;
		*cmd++ = result;
		*cmd++ = 0;
	} else {
		*cmd++ = MI_LOAD_REGISTER_MEM;
		*cmd++ = RCS_GPR0;
		*cmd++ = offset;
		*cmd++ = MI_STORE_REGISTER_MEM;
		*cmd++ = RCS_GPR0;
		*cmd++ = result;
	}
	*cmd = MI_BATCH_BUFFER_END;

	i915_gem_object_flush_map(obj);
	i915_gem_object_unpin_map(obj);

	intel_gt_chipset_flush(engine->gt);

	vm = i915_gem_context_get_vm_rcu(ctx);
	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto out_vm;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED);
	if (err)
		goto out_vm;

	err = check_scratch(vm, offset);
	if (err)
		goto err_unpin;

	rq = igt_request_alloc(ctx, engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_unpin;
	}

	err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, 0);
	if (err)
		goto err_request;

	i915_vma_lock(vma);
	err = i915_request_await_object(rq, vma->obj, true);
	if (err == 0)
		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	i915_vma_unlock(vma);
	if (err)
		goto skip_request;

	i915_vma_unpin(vma);
	i915_vma_close(vma);

	i915_request_add(rq);

	i915_gem_object_lock(obj);
	err = i915_gem_object_set_to_cpu_domain(obj, false);
	i915_gem_object_unlock(obj);
	if (err)
		goto out_vm;

	cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto out_vm;
	}

	*value = cmd[result / sizeof(*cmd)];
	i915_gem_object_unpin_map(obj);

	goto out_vm;
skip_request:
	i915_request_skip(rq, err);
err_request:
	i915_request_add(rq);
err_unpin:
	i915_vma_unpin(vma);
out_vm:
	i915_vm_put(vm);
out:
	i915_gem_object_put(obj);
	return err;
}

static int igt_vm_isolation(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx_a, *ctx_b;
	unsigned long num_engines, count;
	struct intel_engine_cs *engine;
	struct igt_live_test t;
	I915_RND_STATE(prng);
	struct file *file;
	u64 vm_total;
	int err;

	if (INTEL_GEN(i915) < 7)
		return 0;

	/*
	 * The simple goal here is that a write into one context is not
	 * observed in a second (separate page tables and scratch).
	 */

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	err = igt_live_test_begin(&t, i915, __func__, "");
	if (err)
		goto out_file;

	ctx_a = live_context(i915, file);
	if (IS_ERR(ctx_a)) {
		err = PTR_ERR(ctx_a);
		goto out_file;
	}

	ctx_b = live_context(i915, file);
	if (IS_ERR(ctx_b)) {
		err = PTR_ERR(ctx_b);
		goto out_file;
	}

	/* We can only test vm isolation if the vm are distinct */
	if (ctx_vm(ctx_a) == ctx_vm(ctx_b))
		goto out_file;

	vm_total = ctx_vm(ctx_a)->total;
	GEM_BUG_ON(ctx_vm(ctx_b)->total != vm_total);
	vm_total -= I915_GTT_PAGE_SIZE;

	count = 0;
	num_engines = 0;
	for_each_uabi_engine(engine, i915) {
		IGT_TIMEOUT(end_time);
		unsigned long this = 0;

		if (!intel_engine_can_store_dword(engine))
			continue;

		while (!__igt_timeout(end_time, NULL)) {
			u32 value = 0xc5c5c5c5;
			u64 offset;

			div64_u64_rem(i915_prandom_u64_state(&prng),
				      vm_total, &offset);
			offset = round_down(offset, alignof_dword);
			offset += I915_GTT_PAGE_SIZE;

			err = write_to_scratch(ctx_a, engine,
					       offset, 0xdeadbeef);
			if (err == 0)
				err = read_from_scratch(ctx_b, engine,
							offset, &value);
			if (err)
				goto out_file;

			if (value) {
				pr_err("%s: Read %08x from scratch (offset 0x%08x_%08x), after %lu reads!\n",
				       engine->name, value,
				       upper_32_bits(offset),
				       lower_32_bits(offset),
				       this);
				err = -EINVAL;
				goto out_file;
			}

			this++;
		}
		count += this;
		num_engines++;
	}
	pr_info("Checked %lu scratch offsets across %lu engines\n",
		count, num_engines);

out_file:
	if (igt_live_test_end(&t))
		err = -EIO;
	fput(file);
	return err;
}
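
/* Filter for context_barrier_task(): skip engines never used by this ctx */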
static bool skip_unused_engines(struct intel_context *ce, void *data)
{
	return !ce->state;
}

static void mock_barrier_task(void *data)
{
	unsigned int *counter = data;

	++*counter;
}

static int mock_context_barrier(void *arg)
{
#undef pr_fmt
#define pr_fmt(x) "context_barrier_task():" # x
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx;
	struct i915_request *rq;
	unsigned int counter;
	int err;

	/*
	 * The context barrier provides us with a callback after it emits
	 * a request; useful for retiring old state after loading new.
	 */

	ctx = mock_context(i915, "mock");
	if (!ctx)
		return -ENOMEM;

	counter = 0;
	err = context_barrier_task(ctx, 0,
				   NULL, NULL, mock_barrier_task, &counter);
	if (err) {
		pr_err("Failed at line %d, err=%d\n", __LINE__, err);
		goto out;
	}
	if (counter == 0) {
		pr_err("Did not retire immediately with 0 engines\n");
		err = -EINVAL;
		goto out;
	}

	counter = 0;
	err = context_barrier_task(ctx, ALL_ENGINES,
				   skip_unused_engines,
				   NULL,
				   mock_barrier_task,
				   &counter);
	if (err) {
		pr_err("Failed at line %d, err=%d\n", __LINE__, err);
		goto out;
	}
	if (counter == 0) {
		pr_err("Did not retire immediately for all unused engines\n");
		err = -EINVAL;
		goto out;
	}

	rq = igt_request_alloc(ctx, i915->engine[RCS0]);
	if (IS_ERR(rq)) {
		pr_err("Request allocation failed!\n");
		err = PTR_ERR(rq);
		goto out;
	}
	i915_request_add(rq);

	counter = 0;
	context_barrier_inject_fault = BIT(RCS0);
	err = context_barrier_task(ctx, ALL_ENGINES,
				   NULL, NULL, mock_barrier_task, &counter);
	context_barrier_inject_fault = 0;
	if (err == -ENXIO)
		err = 0;
	else
		pr_err("Did not hit fault injection!\n");
	if (counter != 0) {
		pr_err("Invoked callback on error!\n");
		err = -EIO;
	}
	if (err)
		goto out;

	counter = 0;
	err = context_barrier_task(ctx, ALL_ENGINES,
				   skip_unused_engines,
				   NULL,
				   mock_barrier_task,
				   &counter);
	if (err) {
		pr_err("Failed at line %d, err=%d\n", __LINE__, err);
		goto out;
	}
	mock_device_flush(i915);
	if (counter == 0) {
		pr_err("Did not retire on each active engine\n");
		err = -EINVAL;
		goto out;
	}

out:
	mock_context_close(ctx);
	return err;
#undef pr_fmt
#define pr_fmt(x) x
}

int i915_gem_context_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(mock_context_barrier),
	};
	struct drm_i915_private *i915;
	int err;

	i915 = mock_gem_device();
	if (!i915)
		return -ENOMEM;

	err = i915_subtests(tests, i915);

	drm_dev_put(&i915->drm);
	return err;
}

int i915_gem_context_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_nop_switch),
		SUBTEST(live_parallel_switch),
		SUBTEST(igt_ctx_exec),
		SUBTEST(igt_ctx_readonly),
		SUBTEST(igt_ctx_sseu),
		SUBTEST(igt_shared_ctx_exec),
		SUBTEST(igt_vm_isolation),
	};

	if (intel_gt_is_wedged(&i915->gt))
		return 0;

	return i915_live_subtests(tests, i915);
}