/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2017 Intel Corporation
 */

#include <linux/prime_numbers.h>

#include "gem/i915_gem_pm.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"
#include "gt/intel_reset.h"
#include "i915_selftest.h"

#include "gem/selftests/igt_gem_utils.h"
#include "selftests/i915_random.h"
#include "selftests/igt_flush_test.h"
#include "selftests/igt_live_test.h"
#include "selftests/igt_reset.h"
#include "selftests/igt_spinner.h"
#include "selftests/mock_drm.h"
#include "selftests/mock_gem_device.h"

#include "huge_gem_object.h"
#include "igt_gem_utils.h"

#define DW_PER_PAGE (PAGE_SIZE / sizeof(u32))
static inline struct i915_address_space *ctx_vm(struct i915_gem_context *ctx)
{
	/* single threaded, private ctx */
	return rcu_dereference_protected(ctx->vm, true);
}
static int live_nop_switch(void *arg)
{
	const unsigned int nctx = 1024;
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct i915_gem_context **ctx;
	struct igt_live_test t;
	struct file *file;
	unsigned long n;
	int err = -ENODEV;

	/*
	 * Create as many contexts as we can feasibly get away with
	 * and check we can switch between them rapidly.
	 *
	 * Serves as a very simple stress test for submission and HW switching
	 * between contexts.
	 */

	if (!DRIVER_CAPS(i915)->has_logical_contexts)
		return 0;

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	ctx = kcalloc(nctx, sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		err = -ENOMEM;
		goto out_file;
	}

	for (n = 0; n < nctx; n++) {
		ctx[n] = live_context(i915, file);
		if (IS_ERR(ctx[n])) {
			err = PTR_ERR(ctx[n]);
			goto out_file;
		}
	}

	for_each_uabi_engine(engine, i915) {
		struct i915_request *rq = NULL;
		unsigned long end_time, prime;
		ktime_t times[2] = {};

		times[0] = ktime_get_raw();
		for (n = 0; n < nctx; n++) {
			struct i915_request *this;

			this = igt_request_alloc(ctx[n], engine);
			if (IS_ERR(this)) {
				err = PTR_ERR(this);
				goto out_file;
			}
			if (rq) {
				i915_request_await_dma_fence(this, &rq->fence);
				i915_request_put(rq);
			}
			rq = i915_request_get(this);
			i915_request_add(this);
		}
		if (i915_request_wait(rq, 0, HZ / 5) < 0) {
			pr_err("Failed to populate %d contexts\n", nctx);
			intel_gt_set_wedged(&i915->gt);
			i915_request_put(rq);
			err = -EIO;
			goto out_file;
		}
		i915_request_put(rq);

		times[1] = ktime_get_raw();

		pr_info("Populated %d contexts on %s in %lluns\n",
			nctx, engine->name, ktime_to_ns(times[1] - times[0]));

		err = igt_live_test_begin(&t, i915, __func__, engine->name);
		if (err)
			goto out_file;

		end_time = jiffies + i915_selftest.timeout_jiffies;
		for_each_prime_number_from(prime, 2, 8192) {
			times[1] = ktime_get_raw();

			rq = NULL;
			for (n = 0; n < prime; n++) {
				struct i915_request *this;

				this = igt_request_alloc(ctx[n % nctx], engine);
				if (IS_ERR(this)) {
					err = PTR_ERR(this);
					goto out_file;
				}

				if (rq) { /* Force submission order */
					i915_request_await_dma_fence(this, &rq->fence);
					i915_request_put(rq);
				}

				/*
				 * This space is left intentionally blank.
				 *
				 * We do not actually want to perform any
				 * action with this request, we just want
				 * to measure the latency in allocation
				 * and submission of our breadcrumbs -
				 * ensuring that the bare request is sufficient
				 * for the system to work (i.e. proper HEAD
				 * tracking of the rings, interrupt handling,
				 * etc). It also gives us the lowest bounds
				 * for latency.
				 */

				rq = i915_request_get(this);
				i915_request_add(this);
			}
			if (i915_request_wait(rq, 0, HZ / 5) < 0) {
				pr_err("Switching between %ld contexts timed out\n",
				       prime);
				intel_gt_set_wedged(&i915->gt);
				i915_request_put(rq);
				break;
			}
			i915_request_put(rq);

			times[1] = ktime_sub(ktime_get_raw(), times[1]);
			if (prime == 2)
				times[0] = times[1];

			if (__igt_timeout(end_time, NULL))
				break;
		}

		err = igt_live_test_end(&t);
		if (err)
			goto out_file;

		pr_info("Switch latencies on %s: 1 = %lluns, %lu = %lluns\n",
			engine->name,
			ktime_to_ns(times[0]),
			prime - 1, div64_u64(ktime_to_ns(times[1]), prime - 1));
	}

out_file:
	fput(file);
	return err;
}
struct parallel_switch {
	struct task_struct *tsk;
	struct intel_context *ce[2];
};
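
/*
 * Thread body for the "sync" variant: each iteration submits one request on
 * each of this engine's two contexts, chains them in order, and waits for the
 * final request to complete before counting the switch.
 */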
static int __live_parallel_switch1(void *data)
{
	struct parallel_switch *arg = data;
	IGT_TIMEOUT(end_time);
	unsigned long count;

	count = 0;
	do {
		struct i915_request *rq = NULL;
		int err, n;

		err = 0;
		for (n = 0; !err && n < ARRAY_SIZE(arg->ce); n++) {
			struct i915_request *prev = rq;

			rq = i915_request_create(arg->ce[n]);
			if (IS_ERR(rq)) {
				i915_request_put(prev);
				return PTR_ERR(rq);
			}

			i915_request_get(rq);
			if (prev) {
				err = i915_request_await_dma_fence(rq, &prev->fence);
				i915_request_put(prev);
			}

			i915_request_add(rq);
		}
		if (i915_request_wait(rq, 0, HZ / 5) < 0)
			err = -ETIME;
		i915_request_put(rq);
		if (err)
			return err;

		count++;
	} while (!__igt_timeout(end_time, NULL));

	pr_info("%s: %lu switches (sync)\n", arg->ce[0]->engine->name, count);
	return 0;
}
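
/*
 * Thread body for the "many" variant: keep submitting chained requests across
 * the contexts without waiting in between, and only flush the final request
 * once the timeout expires.
 */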
static int __live_parallel_switchN(void *data)
{
	struct parallel_switch *arg = data;
	struct i915_request *rq = NULL;
	IGT_TIMEOUT(end_time);
	unsigned long count;
	int n;

	count = 0;
	do {
		for (n = 0; n < ARRAY_SIZE(arg->ce); n++) {
			struct i915_request *prev = rq;
			int err = 0;

			rq = i915_request_create(arg->ce[n]);
			if (IS_ERR(rq)) {
				i915_request_put(prev);
				return PTR_ERR(rq);
			}

			i915_request_get(rq);
			if (prev) {
				err = i915_request_await_dma_fence(rq, &prev->fence);
				i915_request_put(prev);
			}

			i915_request_add(rq);
			if (err) {
				i915_request_put(rq);
				return err;
			}
		}

		count++;
	} while (!__igt_timeout(end_time, NULL));
	i915_request_put(rq);

	pr_info("%s: %lu switches (many)\n", arg->ce[0]->engine->name, count);
	return 0;
}
static int live_parallel_switch(void *arg)
{
	struct drm_i915_private *i915 = arg;
	static int (* const func[])(void *arg) = {
		__live_parallel_switch1,
		__live_parallel_switchN,
		NULL,
	};
	struct parallel_switch *data = NULL;
	struct i915_gem_engines *engines;
	struct i915_gem_engines_iter it;
	int (* const *fn)(void *arg);
	struct i915_gem_context *ctx;
	struct intel_context *ce;
	struct file *file;
	int n, m, count;
	int err = 0;

	/*
	 * Check we can process switches on all engines simultaneously.
	 */

	if (!DRIVER_CAPS(i915)->has_logical_contexts)
		return 0;

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	ctx = live_context(i915, file);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto out_file;
	}

	engines = i915_gem_context_lock_engines(ctx);
	count = engines->num_engines;

	data = kcalloc(count, sizeof(*data), GFP_KERNEL);
	if (!data) {
		i915_gem_context_unlock_engines(ctx);
		err = -ENOMEM;
		goto out_file;
	}

	m = 0; /* Use the first context as our template for the engines */
	for_each_gem_engine(ce, engines, it) {
		err = intel_context_pin(ce);
		if (err) {
			i915_gem_context_unlock_engines(ctx);
			goto out;
		}
		data[m++].ce[0] = intel_context_get(ce);
	}
	i915_gem_context_unlock_engines(ctx);

	/* Clone the same set of engines into the other contexts */
	for (n = 1; n < ARRAY_SIZE(data->ce); n++) {
		ctx = live_context(i915, file);
		if (IS_ERR(ctx)) {
			err = PTR_ERR(ctx);
			goto out;
		}

		for (m = 0; m < count; m++) {
			if (!data[m].ce[0])
				continue;

			ce = intel_context_create(data[m].ce[0]->engine);
			if (IS_ERR(ce))
				goto out;

			err = intel_context_pin(ce);
			if (err) {
				intel_context_put(ce);
				goto out;
			}

			data[m].ce[n] = ce;
		}
	}

	for (fn = func; !err && *fn; fn++) {
		struct igt_live_test t;

		err = igt_live_test_begin(&t, i915, __func__, "");
		if (err)
			break;

		for (n = 0; n < count; n++) {
			if (!data[n].ce[0])
				continue;

			data[n].tsk = kthread_run(*fn, &data[n],
						  "igt/parallel:%s",
						  data[n].ce[0]->engine->name);
			if (IS_ERR(data[n].tsk)) {
				err = PTR_ERR(data[n].tsk);
				break;
			}
			get_task_struct(data[n].tsk);
		}

		yield(); /* start all threads before we kthread_stop() */

		for (n = 0; n < count; n++) {
			int status;

			if (IS_ERR_OR_NULL(data[n].tsk))
				continue;

			status = kthread_stop(data[n].tsk);
			if (status && !err)
				err = status;

			put_task_struct(data[n].tsk);
			data[n].tsk = NULL;
		}

		if (igt_live_test_end(&t))
			err = -EIO;
	}

out:
	for (n = 0; n < count; n++) {
		for (m = 0; m < ARRAY_SIZE(data->ce); m++) {
			if (!data[n].ce[m])
				continue;

			intel_context_unpin(data[n].ce[m]);
			intel_context_put(data[n].ce[m]);
		}
	}
	kfree(data);
out_file:
	fput(file);
	return err;
}
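
/*
 * The huge_gem_object backs a large virtual extent with a small set of real
 * pages: phys_size is the real backing store, dma_size the virtual size seen
 * through the GTT.
 */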
static unsigned long real_page_count(struct drm_i915_gem_object *obj)
{
	return huge_gem_object_phys_size(obj) >> PAGE_SHIFT;
}
static unsigned long fake_page_count(struct drm_i915_gem_object *obj)
{
	return huge_gem_object_dma_size(obj) >> PAGE_SHIFT;
}
static int gpu_fill(struct intel_context *ce,
		    struct drm_i915_gem_object *obj,
		    unsigned int dw)
{
	struct i915_vma *vma;
	int err;

	GEM_BUG_ON(obj->base.size > ce->vm->total);
	GEM_BUG_ON(!intel_engine_can_store_dword(ce->engine));

	vma = i915_vma_instance(obj, ce->vm, NULL);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	err = i915_vma_pin(vma, 0, 0, PIN_HIGH | PIN_USER);
	if (err)
		return err;

	/*
	 * Within the GTT the huge object maps every page onto
	 * its 1024 real pages (using phys_pfn = dma_pfn % 1024).
	 * We set the nth dword within the page using the nth
	 * mapping via the GTT - this should exercise the GTT mapping
	 * whilst checking that each context provides a unique view
	 * into the object.
	 */
	err = igt_gpu_fill_dw(ce, vma,
			      (dw * real_page_count(obj)) << PAGE_SHIFT |
			      (dw * sizeof(u32)),
			      real_page_count(obj),
			      dw);

	i915_vma_unpin(vma);

	return err;
}
static int cpu_fill(struct drm_i915_gem_object *obj, u32 value)
{
	const bool has_llc = HAS_LLC(to_i915(obj->base.dev));
	unsigned int n, m, need_flush;
	int err;

	i915_gem_object_lock(obj, NULL);
	err = i915_gem_object_prepare_write(obj, &need_flush);
	if (err)
		goto out;

	for (n = 0; n < real_page_count(obj); n++) {
		u32 *map;

		map = kmap_atomic(i915_gem_object_get_page(obj, n));
		for (m = 0; m < DW_PER_PAGE; m++)
			map[m] = value;
		if (!has_llc)
			drm_clflush_virt_range(map, PAGE_SIZE);
		kunmap_atomic(map);
	}

	i915_gem_object_finish_access(obj);
	obj->read_domains = I915_GEM_DOMAIN_GTT | I915_GEM_DOMAIN_CPU;
	obj->write_domain = 0;
out:
	i915_gem_object_unlock(obj);
	return err;
}
static noinline int cpu_check(struct drm_i915_gem_object *obj,
			      unsigned int idx, unsigned int max)
{
	unsigned int n, m, needs_flush;
	int err;

	i915_gem_object_lock(obj, NULL);
	err = i915_gem_object_prepare_read(obj, &needs_flush);
	if (err)
		goto out_unlock;

	for (n = 0; n < real_page_count(obj); n++) {
		u32 *map;

		map = kmap_atomic(i915_gem_object_get_page(obj, n));
		if (needs_flush & CLFLUSH_BEFORE)
			drm_clflush_virt_range(map, PAGE_SIZE);

		for (m = 0; m < max; m++) {
			if (map[m] != m) {
				pr_err("%pS: Invalid value at object %d page %d/%ld, offset %d/%d: found %x expected %x\n",
				       __builtin_return_address(0), idx,
				       n, real_page_count(obj), m, max,
				       map[m], m);
				err = -EINVAL;
				goto out_unmap;
			}
		}

		for (; m < DW_PER_PAGE; m++) {
			if (map[m] != STACK_MAGIC) {
				pr_err("%pS: Invalid value at object %d page %d, offset %d: found %x expected %x (uninitialised)\n",
				       __builtin_return_address(0), idx, n, m,
				       map[m], STACK_MAGIC);
				err = -EINVAL;
				goto out_unmap;
			}
		}

out_unmap:
		kunmap_atomic(map);
		if (err)
			break;
	}

	i915_gem_object_finish_access(obj);
out_unlock:
	i915_gem_object_unlock(obj);
	return err;
}
static int file_add_object(struct file *file, struct drm_i915_gem_object *obj)
{
	int err;

	GEM_BUG_ON(obj->base.handle_count);

	/* tie the object to the drm_file for easy reaping */
	err = idr_alloc(&to_drm_file(file)->object_idr,
			&obj->base, 1, 0, GFP_KERNEL);
	if (err < 0)
		return err;

	i915_gem_object_get(obj);
	obj->base.handle_count++;
	return 0;
}
static struct drm_i915_gem_object *
create_test_object(struct i915_address_space *vm,
		   struct file *file,
		   struct list_head *objects)
{
	struct drm_i915_gem_object *obj;
	u64 size;
	int err;

	/* Keep in GEM's good graces */
	intel_gt_retire_requests(vm->gt);

	size = min(vm->total / 2, 1024ull * DW_PER_PAGE * PAGE_SIZE);
	size = round_down(size, DW_PER_PAGE * PAGE_SIZE);

	obj = huge_gem_object(vm->i915, DW_PER_PAGE * PAGE_SIZE, size);
	if (IS_ERR(obj))
		return obj;

	err = file_add_object(file, obj);
	i915_gem_object_put(obj);
	if (err)
		return ERR_PTR(err);

	err = cpu_fill(obj, STACK_MAGIC);
	if (err) {
		pr_err("Failed to fill object with cpu, err=%d\n",
		       err);
		return ERR_PTR(err);
	}

	list_add_tail(&obj->st_link, objects);
	return obj;
}
static unsigned long max_dwords(struct drm_i915_gem_object *obj)
{
	unsigned long npages = fake_page_count(obj);

	GEM_BUG_ON(!IS_ALIGNED(npages, DW_PER_PAGE));
	return npages / DW_PER_PAGE;
}
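
/*
 * throttle()/throttle_release() maintain a small ring of in-flight requests
 * so the submission loops below never run unboundedly far ahead of the HW.
 */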
static void throttle_release(struct i915_request **q, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		if (IS_ERR_OR_NULL(q[i]))
			continue;

		i915_request_put(fetch_and_zero(&q[i]));
	}
}
static int throttle(struct intel_context *ce,
		    struct i915_request **q, int count)
{
	int i;

	if (!IS_ERR_OR_NULL(q[0])) {
		if (i915_request_wait(q[0],
				      I915_WAIT_INTERRUPTIBLE,
				      MAX_SCHEDULE_TIMEOUT) < 0)
			return -EINTR;

		i915_request_put(q[0]);
	}

	for (i = 0; i < count - 1; i++)
		q[i] = q[i + 1];

	q[i] = intel_context_create_request(ce);
	if (IS_ERR(q[i]))
		return PTR_ERR(q[i]);

	i915_request_get(q[i]);
	i915_request_add(q[i]);

	return 0;
}
static int igt_ctx_exec(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	int err = -ENODEV;

	/*
	 * Create a few different contexts (with different mm) and write
	 * through each ctx/mm using the GPU making sure those writes end
	 * up in the expected pages of our obj.
	 */

	if (!DRIVER_CAPS(i915)->has_logical_contexts)
		return 0;

	for_each_uabi_engine(engine, i915) {
		struct drm_i915_gem_object *obj = NULL;
		unsigned long ncontexts, ndwords, dw;
		struct i915_request *tq[5] = {};
		struct igt_live_test t;
		IGT_TIMEOUT(end_time);
		LIST_HEAD(objects);
		struct file *file;

		if (!intel_engine_can_store_dword(engine))
			continue;

		if (!engine->context_size)
			continue; /* No logical context support in HW */

		file = mock_file(i915);
		if (IS_ERR(file))
			return PTR_ERR(file);

		err = igt_live_test_begin(&t, i915, __func__, engine->name);
		if (err)
			goto out_file;

		ncontexts = 0;
		ndwords = 0;
		dw = 0;
		while (!time_after(jiffies, end_time)) {
			struct i915_gem_context *ctx;
			struct intel_context *ce;

			ctx = kernel_context(i915);
			if (IS_ERR(ctx)) {
				err = PTR_ERR(ctx);
				goto out_file;
			}

			ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
			GEM_BUG_ON(IS_ERR(ce));

			if (!obj) {
				obj = create_test_object(ce->vm, file, &objects);
				if (IS_ERR(obj)) {
					err = PTR_ERR(obj);
					intel_context_put(ce);
					kernel_context_close(ctx);
					goto out_file;
				}
			}

			err = gpu_fill(ce, obj, dw);
			if (err) {
				pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) [full-ppgtt? %s], err=%d\n",
				       ndwords, dw, max_dwords(obj),
				       engine->name,
				       yesno(!!rcu_access_pointer(ctx->vm)),
				       err);
				intel_context_put(ce);
				kernel_context_close(ctx);
				goto out_file;
			}

			err = throttle(ce, tq, ARRAY_SIZE(tq));
			if (err) {
				intel_context_put(ce);
				kernel_context_close(ctx);
				goto out_file;
			}

			if (++dw == max_dwords(obj)) {
				obj = NULL;
				dw = 0;
			}

			ndwords++;
			ncontexts++;

			intel_context_put(ce);
			kernel_context_close(ctx);
		}

		pr_info("Submitted %lu contexts to %s, filling %lu dwords\n",
			ncontexts, engine->name, ndwords);

		ncontexts = dw = 0;
		list_for_each_entry(obj, &objects, st_link) {
			unsigned int rem =
				min_t(unsigned int, ndwords - dw, max_dwords(obj));

			err = cpu_check(obj, ncontexts++, rem);
			if (err)
				break;

			dw += rem;
		}

out_file:
		throttle_release(tq, ARRAY_SIZE(tq));
		if (igt_live_test_end(&t))
			err = -EIO;
		fput(file);
		if (err)
			return err;

		i915_gem_drain_freed_objects(i915);
	}

	return 0;
}
static int igt_shared_ctx_exec(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_request *tq[5] = {};
	struct i915_gem_context *parent;
	struct intel_engine_cs *engine;
	struct igt_live_test t;
	struct file *file;
	int err = 0;

	/*
	 * Create a few different contexts with the same mm and write
	 * through each ctx using the GPU making sure those writes end
	 * up in the expected pages of our obj.
	 */

	if (!DRIVER_CAPS(i915)->has_logical_contexts)
		return 0;

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	parent = live_context(i915, file);
	if (IS_ERR(parent)) {
		err = PTR_ERR(parent);
		goto out_file;
	}

	if (!parent->vm) { /* not full-ppgtt; nothing to share */
		err = 0;
		goto out_file;
	}

	err = igt_live_test_begin(&t, i915, __func__, "");
	if (err)
		goto out_file;

	for_each_uabi_engine(engine, i915) {
		unsigned long ncontexts, ndwords, dw;
		struct drm_i915_gem_object *obj = NULL;
		IGT_TIMEOUT(end_time);
		LIST_HEAD(objects);

		if (!intel_engine_can_store_dword(engine))
			continue;

		dw = 0;
		ndwords = 0;
		ncontexts = 0;
		while (!time_after(jiffies, end_time)) {
			struct i915_gem_context *ctx;
			struct intel_context *ce;

			ctx = kernel_context(i915);
			if (IS_ERR(ctx)) {
				err = PTR_ERR(ctx);
				goto out_test;
			}

			mutex_lock(&ctx->mutex);
			__assign_ppgtt(ctx, ctx_vm(parent));
			mutex_unlock(&ctx->mutex);

			ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
			GEM_BUG_ON(IS_ERR(ce));

			if (!obj) {
				obj = create_test_object(ctx_vm(parent),
							 file, &objects);
				if (IS_ERR(obj)) {
					err = PTR_ERR(obj);
					intel_context_put(ce);
					kernel_context_close(ctx);
					goto out_test;
				}
			}

			err = gpu_fill(ce, obj, dw);
			if (err) {
				pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) [full-ppgtt? %s], err=%d\n",
				       ndwords, dw, max_dwords(obj),
				       engine->name,
				       yesno(!!rcu_access_pointer(ctx->vm)),
				       err);
				intel_context_put(ce);
				kernel_context_close(ctx);
				goto out_test;
			}

			err = throttle(ce, tq, ARRAY_SIZE(tq));
			if (err) {
				intel_context_put(ce);
				kernel_context_close(ctx);
				goto out_test;
			}

			if (++dw == max_dwords(obj)) {
				obj = NULL;
				dw = 0;
			}

			ndwords++;
			ncontexts++;

			intel_context_put(ce);
			kernel_context_close(ctx);
		}
		pr_info("Submitted %lu contexts to %s, filling %lu dwords\n",
			ncontexts, engine->name, ndwords);

		ncontexts = dw = 0;
		list_for_each_entry(obj, &objects, st_link) {
			unsigned int rem =
				min_t(unsigned int, ndwords - dw, max_dwords(obj));

			err = cpu_check(obj, ncontexts++, rem);
			if (err)
				goto out_test;

			dw += rem;
		}

		i915_gem_drain_freed_objects(i915);
	}
out_test:
	throttle_release(tq, ARRAY_SIZE(tq));
	if (igt_live_test_end(&t))
		err = -EIO;
out_file:
	fput(file);
	return err;
}
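
/*
 * Build a small batch that stores GEN8_R_PWR_CLK_STATE into the start of the
 * target vma, so the test can read back the RPCS configuration that was live
 * when the batch executed.
 */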
static int rpcs_query_batch(struct drm_i915_gem_object *rpcs, struct i915_vma *vma)
{
	u32 *cmd;

	GEM_BUG_ON(INTEL_GEN(vma->vm->i915) < 8);

	cmd = i915_gem_object_pin_map(rpcs, I915_MAP_WB);
	if (IS_ERR(cmd))
		return PTR_ERR(cmd);

	*cmd++ = MI_STORE_REGISTER_MEM_GEN8;
	*cmd++ = i915_mmio_reg_offset(GEN8_R_PWR_CLK_STATE);
	*cmd++ = lower_32_bits(vma->node.start);
	*cmd++ = upper_32_bits(vma->node.start);
	*cmd = MI_BATCH_BUFFER_END;

	__i915_gem_object_flush_map(rpcs, 0, 64);
	i915_gem_object_unpin_map(rpcs);

	intel_gt_chipset_flush(vma->vm->gt);

	return 0;
}
static int
emit_rpcs_query(struct drm_i915_gem_object *obj,
		struct intel_context *ce,
		struct i915_request **rq_out)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_request *rq;
	struct i915_gem_ww_ctx ww;
	struct i915_vma *batch;
	struct i915_vma *vma;
	struct drm_i915_gem_object *rpcs;
	int err;

	GEM_BUG_ON(!intel_engine_can_store_dword(ce->engine));

	if (INTEL_GEN(i915) < 8)
		return -EINVAL;

	vma = i915_vma_instance(obj, ce->vm, NULL);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	rpcs = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(rpcs))
		return PTR_ERR(rpcs);

	batch = i915_vma_instance(rpcs, ce->vm, NULL);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto err_put;
	}

	i915_gem_ww_ctx_init(&ww, false);
retry:
	err = i915_gem_object_lock(obj, &ww);
	if (!err)
		err = i915_gem_object_lock(rpcs, &ww);
	if (!err)
		err = i915_gem_object_set_to_gtt_domain(obj, false);
	if (!err)
		err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
	if (err)
		goto err_put;

	err = i915_vma_pin_ww(batch, &ww, 0, 0, PIN_USER);
	if (err)
		goto err_vma;

	err = rpcs_query_batch(rpcs, vma);
	if (err)
		goto err_batch;

	rq = i915_request_create(ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_batch;
	}

	err = i915_request_await_object(rq, batch->obj, false);
	if (err == 0)
		err = i915_vma_move_to_active(batch, rq, 0);
	if (err)
		goto skip_request;

	err = i915_request_await_object(rq, vma->obj, true);
	if (err == 0)
		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	if (err)
		goto skip_request;

	if (rq->engine->emit_init_breadcrumb) {
		err = rq->engine->emit_init_breadcrumb(rq);
		if (err)
			goto skip_request;
	}

	err = rq->engine->emit_bb_start(rq,
					batch->node.start, batch->node.size,
					0);
	if (err)
		goto skip_request;

	*rq_out = i915_request_get(rq);

skip_request:
	if (err)
		i915_request_set_error_once(rq, err);
	i915_request_add(rq);
err_batch:
	i915_vma_unpin(batch);
err_vma:
	i915_vma_unpin(vma);
err_put:
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	i915_gem_object_put(rpcs);
	return err;
}
#define TEST_IDLE	BIT(0)
#define TEST_BUSY	BIT(1)
#define TEST_RESET	BIT(2)
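
/*
 * For the busy/reset phases, park a spinner on the context so the SSEU
 * reconfiguration is requested while the engine is (or appears to be) busy.
 */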
static int
__sseu_prepare(const char *name,
	       unsigned int flags,
	       struct intel_context *ce,
	       struct igt_spinner **spin)
{
	struct i915_request *rq;
	int ret;

	*spin = NULL;
	if (!(flags & (TEST_BUSY | TEST_RESET)))
		return 0;

	*spin = kzalloc(sizeof(**spin), GFP_KERNEL);
	if (!*spin)
		return -ENOMEM;

	ret = igt_spinner_init(*spin, ce->engine->gt);
	if (ret)
		goto err_free;

	rq = igt_spinner_create_request(*spin, ce, MI_NOOP);
	if (IS_ERR(rq)) {
		ret = PTR_ERR(rq);
		goto err_fini;
	}

	i915_request_add(rq);

	if (!igt_wait_for_spinner(*spin, rq)) {
		pr_err("%s: Spinner failed to start!\n", name);
		ret = -ETIMEDOUT;
		goto err_end;
	}

	return 0;

err_end:
	igt_spinner_end(*spin);
err_fini:
	igt_spinner_fini(*spin);
err_free:
	kfree(fetch_and_zero(spin));
	return ret;
}
static int
__read_slice_count(struct intel_context *ce,
		   struct drm_i915_gem_object *obj,
		   struct igt_spinner *spin,
		   u32 *rpcs)
{
	struct i915_request *rq = NULL;
	u32 s_mask, s_shift;
	unsigned int cnt;
	u32 *buf, val;
	long ret;

	ret = emit_rpcs_query(obj, ce, &rq);
	if (ret)
		return ret;

	if (spin)
		igt_spinner_end(spin);

	ret = i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
	i915_request_put(rq);
	if (ret < 0)
		return ret;

	buf = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	if (INTEL_GEN(ce->engine->i915) >= 11) {
		s_mask = GEN11_RPCS_S_CNT_MASK;
		s_shift = GEN11_RPCS_S_CNT_SHIFT;
	} else {
		s_mask = GEN8_RPCS_S_CNT_MASK;
		s_shift = GEN8_RPCS_S_CNT_SHIFT;
	}

	val = *buf;
	cnt = (val & s_mask) >> s_shift;
	*rpcs = val;

	i915_gem_object_unpin_map(obj);

	return cnt;
}
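
/*
 * Compare the slice count sampled from RPCS against the expected value,
 * dumping the raw register contents on mismatch.
 */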
static int
__check_rpcs(const char *name, u32 rpcs, int slices, unsigned int expected,
	     const char *prefix, const char *suffix)
{
	if (slices == expected)
		return 0;

	if (slices < 0) {
		pr_err("%s: %s read slice count failed with %d%s\n",
		       name, prefix, slices, suffix);
		return slices;
	}

	pr_err("%s: %s slice count %d is not %u%s\n",
	       name, prefix, slices, expected, suffix);

	pr_info("RPCS=0x%x; %u%sx%u%s\n",
		rpcs, slices,
		(rpcs & GEN8_RPCS_S_CNT_ENABLE) ? "*" : "",
		(rpcs & GEN8_RPCS_SS_CNT_MASK) >> GEN8_RPCS_SS_CNT_SHIFT,
		(rpcs & GEN8_RPCS_SS_CNT_ENABLE) ? "*" : "");

	return -EINVAL;
}
static int
__sseu_finish(const char *name,
	      unsigned int flags,
	      struct intel_context *ce,
	      struct drm_i915_gem_object *obj,
	      unsigned int expected,
	      struct igt_spinner *spin)
{
	unsigned int slices = hweight32(ce->engine->sseu.slice_mask);
	u32 rpcs = 0;
	int ret = 0;

	if (flags & TEST_RESET) {
		ret = intel_engine_reset(ce->engine, "sseu");
		if (ret)
			goto out;
	}

	ret = __read_slice_count(ce, obj,
				 flags & TEST_RESET ? NULL : spin, &rpcs);
	ret = __check_rpcs(name, rpcs, ret, expected, "Context", "!");
	if (ret)
		goto out;

	ret = __read_slice_count(ce->engine->kernel_context, obj, NULL, &rpcs);
	ret = __check_rpcs(name, rpcs, ret, slices, "Kernel context", "!");

out:
	if (spin)
		igt_spinner_end(spin);

	if ((flags & TEST_IDLE) && ret == 0) {
		ret = igt_flush_test(ce->engine->i915);
		if (ret)
			return ret;

		ret = __read_slice_count(ce, obj, NULL, &rpcs);
		ret = __check_rpcs(name, rpcs, ret, expected,
				   "Context", " after idle!");
	}

	return ret;
}
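
/*
 * One test step: optionally start a spinner, reconfigure the context SSEU,
 * then verify both this context and the kernel context report the expected
 * slice counts.
 */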
static int
__sseu_test(const char *name,
	    unsigned int flags,
	    struct intel_context *ce,
	    struct drm_i915_gem_object *obj,
	    struct intel_sseu sseu)
{
	struct igt_spinner *spin = NULL;
	int ret;

	intel_engine_pm_get(ce->engine);

	ret = __sseu_prepare(name, flags, ce, &spin);
	if (ret)
		goto out_pm;

	ret = intel_context_reconfigure_sseu(ce, sseu);
	if (ret)
		goto out_spin;

	ret = __sseu_finish(name, flags, ce, obj,
			    hweight32(sseu.slice_mask), spin);

out_spin:
	if (spin) {
		igt_spinner_end(spin);
		igt_spinner_fini(spin);
		kfree(spin);
	}
out_pm:
	intel_engine_pm_put(ce->engine);
	return ret;
}
static int
__igt_ctx_sseu(struct drm_i915_private *i915,
	       const char *name,
	       unsigned int flags)
{
	struct drm_i915_gem_object *obj;
	int inst = 0;
	int ret = 0;

	if (INTEL_GEN(i915) < 9)
		return 0;

	if (flags & TEST_RESET)
		igt_global_reset_lock(&i915->gt);

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto out_unlock;
	}

	do {
		struct intel_engine_cs *engine;
		struct intel_context *ce;
		struct intel_sseu pg_sseu;

		engine = intel_engine_lookup_user(i915,
						  I915_ENGINE_CLASS_RENDER,
						  inst++);
		if (!engine)
			break;

		if (hweight32(engine->sseu.slice_mask) < 2)
			continue;

		if (!engine->gt->info.sseu.has_slice_pg)
			continue;

		/*
		 * Gen11 VME friendly power-gated configuration with
		 * half enabled sub-slices.
		 */
		pg_sseu = engine->sseu;
		pg_sseu.slice_mask = 1;
		pg_sseu.subslice_mask =
			~(~0 << (hweight32(engine->sseu.subslice_mask) / 2));

		pr_info("%s: SSEU subtest '%s', flags=%x, def_slices=%u, pg_slices=%u\n",
			engine->name, name, flags,
			hweight32(engine->sseu.slice_mask),
			hweight32(pg_sseu.slice_mask));

		ce = intel_context_create(engine);
		if (IS_ERR(ce)) {
			ret = PTR_ERR(ce);
			goto out_put;
		}

		ret = intel_context_pin(ce);
		if (ret)
			goto out_ce;

		/* First set the default mask. */
		ret = __sseu_test(name, flags, ce, obj, engine->sseu);
		if (ret)
			goto out_unpin;

		/* Then set a power-gated configuration. */
		ret = __sseu_test(name, flags, ce, obj, pg_sseu);
		if (ret)
			goto out_unpin;

		/* Back to defaults. */
		ret = __sseu_test(name, flags, ce, obj, engine->sseu);
		if (ret)
			goto out_unpin;

		/* One last power-gated configuration for the road. */
		ret = __sseu_test(name, flags, ce, obj, pg_sseu);

out_unpin:
		intel_context_unpin(ce);
out_ce:
		intel_context_put(ce);
	} while (!ret);

	if (igt_flush_test(i915))
		ret = -EIO;

out_put:
	i915_gem_object_put(obj);

out_unlock:
	if (flags & TEST_RESET)
		igt_global_reset_unlock(&i915->gt);

	if (ret)
		pr_err("%s: Failed with %d!\n", name, ret);

	return ret;
}
static int igt_ctx_sseu(void *arg)
{
	struct {
		const char *name;
		unsigned int flags;
	} *phase, phases[] = {
		{ .name = "basic", .flags = 0 },
		{ .name = "idle", .flags = TEST_IDLE },
		{ .name = "busy", .flags = TEST_BUSY },
		{ .name = "busy-reset", .flags = TEST_BUSY | TEST_RESET },
		{ .name = "busy-idle", .flags = TEST_BUSY | TEST_IDLE },
		{ .name = "reset-idle", .flags = TEST_RESET | TEST_IDLE },
	};
	unsigned int i;
	int ret = 0;

	for (i = 0, phase = phases; ret == 0 && i < ARRAY_SIZE(phases);
	     i++, phase++)
		ret = __igt_ctx_sseu(arg, phase->name, phase->flags);

	return ret;
}
static int igt_ctx_readonly(void *arg)
{
	struct drm_i915_private *i915 = arg;
	unsigned long idx, ndwords, dw, num_engines;
	struct drm_i915_gem_object *obj = NULL;
	struct i915_request *tq[5] = {};
	struct i915_gem_engines_iter it;
	struct i915_address_space *vm;
	struct i915_gem_context *ctx;
	struct intel_context *ce;
	struct igt_live_test t;
	I915_RND_STATE(prng);
	IGT_TIMEOUT(end_time);
	LIST_HEAD(objects);
	struct file *file;
	int err = -ENODEV;

	/*
	 * Create a few read-only objects (with the occasional writable object)
	 * and try to write into these objects, checking that the GPU discards
	 * any write to a read-only object.
	 */

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	err = igt_live_test_begin(&t, i915, __func__, "");
	if (err)
		goto out_file;

	ctx = live_context(i915, file);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto out_file;
	}

	vm = ctx_vm(ctx) ?: &i915->ggtt.alias->vm;
	if (!vm || !vm->has_read_only) {
		err = 0;
		goto out_file;
	}

	num_engines = 0;
	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it)
		if (intel_engine_can_store_dword(ce->engine))
			num_engines++;
	i915_gem_context_unlock_engines(ctx);

	ndwords = 0;
	dw = 0;
	while (!time_after(jiffies, end_time)) {
		for_each_gem_engine(ce,
				    i915_gem_context_lock_engines(ctx), it) {
			if (!intel_engine_can_store_dword(ce->engine))
				continue;

			if (!obj) {
				obj = create_test_object(ce->vm, file, &objects);
				if (IS_ERR(obj)) {
					err = PTR_ERR(obj);
					i915_gem_context_unlock_engines(ctx);
					goto out_file;
				}

				if (prandom_u32_state(&prng) & 1)
					i915_gem_object_set_readonly(obj);
			}

			err = gpu_fill(ce, obj, dw);
			if (err) {
				pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) [full-ppgtt? %s], err=%d\n",
				       ndwords, dw, max_dwords(obj),
				       ce->engine->name,
				       yesno(!!ctx_vm(ctx)),
				       err);
				i915_gem_context_unlock_engines(ctx);
				goto out_file;
			}

			err = throttle(ce, tq, ARRAY_SIZE(tq));
			if (err) {
				i915_gem_context_unlock_engines(ctx);
				goto out_file;
			}

			if (++dw == max_dwords(obj)) {
				obj = NULL;
				dw = 0;
			}
			ndwords++;
		}
		i915_gem_context_unlock_engines(ctx);
	}
	pr_info("Submitted %lu dwords (across %lu engines)\n",
		ndwords, num_engines);

	dw = 0;
	idx = 0;
	list_for_each_entry(obj, &objects, st_link) {
		unsigned int rem =
			min_t(unsigned int, ndwords - dw, max_dwords(obj));
		unsigned int num_writes;

		num_writes = rem;
		if (i915_gem_object_is_readonly(obj))
			num_writes = 0;

		err = cpu_check(obj, idx++, num_writes);
		if (err)
			break;

		dw += rem;
	}

out_file:
	throttle_release(tq, ARRAY_SIZE(tq));
	if (igt_live_test_end(&t))
		err = -EIO;

	fput(file);
	return err;
}
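
/*
 * Reject scratch offsets that overlap an existing node in the vm: a write
 * there would land in real state instead of the unbound scratch page.
 */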
static int check_scratch(struct i915_address_space *vm, u64 offset)
{
	struct drm_mm_node *node;

	mutex_lock(&vm->mutex);
	node = __drm_mm_interval_first(&vm->mm,
				       offset, offset + sizeof(u32) - 1);
	mutex_unlock(&vm->mutex);
	if (!node || node->start > offset)
		return 0;

	GEM_BUG_ON(offset >= node->start + node->size);

	pr_err("Target offset 0x%08x_%08x overlaps with a node in the mm!\n",
	       upper_32_bits(offset), lower_32_bits(offset));
	return -EINVAL;
}
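
/*
 * Emit an MI_STORE_DWORD_IMM from a batch pinned at offset 0 of the context's
 * vm, targeting the (otherwise unused) offset under test.
 */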
static int write_to_scratch(struct i915_gem_context *ctx,
			    struct intel_engine_cs *engine,
			    u64 offset, u32 value)
{
	struct drm_i915_private *i915 = ctx->i915;
	struct drm_i915_gem_object *obj;
	struct i915_address_space *vm;
	struct i915_request *rq;
	struct i915_vma *vma;
	u32 *cmd;
	int err;

	GEM_BUG_ON(offset < I915_GTT_PAGE_SIZE);

	err = check_scratch(ctx_vm(ctx), offset);
	if (err)
		return err;

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto out;
	}

	*cmd++ = MI_STORE_DWORD_IMM_GEN4;
	if (INTEL_GEN(i915) >= 8) {
		*cmd++ = lower_32_bits(offset);
		*cmd++ = upper_32_bits(offset);
	} else {
		*cmd++ = 0;
		*cmd++ = offset;
	}
	*cmd++ = value;
	*cmd = MI_BATCH_BUFFER_END;
	__i915_gem_object_flush_map(obj, 0, 64);
	i915_gem_object_unpin_map(obj);

	intel_gt_chipset_flush(engine->gt);

	vm = i915_gem_context_get_vm_rcu(ctx);
	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto out_vm;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED);
	if (err)
		goto out_vm;

	rq = igt_request_alloc(ctx, engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_unpin;
	}

	i915_vma_lock(vma);
	err = i915_request_await_object(rq, vma->obj, false);
	if (err == 0)
		err = i915_vma_move_to_active(vma, rq, 0);
	i915_vma_unlock(vma);
	if (err)
		goto skip_request;

	if (rq->engine->emit_init_breadcrumb) {
		err = rq->engine->emit_init_breadcrumb(rq);
		if (err)
			goto skip_request;
	}

	err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, 0);
	if (err)
		goto skip_request;

	i915_vma_unpin(vma);

	i915_request_add(rq);

	goto out_vm;
skip_request:
	i915_request_set_error_once(rq, err);
	i915_request_add(rq);
err_unpin:
	i915_vma_unpin(vma);
out_vm:
	i915_vm_put(vm);
out:
	i915_gem_object_put(obj);
	return err;
}
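
/*
 * Read back the dword at the target offset: on gen8+ via LRM/SRM through a
 * GPR in the context's own vm, on older gens via a privileged batch in the
 * GGTT since user register access is protected.
 */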
static int read_from_scratch(struct i915_gem_context *ctx,
			     struct intel_engine_cs *engine,
			     u64 offset, u32 *value)
{
	struct drm_i915_private *i915 = ctx->i915;
	struct drm_i915_gem_object *obj;
	struct i915_address_space *vm;
	const u32 result = 0x100;
	struct i915_request *rq;
	struct i915_vma *vma;
	unsigned int flags;
	u32 *cmd;
	int err;

	GEM_BUG_ON(offset < I915_GTT_PAGE_SIZE);

	err = check_scratch(ctx_vm(ctx), offset);
	if (err)
		return err;

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (INTEL_GEN(i915) >= 8) {
		const u32 GPR0 = engine->mmio_base + 0x600;

		vm = i915_gem_context_get_vm_rcu(ctx);
		vma = i915_vma_instance(obj, vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out_vm;
		}

		err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED);
		if (err)
			goto out_vm;

		cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
		if (IS_ERR(cmd)) {
			err = PTR_ERR(cmd);
			goto err_unpin;
		}

		memset(cmd, POISON_INUSE, PAGE_SIZE);
		*cmd++ = MI_LOAD_REGISTER_MEM_GEN8;
		*cmd++ = GPR0;
		*cmd++ = lower_32_bits(offset);
		*cmd++ = upper_32_bits(offset);
		*cmd++ = MI_STORE_REGISTER_MEM_GEN8;
		*cmd++ = GPR0;
		*cmd++ = result;
		*cmd++ = 0;
		*cmd = MI_BATCH_BUFFER_END;

		i915_gem_object_flush_map(obj);
		i915_gem_object_unpin_map(obj);

		flags = 0;
	} else {
		const u32 reg = engine->mmio_base + 0x420;

		/* hsw: register access even to 3DPRIM! is protected */
		vm = i915_vm_get(&engine->gt->ggtt->vm);
		vma = i915_vma_instance(obj, vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out_vm;
		}

		err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
		if (err)
			goto out_vm;

		cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
		if (IS_ERR(cmd)) {
			err = PTR_ERR(cmd);
			goto err_unpin;
		}

		memset(cmd, POISON_INUSE, PAGE_SIZE);
		*cmd++ = MI_LOAD_REGISTER_MEM;
		*cmd++ = reg;
		*cmd++ = offset;
		*cmd++ = MI_STORE_REGISTER_MEM | MI_USE_GGTT;
		*cmd++ = reg;
		*cmd++ = vma->node.start + result;
		*cmd = MI_BATCH_BUFFER_END;

		i915_gem_object_flush_map(obj);
		i915_gem_object_unpin_map(obj);

		flags = I915_DISPATCH_SECURE;
	}

	intel_gt_chipset_flush(engine->gt);

	rq = igt_request_alloc(ctx, engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_unpin;
	}

	i915_vma_lock(vma);
	err = i915_request_await_object(rq, vma->obj, true);
	if (err == 0)
		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	i915_vma_unlock(vma);
	if (err)
		goto skip_request;

	if (rq->engine->emit_init_breadcrumb) {
		err = rq->engine->emit_init_breadcrumb(rq);
		if (err)
			goto skip_request;
	}

	err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, flags);
	if (err)
		goto skip_request;

	i915_vma_unpin(vma);

	i915_request_add(rq);

	i915_gem_object_lock(obj, NULL);
	err = i915_gem_object_set_to_cpu_domain(obj, false);
	i915_gem_object_unlock(obj);
	if (err)
		goto out_vm;

	cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto out_vm;
	}

	*value = cmd[result / sizeof(*cmd)];
	i915_gem_object_unpin_map(obj);

	goto out_vm;
skip_request:
	i915_request_set_error_once(rq, err);
	i915_request_add(rq);
err_unpin:
	i915_vma_unpin(vma);
out_vm:
	i915_vm_put(vm);
out:
	i915_gem_object_put(obj);
	return err;
}
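
/*
 * Sample the initial contents of the vm's scratch page so later reads can
 * distinguish "hit scratch" from "leaked a write from the other context".
 */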
static int check_scratch_page(struct i915_gem_context *ctx, u32 *out)
{
	struct i915_address_space *vm;
	struct page *page;
	u32 *vaddr;
	int err = 0;

	vm = ctx_vm(ctx);
	if (!vm)
		return -ENODEV;

	page = __px_page(vm->scratch[0]);
	if (!page) {
		pr_err("No scratch page!\n");
		return -EINVAL;
	}

	vaddr = kmap(page);
	if (!vaddr) {
		pr_err("No (mappable) scratch page!\n");
		return -EINVAL;
	}

	memcpy(out, vaddr, sizeof(*out));
	if (memchr_inv(vaddr, *out, PAGE_SIZE)) {
		pr_err("Inconsistent initial state of scratch page!\n");
		err = -EINVAL;
	}
	kunmap(page);

	return err;
}
static int igt_vm_isolation(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx_a, *ctx_b;
	unsigned long num_engines, count;
	struct intel_engine_cs *engine;
	struct igt_live_test t;
	I915_RND_STATE(prng);
	struct file *file;
	u64 vm_total;
	u32 expected;
	int err;

	if (INTEL_GEN(i915) < 7)
		return 0;

	/*
	 * The simple goal here is that a write into one context is not
	 * observed in a second (separate page tables and scratch).
	 */

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	err = igt_live_test_begin(&t, i915, __func__, "");
	if (err)
		goto out_file;

	ctx_a = live_context(i915, file);
	if (IS_ERR(ctx_a)) {
		err = PTR_ERR(ctx_a);
		goto out_file;
	}

	ctx_b = live_context(i915, file);
	if (IS_ERR(ctx_b)) {
		err = PTR_ERR(ctx_b);
		goto out_file;
	}

	/* We can only test vm isolation, if the vm are distinct */
	if (ctx_vm(ctx_a) == ctx_vm(ctx_b))
		goto out_file;

	/* Read the initial state of the scratch page */
	err = check_scratch_page(ctx_a, &expected);
	if (err)
		goto out_file;

	err = check_scratch_page(ctx_b, &expected);
	if (err)
		goto out_file;

	vm_total = ctx_vm(ctx_a)->total;
	GEM_BUG_ON(ctx_vm(ctx_b)->total != vm_total);

	count = 0;
	num_engines = 0;
	for_each_uabi_engine(engine, i915) {
		IGT_TIMEOUT(end_time);
		unsigned long this = 0;

		if (!intel_engine_can_store_dword(engine))
			continue;

		/* Not all engines have their own GPR! */
		if (INTEL_GEN(i915) < 8 && engine->class != RENDER_CLASS)
			continue;

		while (!__igt_timeout(end_time, NULL)) {
			u32 value = 0xc5c5c5c5;
			u64 offset;

			/* Leave enough space at offset 0 for the batch */
			offset = igt_random_offset(&prng,
						   I915_GTT_PAGE_SIZE, vm_total,
						   sizeof(u32), alignof_dword);

			err = write_to_scratch(ctx_a, engine,
					       offset, 0xdeadbeef);
			if (err == 0)
				err = read_from_scratch(ctx_b, engine,
							offset, &value);
			if (err)
				goto out_file;

			if (value != expected) {
				pr_err("%s: Read %08x from scratch (offset 0x%08x_%08x), after %lu reads!\n",
				       engine->name, value,
				       upper_32_bits(offset),
				       lower_32_bits(offset),
				       this);
				err = -EINVAL;
				goto out_file;
			}

			this++;
		}
		count += this;
		num_engines++;
	}
	pr_info("Checked %lu scratch offsets across %lu engines\n",
		count, num_engines);

out_file:
	if (igt_live_test_end(&t))
		err = -EIO;
	fput(file);
	return err;
}
static bool skip_unused_engines(struct intel_context *ce, void *data)
{
	return !ce->state;
}
static void mock_barrier_task(void *data)
{
	unsigned int *counter = data;

	++*counter;
}
static int mock_context_barrier(void *arg)
{
#undef pr_fmt
#define pr_fmt(x) "context_barrier_task():" # x
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx;
	struct i915_request *rq;
	unsigned int counter;
	int err;

	/*
	 * The context barrier provides us with a callback after it emits
	 * a request; useful for retiring old state after loading new.
	 */

	ctx = mock_context(i915, "mock");
	if (!ctx)
		return -ENOMEM;

	counter = 0;
	err = context_barrier_task(ctx, 0, NULL, NULL, NULL,
				   mock_barrier_task, &counter);
	if (err) {
		pr_err("Failed at line %d, err=%d\n", __LINE__, err);
		goto out;
	}
	if (counter == 0) {
		pr_err("Did not retire immediately with 0 engines\n");
		err = -EINVAL;
		goto out;
	}

	counter = 0;
	err = context_barrier_task(ctx, ALL_ENGINES, skip_unused_engines,
				   NULL, NULL, mock_barrier_task, &counter);
	if (err) {
		pr_err("Failed at line %d, err=%d\n", __LINE__, err);
		goto out;
	}
	if (counter == 0) {
		pr_err("Did not retire immediately for all unused engines\n");
		err = -EINVAL;
		goto out;
	}

	rq = igt_request_alloc(ctx, i915->gt.engine[RCS0]);
	if (IS_ERR(rq)) {
		pr_err("Request allocation failed!\n");
		err = PTR_ERR(rq);
		goto out;
	}
	i915_request_add(rq);

	counter = 0;
	context_barrier_inject_fault = BIT(RCS0);
	err = context_barrier_task(ctx, ALL_ENGINES, NULL, NULL, NULL,
				   mock_barrier_task, &counter);
	context_barrier_inject_fault = 0;
	if (err == -ENXIO)
		err = 0;
	else
		pr_err("Did not hit fault injection!\n");
	if (counter != 0) {
		pr_err("Invoked callback on error!\n");
		err = -EIO;
	}
	if (err)
		goto out;

	counter = 0;
	err = context_barrier_task(ctx, ALL_ENGINES, skip_unused_engines,
				   NULL, NULL, mock_barrier_task, &counter);
	if (err) {
		pr_err("Failed at line %d, err=%d\n", __LINE__, err);
		goto out;
	}
	mock_device_flush(i915);
	if (counter == 0) {
		pr_err("Did not retire on each active engine\n");
		err = -EINVAL;
		goto out;
	}

out:
	mock_context_close(ctx);
	return err;
}
int i915_gem_context_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(mock_context_barrier),
	};
	struct drm_i915_private *i915;
	int err;

	i915 = mock_gem_device();
	if (!i915)
		return -ENOMEM;

	err = i915_subtests(tests, i915);

	mock_destroy_device(i915);
	return err;
}
int i915_gem_context_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_nop_switch),
		SUBTEST(live_parallel_switch),
		SUBTEST(igt_ctx_exec),
		SUBTEST(igt_ctx_readonly),
		SUBTEST(igt_ctx_sseu),
		SUBTEST(igt_shared_ctx_exec),
		SUBTEST(igt_vm_isolation),
	};

	if (intel_gt_is_wedged(&i915->gt))
		return 0;

	return i915_live_subtests(tests, i915);
}