/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2017 Intel Corporation
 */

#include <linux/prime_numbers.h>

#include "gem/i915_gem_pm.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"
#include "gt/intel_reset.h"
#include "i915_selftest.h"

#include "gem/selftests/igt_gem_utils.h"
#include "selftests/i915_random.h"
#include "selftests/igt_flush_test.h"
#include "selftests/igt_live_test.h"
#include "selftests/igt_reset.h"
#include "selftests/igt_spinner.h"
#include "selftests/mock_drm.h"
#include "selftests/mock_gem_device.h"

#include "huge_gem_object.h"
#include "igt_gem_utils.h"

#define DW_PER_PAGE (PAGE_SIZE / sizeof(u32))
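
/*
 * Illustrative note (added for clarity; not part of the upstream selftest):
 * with the common 4KiB PAGE_SIZE and 4-byte dwords, DW_PER_PAGE evaluates
 * to 1024, i.e. each page of a test object provides room for 1024
 * per-context dwords.
 */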

static inline struct i915_address_space *ctx_vm(struct i915_gem_context *ctx)
{
	/* single threaded, private ctx */
	return rcu_dereference_protected(ctx->vm, true);
}

static int live_nop_switch(void *arg)
{
	const unsigned int nctx = 1024;
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct i915_gem_context **ctx;
	struct igt_live_test t;
	struct file *file;
	unsigned long n;
	int err = -ENODEV;

	/*
	 * Create as many contexts as we can feasibly get away with
	 * and check we can switch between them rapidly.
	 *
	 * Serves as a very simple stress test for submission and HW switching
	 * between contexts.
	 */

	if (!DRIVER_CAPS(i915)->has_logical_contexts)
		return 0;

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	ctx = kcalloc(nctx, sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		err = -ENOMEM;
		goto out_file;
	}

	for (n = 0; n < nctx; n++) {
		ctx[n] = live_context(i915, file);
		if (IS_ERR(ctx[n])) {
			err = PTR_ERR(ctx[n]);
			goto out_file;
		}
	}

	for_each_uabi_engine(engine, i915) {
		struct i915_request *rq = NULL;
		unsigned long end_time, prime;
		ktime_t times[2] = {};

		times[0] = ktime_get_raw();
		for (n = 0; n < nctx; n++) {
			struct i915_request *this;

			this = igt_request_alloc(ctx[n], engine);
			if (IS_ERR(this)) {
				err = PTR_ERR(this);
				goto out_file;
			}
			if (rq) {
				i915_request_await_dma_fence(this, &rq->fence);
				i915_request_put(rq);
			}
			rq = i915_request_get(this);
			i915_request_add(this);
		}
		if (i915_request_wait(rq, 0, HZ / 5) < 0) {
			pr_err("Failed to populate %d contexts\n", nctx);
			intel_gt_set_wedged(&i915->gt);
			i915_request_put(rq);
			err = -EIO;
			goto out_file;
		}
		i915_request_put(rq);

		times[1] = ktime_get_raw();

		pr_info("Populated %d contexts on %s in %lluns\n",
			nctx, engine->name, ktime_to_ns(times[1] - times[0]));

		err = igt_live_test_begin(&t, i915, __func__, engine->name);
		if (err)
			goto out_file;

		end_time = jiffies + i915_selftest.timeout_jiffies;
		for_each_prime_number_from(prime, 2, 8192) {
			times[1] = ktime_get_raw();

			rq = NULL;
			for (n = 0; n < prime; n++) {
				struct i915_request *this;

				this = igt_request_alloc(ctx[n % nctx], engine);
				if (IS_ERR(this)) {
					err = PTR_ERR(this);
					goto out_file;
				}

				if (rq) { /* Force submission order */
					i915_request_await_dma_fence(this, &rq->fence);
					i915_request_put(rq);
				}

				/*
				 * This space is left intentionally blank.
				 *
				 * We do not actually want to perform any
				 * action with this request, we just want
				 * to measure the latency in allocation
				 * and submission of our breadcrumbs -
				 * ensuring that the bare request is sufficient
				 * for the system to work (i.e. proper HEAD
				 * tracking of the rings, interrupt handling,
				 * etc). It also gives us the lowest bounds
				 * for latency.
				 */

				rq = i915_request_get(this);
				i915_request_add(this);
			}
			if (i915_request_wait(rq, 0, HZ / 5) < 0) {
				pr_err("Switching between %ld contexts timed out\n",
				       prime);
				intel_gt_set_wedged(&i915->gt);
				i915_request_put(rq);
				break;
			}
			i915_request_put(rq);

			times[1] = ktime_sub(ktime_get_raw(), times[1]);
			if (prime == 2)
				times[0] = times[1];

			if (__igt_timeout(end_time, NULL))
				break;
		}

		err = igt_live_test_end(&t);
		if (err)
			goto out_file;

		pr_info("Switch latencies on %s: 1 = %lluns, %lu = %lluns\n",
			engine->name,
			ktime_to_ns(times[0]),
			prime - 1, div64_u64(ktime_to_ns(times[1]), prime - 1));
	}

out_file:
	kfree(ctx);
	fput(file);
	return err;
}

struct parallel_switch {
	struct task_struct *tsk;
	struct intel_context *ce[2];
};

static int __live_parallel_switch1(void *data)
{
	struct parallel_switch *arg = data;
	IGT_TIMEOUT(end_time);
	unsigned long count;

	count = 0;
	do {
		struct i915_request *rq = NULL;
		int err, n;

		err = 0;
		for (n = 0; !err && n < ARRAY_SIZE(arg->ce); n++) {
			struct i915_request *prev = rq;

			rq = i915_request_create(arg->ce[n]);
			if (IS_ERR(rq)) {
				i915_request_put(prev);
				return PTR_ERR(rq);
			}

			i915_request_get(rq);
			if (prev) {
				err = i915_request_await_dma_fence(rq, &prev->fence);
				i915_request_put(prev);
			}

			i915_request_add(rq);
		}
		if (i915_request_wait(rq, 0, HZ / 5) < 0)
			err = -ETIME;
		i915_request_put(rq);
		if (err)
			return err;

		count++;
	} while (!__igt_timeout(end_time, NULL));

	pr_info("%s: %lu switches (sync)\n", arg->ce[0]->engine->name, count);
	return 0;
}

static int __live_parallel_switchN(void *data)
{
	struct parallel_switch *arg = data;
	struct i915_request *rq = NULL;
	IGT_TIMEOUT(end_time);
	unsigned long count;
	int n;

	count = 0;
	do {
		for (n = 0; n < ARRAY_SIZE(arg->ce); n++) {
			struct i915_request *prev = rq;
			int err = 0;

			rq = i915_request_create(arg->ce[n]);
			if (IS_ERR(rq)) {
				i915_request_put(prev);
				return PTR_ERR(rq);
			}

			i915_request_get(rq);
			if (prev) {
				err = i915_request_await_dma_fence(rq, &prev->fence);
				i915_request_put(prev);
			}

			i915_request_add(rq);
			if (err) {
				i915_request_put(rq);
				return err;
			}
		}

		count++;
	} while (!__igt_timeout(end_time, NULL));
	i915_request_put(rq);

	pr_info("%s: %lu switches (many)\n", arg->ce[0]->engine->name, count);
	return 0;
}

static int live_parallel_switch(void *arg)
{
	struct drm_i915_private *i915 = arg;
	static int (* const func[])(void *arg) = {
		__live_parallel_switch1,
		__live_parallel_switchN,
		NULL,
	};
	struct parallel_switch *data = NULL;
	struct i915_gem_engines *engines;
	struct i915_gem_engines_iter it;
	int (* const *fn)(void *arg);
	struct i915_gem_context *ctx;
	struct intel_context *ce;
	struct file *file;
	int n, m, count;
	int err = 0;

	/*
	 * Check we can process switches on all engines simultaneously.
	 */

	if (!DRIVER_CAPS(i915)->has_logical_contexts)
		return 0;

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	ctx = live_context(i915, file);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto out_file;
	}

	engines = i915_gem_context_lock_engines(ctx);
	count = engines->num_engines;

	data = kcalloc(count, sizeof(*data), GFP_KERNEL);
	if (!data) {
		i915_gem_context_unlock_engines(ctx);
		err = -ENOMEM;
		goto out_file;
	}

	m = 0; /* Use the first context as our template for the engines */
	for_each_gem_engine(ce, engines, it) {
		err = intel_context_pin(ce);
		if (err) {
			i915_gem_context_unlock_engines(ctx);
			goto out;
		}
		data[m++].ce[0] = intel_context_get(ce);
	}
	i915_gem_context_unlock_engines(ctx);

	/* Clone the same set of engines into the other contexts */
	for (n = 1; n < ARRAY_SIZE(data->ce); n++) {
		ctx = live_context(i915, file);
		if (IS_ERR(ctx)) {
			err = PTR_ERR(ctx);
			goto out;
		}

		for (m = 0; m < count; m++) {
			ce = intel_context_create(data[m].ce[0]->engine);
			if (IS_ERR(ce)) {
				err = PTR_ERR(ce);
				goto out;
			}

			err = intel_context_pin(ce);
			if (err) {
				intel_context_put(ce);
				goto out;
			}

			data[m].ce[n] = ce;
		}
	}

	for (fn = func; !err && *fn; fn++) {
		struct igt_live_test t;

		err = igt_live_test_begin(&t, i915, __func__, "");
		if (err)
			break;

		for (n = 0; n < count; n++) {
			data[n].tsk = kthread_run(*fn, &data[n],
						  "igt/parallel:%s",
						  data[n].ce[0]->engine->name);
			if (IS_ERR(data[n].tsk)) {
				err = PTR_ERR(data[n].tsk);
				break;
			}
			get_task_struct(data[n].tsk);
		}

		yield(); /* start all threads before we kthread_stop() */

		for (n = 0; n < count; n++) {
			int status;

			if (IS_ERR_OR_NULL(data[n].tsk))
				continue;

			status = kthread_stop(data[n].tsk);
			if (status && !err)
				err = status;

			put_task_struct(data[n].tsk);
			data[n].tsk = NULL;
		}

		if (igt_live_test_end(&t))
			err = -EIO;
	}

out:
	for (n = 0; n < count; n++) {
		for (m = 0; m < ARRAY_SIZE(data->ce); m++) {
			if (!data[n].ce[m])
				continue;

			intel_context_unpin(data[n].ce[m]);
			intel_context_put(data[n].ce[m]);
		}
	}
	kfree(data);
out_file:
	fput(file);
	return err;
}

static unsigned long real_page_count(struct drm_i915_gem_object *obj)
{
	return huge_gem_object_phys_size(obj) >> PAGE_SHIFT;
}

static unsigned long fake_page_count(struct drm_i915_gem_object *obj)
{
	return huge_gem_object_dma_size(obj) >> PAGE_SHIFT;
}
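
/*
 * Illustrative note (added for clarity; not part of the upstream selftest):
 * a huge_gem_object advertises a large GTT footprint (fake_page_count())
 * while backing it with a much smaller pool of physical pages
 * (real_page_count()); the physical pages repeat throughout the mapping,
 * which is what the gpu_fill()/cpu_check() pattern below relies on.
 */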

static int gpu_fill(struct intel_context *ce,
		    struct drm_i915_gem_object *obj,
		    unsigned int dw)
{
	struct i915_vma *vma;
	int err;

	GEM_BUG_ON(obj->base.size > ce->vm->total);
	GEM_BUG_ON(!intel_engine_can_store_dword(ce->engine));

	vma = i915_vma_instance(obj, ce->vm, NULL);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	err = i915_vma_pin(vma, 0, 0, PIN_HIGH | PIN_USER);
	if (err)
		return err;

	/*
	 * Within the GTT the huge object maps every page onto
	 * its 1024 real pages (using phys_pfn = dma_pfn % 1024).
	 * We set the nth dword within the page using the nth
	 * mapping via the GTT - this should exercise the GTT mapping
	 * whilst checking that each context provides a unique view
	 * into the object.
	 */
	err = igt_gpu_fill_dw(ce, vma,
			      (dw * real_page_count(obj)) << PAGE_SHIFT |
			      (dw * sizeof(u32)),
			      real_page_count(obj),
			      dw);
	i915_vma_unpin(vma);

	return err;
}
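
/*
 * Illustrative sketch (added; assumes the 1024:1 aliasing described above
 * and is not part of the upstream selftest): on pass dw, gpu_fill() stores
 * the value dw once per real page, starting at fake page
 * dw * real_page_count(obj) and stepping a page at a time, always at byte
 * offset dw * sizeof(u32) within the page. E.g. with 1024 real pages and
 * dw == 2, the stores land at GTT offsets (2048 + n) * PAGE_SIZE + 8 for
 * n = 0..1023, which alias back onto dword 2 of each physical page.
 */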

static int cpu_fill(struct drm_i915_gem_object *obj, u32 value)
{
	const bool has_llc = HAS_LLC(to_i915(obj->base.dev));
	unsigned int n, m, need_flush;
	int err;

	err = i915_gem_object_prepare_write(obj, &need_flush);
	if (err)
		return err;

	for (n = 0; n < real_page_count(obj); n++) {
		u32 *map;

		map = kmap_atomic(i915_gem_object_get_page(obj, n));
		for (m = 0; m < DW_PER_PAGE; m++)
			map[m] = value;
		if (!has_llc)
			drm_clflush_virt_range(map, PAGE_SIZE);
		kunmap_atomic(map);
	}

	i915_gem_object_finish_access(obj);
	obj->read_domains = I915_GEM_DOMAIN_GTT | I915_GEM_DOMAIN_CPU;
	obj->write_domain = 0;
	return 0;
}

static noinline int cpu_check(struct drm_i915_gem_object *obj,
			      unsigned int idx, unsigned int max)
{
	unsigned int n, m, needs_flush;
	int err;

	err = i915_gem_object_prepare_read(obj, &needs_flush);
	if (err)
		return err;

	for (n = 0; n < real_page_count(obj); n++) {
		u32 *map;

		map = kmap_atomic(i915_gem_object_get_page(obj, n));
		if (needs_flush & CLFLUSH_BEFORE)
			drm_clflush_virt_range(map, PAGE_SIZE);

		for (m = 0; m < max; m++) {
			if (map[m] != m) {
				pr_err("%pS: Invalid value at object %d page %d/%ld, offset %d/%d: found %x expected %x\n",
				       __builtin_return_address(0), idx,
				       n, real_page_count(obj), m, max,
				       map[m], m);
				err = -EINVAL;
				goto out_unmap;
			}
		}

		for (; m < DW_PER_PAGE; m++) {
			if (map[m] != STACK_MAGIC) {
				pr_err("%pS: Invalid value at object %d page %d, offset %d: found %x expected %x (uninitialised)\n",
				       __builtin_return_address(0), idx, n, m,
				       map[m], STACK_MAGIC);
				err = -EINVAL;
				goto out_unmap;
			}
		}

out_unmap:
		kunmap_atomic(map);
		if (err)
			break;
	}

	i915_gem_object_finish_access(obj);
	return err;
}
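
/*
 * Illustrative note (added; not part of the upstream selftest): after all
 * gpu_fill() passes have run, dword m of every physical page is expected to
 * hold the value m written on pass m, while dwords at or beyond 'max' must
 * still contain the STACK_MAGIC pattern laid down by cpu_fill(); a stray GPU
 * write therefore shows up as corruption of the uninitialised tail.
 */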

static int file_add_object(struct file *file, struct drm_i915_gem_object *obj)
{
	int err;

	GEM_BUG_ON(obj->base.handle_count);

	/* tie the object to the drm_file for easy reaping */
	err = idr_alloc(&to_drm_file(file)->object_idr,
			&obj->base, 1, 0, GFP_KERNEL);
	if (err < 0)
		return err;

	i915_gem_object_get(obj);
	obj->base.handle_count++;
	return 0;
}

static struct drm_i915_gem_object *
create_test_object(struct i915_address_space *vm,
		   struct file *file,
		   struct list_head *objects)
{
	struct drm_i915_gem_object *obj;
	u64 size;
	int err;

	/* Keep in GEM's good graces */
	intel_gt_retire_requests(vm->gt);

	size = min(vm->total / 2, 1024ull * DW_PER_PAGE * PAGE_SIZE);
	size = round_down(size, DW_PER_PAGE * PAGE_SIZE);

	obj = huge_gem_object(vm->i915, DW_PER_PAGE * PAGE_SIZE, size);
	if (IS_ERR(obj))
		return obj;

	err = file_add_object(file, obj);
	i915_gem_object_put(obj);
	if (err)
		return ERR_PTR(err);

	err = cpu_fill(obj, STACK_MAGIC);
	if (err) {
		pr_err("Failed to fill object with cpu, err=%d\n",
		       err);
		return ERR_PTR(err);
	}

	list_add_tail(&obj->st_link, objects);
	return obj;
}

static unsigned long max_dwords(struct drm_i915_gem_object *obj)
{
	unsigned long npages = fake_page_count(obj);

	GEM_BUG_ON(!IS_ALIGNED(npages, DW_PER_PAGE));
	return npages / DW_PER_PAGE;
}

static void throttle_release(struct i915_request **q, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		if (IS_ERR_OR_NULL(q[i]))
			continue;

		i915_request_put(fetch_and_zero(&q[i]));
	}
}

static int throttle(struct intel_context *ce,
		    struct i915_request **q, int count)
{
	int i;

	if (!IS_ERR_OR_NULL(q[0])) {
		if (i915_request_wait(q[0],
				      I915_WAIT_INTERRUPTIBLE,
				      MAX_SCHEDULE_TIMEOUT) < 0)
			return -EINTR;

		i915_request_put(q[0]);
	}

	for (i = 0; i < count - 1; i++)
		q[i] = q[i + 1];

	q[i] = intel_context_create_request(ce);
	if (IS_ERR(q[i]))
		return PTR_ERR(q[i]);

	i915_request_get(q[i]);
	i915_request_add(q[i]);

	return 0;
}
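
/*
 * Usage sketch (added for clarity; mirrors how the tests below use these
 * helpers): callers keep a small FIFO of in-flight requests,
 *
 *	struct i915_request *tq[5] = {};
 *	...
 *	err = throttle(ce, tq, ARRAY_SIZE(tq));
 *	...
 *	throttle_release(tq, ARRAY_SIZE(tq));
 *
 * so each loop iteration waits for the oldest queued request before adding
 * a new one, bounding how far the test can run ahead of the hardware.
 */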

static int igt_ctx_exec(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	int err = -ENODEV;

	/*
	 * Create a few different contexts (with different mm) and write
	 * through each ctx/mm using the GPU making sure those writes end
	 * up in the expected pages of our obj.
	 */

	if (!DRIVER_CAPS(i915)->has_logical_contexts)
		return 0;

	for_each_uabi_engine(engine, i915) {
		struct drm_i915_gem_object *obj = NULL;
		unsigned long ncontexts, ndwords, dw;
		struct i915_request *tq[5] = {};
		struct igt_live_test t;
		IGT_TIMEOUT(end_time);
		LIST_HEAD(objects);
		struct file *file;

		if (!intel_engine_can_store_dword(engine))
			continue;

		if (!engine->context_size)
			continue; /* No logical context support in HW */

		file = mock_file(i915);
		if (IS_ERR(file))
			return PTR_ERR(file);

		err = igt_live_test_begin(&t, i915, __func__, engine->name);
		if (err)
			goto out_file;

		ncontexts = 0;
		ndwords = 0;
		dw = 0;
		while (!time_after(jiffies, end_time)) {
			struct i915_gem_context *ctx;
			struct intel_context *ce;

			ctx = kernel_context(i915);
			if (IS_ERR(ctx)) {
				err = PTR_ERR(ctx);
				goto out_file;
			}

			ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
			GEM_BUG_ON(IS_ERR(ce));

			if (!obj) {
				obj = create_test_object(ce->vm, file, &objects);
				if (IS_ERR(obj)) {
					err = PTR_ERR(obj);
					intel_context_put(ce);
					kernel_context_close(ctx);
					goto out_file;
				}
			}

			err = gpu_fill(ce, obj, dw);
			if (err) {
				pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) [full-ppgtt? %s], err=%d\n",
				       ndwords, dw, max_dwords(obj),
				       engine->name,
				       yesno(!!rcu_access_pointer(ctx->vm)),
				       err);
				intel_context_put(ce);
				kernel_context_close(ctx);
				goto out_file;
			}

			err = throttle(ce, tq, ARRAY_SIZE(tq));
			if (err) {
				intel_context_put(ce);
				kernel_context_close(ctx);
				goto out_file;
			}

			if (++dw == max_dwords(obj)) {
				obj = NULL;
				dw = 0;
			}

			ndwords++;
			ncontexts++;

			intel_context_put(ce);
			kernel_context_close(ctx);
		}

		pr_info("Submitted %lu contexts to %s, filling %lu dwords\n",
			ncontexts, engine->name, ndwords);

		ncontexts = dw = 0;
		list_for_each_entry(obj, &objects, st_link) {
			unsigned int rem =
				min_t(unsigned int, ndwords - dw, max_dwords(obj));

			err = cpu_check(obj, ncontexts++, rem);
			if (err)
				break;

			dw += rem;
		}

out_file:
		throttle_release(tq, ARRAY_SIZE(tq));
		if (igt_live_test_end(&t))
			err = -EIO;

		fput(file);
		if (err)
			return err;

		i915_gem_drain_freed_objects(i915);
	}

	return 0;
}

static int igt_shared_ctx_exec(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_request *tq[5] = {};
	struct i915_gem_context *parent;
	struct intel_engine_cs *engine;
	struct igt_live_test t;
	struct file *file;
	int err = 0;

	/*
	 * Create a few different contexts with the same mm and write
	 * through each ctx using the GPU making sure those writes end
	 * up in the expected pages of our obj.
	 */

	if (!DRIVER_CAPS(i915)->has_logical_contexts)
		return 0;

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	parent = live_context(i915, file);
	if (IS_ERR(parent)) {
		err = PTR_ERR(parent);
		goto out_file;
	}

	if (!parent->vm) { /* not full-ppgtt; nothing to share */
		err = 0;
		goto out_file;
	}

	err = igt_live_test_begin(&t, i915, __func__, "");
	if (err)
		goto out_file;

	for_each_uabi_engine(engine, i915) {
		unsigned long ncontexts, ndwords, dw;
		struct drm_i915_gem_object *obj = NULL;
		IGT_TIMEOUT(end_time);
		LIST_HEAD(objects);

		if (!intel_engine_can_store_dword(engine))
			continue;

		dw = 0;
		ndwords = 0;
		ncontexts = 0;
		while (!time_after(jiffies, end_time)) {
			struct i915_gem_context *ctx;
			struct intel_context *ce;

			ctx = kernel_context(i915);
			if (IS_ERR(ctx)) {
				err = PTR_ERR(ctx);
				goto out_test;
			}

			mutex_lock(&ctx->mutex);
			__assign_ppgtt(ctx, ctx_vm(parent));
			mutex_unlock(&ctx->mutex);

			ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
			GEM_BUG_ON(IS_ERR(ce));

			if (!obj) {
				obj = create_test_object(ctx_vm(parent),
							 file, &objects);
				if (IS_ERR(obj)) {
					err = PTR_ERR(obj);
					intel_context_put(ce);
					kernel_context_close(ctx);
					goto out_test;
				}
			}

			err = gpu_fill(ce, obj, dw);
			if (err) {
				pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) [full-ppgtt? %s], err=%d\n",
				       ndwords, dw, max_dwords(obj),
				       engine->name,
				       yesno(!!rcu_access_pointer(ctx->vm)),
				       err);
				intel_context_put(ce);
				kernel_context_close(ctx);
				goto out_test;
			}

			err = throttle(ce, tq, ARRAY_SIZE(tq));
			if (err) {
				intel_context_put(ce);
				kernel_context_close(ctx);
				goto out_test;
			}

			if (++dw == max_dwords(obj)) {
				obj = NULL;
				dw = 0;
			}

			ndwords++;
			ncontexts++;

			intel_context_put(ce);
			kernel_context_close(ctx);
		}
		pr_info("Submitted %lu contexts to %s, filling %lu dwords\n",
			ncontexts, engine->name, ndwords);

		ncontexts = dw = 0;
		list_for_each_entry(obj, &objects, st_link) {
			unsigned int rem =
				min_t(unsigned int, ndwords - dw, max_dwords(obj));

			err = cpu_check(obj, ncontexts++, rem);
			if (err)
				goto out_test;

			dw += rem;
		}

		i915_gem_drain_freed_objects(i915);
	}
out_test:
	throttle_release(tq, ARRAY_SIZE(tq));
	if (igt_live_test_end(&t))
		err = -EIO;
out_file:
	fput(file);
	return err;
}

static struct i915_vma *rpcs_query_batch(struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj;
	u32 *cmd;
	int err;

	if (INTEL_GEN(vma->vm->i915) < 8)
		return ERR_PTR(-EINVAL);

	obj = i915_gem_object_create_internal(vma->vm->i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto err;
	}

	*cmd++ = MI_STORE_REGISTER_MEM_GEN8;
	*cmd++ = i915_mmio_reg_offset(GEN8_R_PWR_CLK_STATE);
	*cmd++ = lower_32_bits(vma->node.start);
	*cmd++ = upper_32_bits(vma->node.start);
	*cmd = MI_BATCH_BUFFER_END;

	__i915_gem_object_flush_map(obj, 0, 64);
	i915_gem_object_unpin_map(obj);

	intel_gt_chipset_flush(vma->vm->gt);

	vma = i915_vma_instance(obj, vma->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto err;

	return vma;

err:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

static int
emit_rpcs_query(struct drm_i915_gem_object *obj,
		struct intel_context *ce,
		struct i915_request **rq_out)
{
	struct i915_request *rq;
	struct i915_vma *batch;
	struct i915_vma *vma;
	int err;

	GEM_BUG_ON(!intel_engine_can_store_dword(ce->engine));

	vma = i915_vma_instance(obj, ce->vm, NULL);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	i915_gem_object_lock(obj);
	err = i915_gem_object_set_to_gtt_domain(obj, false);
	i915_gem_object_unlock(obj);
	if (err)
		return err;

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		return err;

	batch = rpcs_query_batch(vma);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto err_vma;
	}

	rq = i915_request_create(ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_batch;
	}

	err = rq->engine->emit_bb_start(rq,
					batch->node.start, batch->node.size,
					0);
	if (err)
		goto err_request;

	i915_vma_lock(batch);
	err = i915_request_await_object(rq, batch->obj, false);
	if (err == 0)
		err = i915_vma_move_to_active(batch, rq, 0);
	i915_vma_unlock(batch);
	if (err)
		goto skip_request;

	i915_vma_lock(vma);
	err = i915_request_await_object(rq, vma->obj, true);
	if (err == 0)
		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	i915_vma_unlock(vma);
	if (err)
		goto skip_request;

	i915_vma_unpin_and_release(&batch, 0);
	i915_vma_unpin(vma);

	*rq_out = i915_request_get(rq);

	i915_request_add(rq);

	return 0;

skip_request:
	i915_request_skip(rq, err);
err_request:
	i915_request_add(rq);
err_batch:
	i915_vma_unpin_and_release(&batch, 0);
err_vma:
	i915_vma_unpin(vma);

	return err;
}

#define TEST_IDLE	BIT(0)
#define TEST_BUSY	BIT(1)
#define TEST_RESET	BIT(2)

static int
__sseu_prepare(const char *name,
	       unsigned int flags,
	       struct intel_context *ce,
	       struct igt_spinner **spin)
{
	struct i915_request *rq;
	int ret;

	*spin = NULL;
	if (!(flags & (TEST_BUSY | TEST_RESET)))
		return 0;

	*spin = kzalloc(sizeof(**spin), GFP_KERNEL);
	if (!*spin)
		return -ENOMEM;

	ret = igt_spinner_init(*spin, ce->engine->gt);
	if (ret)
		goto err_free;

	rq = igt_spinner_create_request(*spin, ce, MI_NOOP);
	if (IS_ERR(rq)) {
		ret = PTR_ERR(rq);
		goto err_fini;
	}

	i915_request_add(rq);

	if (!igt_wait_for_spinner(*spin, rq)) {
		pr_err("%s: Spinner failed to start!\n", name);
		ret = -ETIMEDOUT;
		goto err_end;
	}

	return 0;

err_end:
	igt_spinner_end(*spin);
err_fini:
	igt_spinner_fini(*spin);
err_free:
	kfree(fetch_and_zero(spin));
	return ret;
}

static int
__read_slice_count(struct intel_context *ce,
		   struct drm_i915_gem_object *obj,
		   struct igt_spinner *spin,
		   u32 *rpcs)
{
	struct i915_request *rq = NULL;
	u32 s_mask, s_shift;
	unsigned int cnt;
	u32 *buf, val;
	long ret;

	ret = emit_rpcs_query(obj, ce, &rq);
	if (ret)
		return ret;

	if (spin)
		igt_spinner_end(spin);

	ret = i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
	i915_request_put(rq);
	if (ret < 0)
		return ret;

	buf = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(buf)) {
		ret = PTR_ERR(buf);
		return ret;
	}

	if (INTEL_GEN(ce->engine->i915) >= 11) {
		s_mask = GEN11_RPCS_S_CNT_MASK;
		s_shift = GEN11_RPCS_S_CNT_SHIFT;
	} else {
		s_mask = GEN8_RPCS_S_CNT_MASK;
		s_shift = GEN8_RPCS_S_CNT_SHIFT;
	}

	val = *buf;
	cnt = (val & s_mask) >> s_shift;
	*rpcs = val;

	i915_gem_object_unpin_map(obj);

	return cnt;
}

static int
__check_rpcs(const char *name, u32 rpcs, int slices, unsigned int expected,
	     const char *prefix, const char *suffix)
{
	if (slices == expected)
		return 0;

	if (slices < 0) {
		pr_err("%s: %s read slice count failed with %d%s\n",
		       name, prefix, slices, suffix);
		return slices;
	}

	pr_err("%s: %s slice count %d is not %u%s\n",
	       name, prefix, slices, expected, suffix);

	pr_info("RPCS=0x%x; %u%sx%u%s\n",
		rpcs, slices,
		(rpcs & GEN8_RPCS_S_CNT_ENABLE) ? "*" : "",
		(rpcs & GEN8_RPCS_SS_CNT_MASK) >> GEN8_RPCS_SS_CNT_SHIFT,
		(rpcs & GEN8_RPCS_SS_CNT_ENABLE) ? "*" : "");

	return -EINVAL;
}

static int
__sseu_finish(const char *name,
	      unsigned int flags,
	      struct intel_context *ce,
	      struct drm_i915_gem_object *obj,
	      unsigned int expected,
	      struct igt_spinner *spin)
{
	unsigned int slices = hweight32(ce->engine->sseu.slice_mask);
	u32 rpcs = 0;
	int ret = 0;

	if (flags & TEST_RESET) {
		ret = intel_engine_reset(ce->engine, "sseu");
		if (ret)
			goto out;
	}

	ret = __read_slice_count(ce, obj,
				 flags & TEST_RESET ? NULL : spin, &rpcs);
	ret = __check_rpcs(name, rpcs, ret, expected, "Context", "!");
	if (ret)
		goto out;

	ret = __read_slice_count(ce->engine->kernel_context, obj, NULL, &rpcs);
	ret = __check_rpcs(name, rpcs, ret, slices, "Kernel context", "!");

out:
	if (spin)
		igt_spinner_end(spin);

	if ((flags & TEST_IDLE) && ret == 0) {
		ret = igt_flush_test(ce->engine->i915);
		if (ret)
			return ret;

		ret = __read_slice_count(ce, obj, NULL, &rpcs);
		ret = __check_rpcs(name, rpcs, ret, expected,
				   "Context", " after idle!");
	}

	return ret;
}

static int
__sseu_test(const char *name,
	    unsigned int flags,
	    struct intel_context *ce,
	    struct drm_i915_gem_object *obj,
	    struct intel_sseu sseu)
{
	struct igt_spinner *spin = NULL;
	int ret;

	intel_engine_pm_get(ce->engine);

	ret = __sseu_prepare(name, flags, ce, &spin);
	if (ret)
		goto out_pm;

	ret = intel_context_reconfigure_sseu(ce, sseu);
	if (ret)
		goto out_spin;

	ret = __sseu_finish(name, flags, ce, obj,
			    hweight32(sseu.slice_mask), spin);

out_spin:
	if (spin) {
		igt_spinner_end(spin);
		igt_spinner_fini(spin);
		kfree(spin);
	}
out_pm:
	intel_engine_pm_put(ce->engine);
	return ret;
}

static int
__igt_ctx_sseu(struct drm_i915_private *i915,
	       const char *name,
	       unsigned int flags)
{
	struct drm_i915_gem_object *obj;
	int inst = 0;
	int ret = 0;

	if (INTEL_GEN(i915) < 9 || !RUNTIME_INFO(i915)->sseu.has_slice_pg)
		return 0;

	if (flags & TEST_RESET)
		igt_global_reset_lock(&i915->gt);

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto out_unlock;
	}

	do {
		struct intel_engine_cs *engine;
		struct intel_context *ce;
		struct intel_sseu pg_sseu;

		engine = intel_engine_lookup_user(i915,
						  I915_ENGINE_CLASS_RENDER,
						  inst++);
		if (!engine)
			break;

		if (hweight32(engine->sseu.slice_mask) < 2)
			continue;

		/*
		 * Gen11 VME friendly power-gated configuration with
		 * half enabled sub-slices.
		 */
		pg_sseu = engine->sseu;
		pg_sseu.slice_mask = 1;
		pg_sseu.subslice_mask =
			~(~0 << (hweight32(engine->sseu.subslice_mask) / 2));

		pr_info("%s: SSEU subtest '%s', flags=%x, def_slices=%u, pg_slices=%u\n",
			engine->name, name, flags,
			hweight32(engine->sseu.slice_mask),
			hweight32(pg_sseu.slice_mask));

		ce = intel_context_create(engine);
		if (IS_ERR(ce)) {
			ret = PTR_ERR(ce);
			goto out_put;
		}

		ret = intel_context_pin(ce);
		if (ret)
			goto out_ce;

		/* First set the default mask. */
		ret = __sseu_test(name, flags, ce, obj, engine->sseu);
		if (ret)
			goto out_unpin;

		/* Then set a power-gated configuration. */
		ret = __sseu_test(name, flags, ce, obj, pg_sseu);
		if (ret)
			goto out_unpin;

		/* Back to defaults. */
		ret = __sseu_test(name, flags, ce, obj, engine->sseu);
		if (ret)
			goto out_unpin;

		/* One last power-gated configuration for the road. */
		ret = __sseu_test(name, flags, ce, obj, pg_sseu);

out_unpin:
		intel_context_unpin(ce);
out_ce:
		intel_context_put(ce);
	} while (!ret);

	if (igt_flush_test(i915))
		ret = -EIO;

out_put:
	i915_gem_object_put(obj);

out_unlock:
	if (flags & TEST_RESET)
		igt_global_reset_unlock(&i915->gt);

	if (ret)
		pr_err("%s: Failed with %d!\n", name, ret);

	return ret;
}

static int igt_ctx_sseu(void *arg)
{
	struct {
		const char *name;
		unsigned int flags;
	} *phase, phases[] = {
		{ .name = "basic", .flags = 0 },
		{ .name = "idle", .flags = TEST_IDLE },
		{ .name = "busy", .flags = TEST_BUSY },
		{ .name = "busy-reset", .flags = TEST_BUSY | TEST_RESET },
		{ .name = "busy-idle", .flags = TEST_BUSY | TEST_IDLE },
		{ .name = "reset-idle", .flags = TEST_RESET | TEST_IDLE },
	};
	unsigned int i;
	int ret = 0;

	for (i = 0, phase = phases; ret == 0 && i < ARRAY_SIZE(phases);
	     i++, phase++)
		ret = __igt_ctx_sseu(arg, phase->name, phase->flags);

	return ret;
}

static int igt_ctx_readonly(void *arg)
{
	struct drm_i915_private *i915 = arg;
	unsigned long idx, ndwords, dw, num_engines;
	struct drm_i915_gem_object *obj = NULL;
	struct i915_request *tq[5] = {};
	struct i915_gem_engines_iter it;
	struct i915_address_space *vm;
	struct i915_gem_context *ctx;
	struct intel_context *ce;
	struct igt_live_test t;
	I915_RND_STATE(prng);
	IGT_TIMEOUT(end_time);
	LIST_HEAD(objects);
	struct file *file;
	int err = -ENODEV;

	/*
	 * Create a few read-only objects (with the occasional writable object)
	 * and try to write into these object checking that the GPU discards
	 * any write to a read-only object.
	 */

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	err = igt_live_test_begin(&t, i915, __func__, "");
	if (err)
		goto out_file;

	ctx = live_context(i915, file);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto out_file;
	}

	vm = ctx_vm(ctx) ?: &i915->ggtt.alias->vm;
	if (!vm || !vm->has_read_only) {
		err = 0;
		goto out_file;
	}

	num_engines = 0;
	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it)
		if (intel_engine_can_store_dword(ce->engine))
			num_engines++;
	i915_gem_context_unlock_engines(ctx);

	ndwords = 0;
	dw = 0;
	while (!time_after(jiffies, end_time)) {
		for_each_gem_engine(ce,
				    i915_gem_context_lock_engines(ctx), it) {
			if (!intel_engine_can_store_dword(ce->engine))
				continue;

			if (!obj) {
				obj = create_test_object(ce->vm, file, &objects);
				if (IS_ERR(obj)) {
					err = PTR_ERR(obj);
					i915_gem_context_unlock_engines(ctx);
					goto out_file;
				}

				if (prandom_u32_state(&prng) & 1)
					i915_gem_object_set_readonly(obj);
			}

			err = gpu_fill(ce, obj, dw);
			if (err) {
				pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) [full-ppgtt? %s], err=%d\n",
				       ndwords, dw, max_dwords(obj),
				       ce->engine->name,
				       yesno(!!ctx_vm(ctx)),
				       err);
				i915_gem_context_unlock_engines(ctx);
				goto out_file;
			}

			err = throttle(ce, tq, ARRAY_SIZE(tq));
			if (err) {
				i915_gem_context_unlock_engines(ctx);
				goto out_file;
			}

			if (++dw == max_dwords(obj)) {
				obj = NULL;
				dw = 0;
			}
			ndwords++;
		}
		i915_gem_context_unlock_engines(ctx);
	}
	pr_info("Submitted %lu dwords (across %lu engines)\n",
		ndwords, num_engines);

	dw = 0;
	idx = 0;
	list_for_each_entry(obj, &objects, st_link) {
		unsigned int rem =
			min_t(unsigned int, ndwords - dw, max_dwords(obj));
		unsigned int num_writes;

		num_writes = rem;
		if (i915_gem_object_is_readonly(obj))
			num_writes = 0;

		err = cpu_check(obj, idx++, num_writes);
		if (err)
			break;

		dw += rem;
	}

out_file:
	throttle_release(tq, ARRAY_SIZE(tq));
	if (igt_live_test_end(&t))
		err = -EIO;

	fput(file);
	return err;
}

static int check_scratch(struct i915_address_space *vm, u64 offset)
{
	struct drm_mm_node *node =
		__drm_mm_interval_first(&vm->mm,
					offset, offset + sizeof(u32) - 1);
	if (!node || node->start > offset)
		return 0;

	GEM_BUG_ON(offset >= node->start + node->size);

	pr_err("Target offset 0x%08x_%08x overlaps with a node in the mm!\n",
	       upper_32_bits(offset), lower_32_bits(offset));
	return -EINVAL;
}

static int write_to_scratch(struct i915_gem_context *ctx,
			    struct intel_engine_cs *engine,
			    u64 offset, u32 value)
{
	struct drm_i915_private *i915 = ctx->i915;
	struct drm_i915_gem_object *obj;
	struct i915_address_space *vm;
	struct i915_request *rq;
	struct i915_vma *vma;
	u32 *cmd;
	int err;

	GEM_BUG_ON(offset < I915_GTT_PAGE_SIZE);

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto out;
	}

	*cmd++ = MI_STORE_DWORD_IMM_GEN4;
	if (INTEL_GEN(i915) >= 8) {
		*cmd++ = lower_32_bits(offset);
		*cmd++ = upper_32_bits(offset);
	} else {
		*cmd++ = 0;
		*cmd++ = offset;
	}
	*cmd++ = value;
	*cmd = MI_BATCH_BUFFER_END;
	__i915_gem_object_flush_map(obj, 0, 64);
	i915_gem_object_unpin_map(obj);

	intel_gt_chipset_flush(engine->gt);

	vm = i915_gem_context_get_vm_rcu(ctx);
	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto out_vm;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED);
	if (err)
		goto out_vm;

	err = check_scratch(vm, offset);
	if (err)
		goto err_unpin;

	rq = igt_request_alloc(ctx, engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_unpin;
	}

	err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, 0);
	if (err)
		goto err_request;

	i915_vma_lock(vma);
	err = i915_request_await_object(rq, vma->obj, false);
	if (err == 0)
		err = i915_vma_move_to_active(vma, rq, 0);
	i915_vma_unlock(vma);
	if (err)
		goto skip_request;

	i915_vma_unpin(vma);

	i915_request_add(rq);

	goto out_vm;

skip_request:
	i915_request_skip(rq, err);
err_request:
	i915_request_add(rq);
err_unpin:
	i915_vma_unpin(vma);
out_vm:
	i915_vm_put(vm);
out:
	i915_gem_object_put(obj);
	return err;
}

static int read_from_scratch(struct i915_gem_context *ctx,
			     struct intel_engine_cs *engine,
			     u64 offset, u32 *value)
{
	struct drm_i915_private *i915 = ctx->i915;
	struct drm_i915_gem_object *obj;
	struct i915_address_space *vm;
	const u32 RCS_GPR0 = 0x2600; /* not all engines have their own GPR! */
	const u32 result = 0x100;
	struct i915_request *rq;
	struct i915_vma *vma;
	u32 *cmd;
	int err;

	GEM_BUG_ON(offset < I915_GTT_PAGE_SIZE);

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto out;
	}

	memset(cmd, POISON_INUSE, PAGE_SIZE);
	if (INTEL_GEN(i915) >= 8) {
		*cmd++ = MI_LOAD_REGISTER_MEM_GEN8;
		*cmd++ = RCS_GPR0;
		*cmd++ = lower_32_bits(offset);
		*cmd++ = upper_32_bits(offset);
		*cmd++ = MI_STORE_REGISTER_MEM_GEN8;
		*cmd++ = RCS_GPR0;
		*cmd++ = result;
		*cmd++ = 0;
	} else {
		*cmd++ = MI_LOAD_REGISTER_MEM;
		*cmd++ = RCS_GPR0;
		*cmd++ = offset;
		*cmd++ = MI_STORE_REGISTER_MEM;
		*cmd++ = RCS_GPR0;
		*cmd++ = result;
	}
	*cmd = MI_BATCH_BUFFER_END;

	i915_gem_object_flush_map(obj);
	i915_gem_object_unpin_map(obj);

	intel_gt_chipset_flush(engine->gt);

	vm = i915_gem_context_get_vm_rcu(ctx);
	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto out_vm;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED);
	if (err)
		goto out_vm;

	err = check_scratch(vm, offset);
	if (err)
		goto err_unpin;

	rq = igt_request_alloc(ctx, engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_unpin;
	}

	err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, 0);
	if (err)
		goto err_request;

	i915_vma_lock(vma);
	err = i915_request_await_object(rq, vma->obj, true);
	if (err == 0)
		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	i915_vma_unlock(vma);
	if (err)
		goto skip_request;

	i915_vma_unpin(vma);
	i915_vma_close(vma);

	i915_request_add(rq);

	i915_gem_object_lock(obj);
	err = i915_gem_object_set_to_cpu_domain(obj, false);
	i915_gem_object_unlock(obj);
	if (err)
		goto out_vm;

	cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto out_vm;
	}

	*value = cmd[result / sizeof(*cmd)];
	i915_gem_object_unpin_map(obj);

	goto out_vm;

skip_request:
	i915_request_skip(rq, err);
err_request:
	i915_request_add(rq);
err_unpin:
	i915_vma_unpin(vma);
out_vm:
	i915_vm_put(vm);
out:
	i915_gem_object_put(obj);
	return err;
}

static int igt_vm_isolation(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx_a, *ctx_b;
	unsigned long num_engines, count;
	struct intel_engine_cs *engine;
	struct igt_live_test t;
	I915_RND_STATE(prng);
	struct file *file;
	u64 vm_total;
	int err;

	if (INTEL_GEN(i915) < 7)
		return 0;

	/*
	 * The simple goal here is that a write into one context is not
	 * observed in a second (separate page tables and scratch).
	 */

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	err = igt_live_test_begin(&t, i915, __func__, "");
	if (err)
		goto out_file;

	ctx_a = live_context(i915, file);
	if (IS_ERR(ctx_a)) {
		err = PTR_ERR(ctx_a);
		goto out_file;
	}

	ctx_b = live_context(i915, file);
	if (IS_ERR(ctx_b)) {
		err = PTR_ERR(ctx_b);
		goto out_file;
	}

	/* We can only test vm isolation, if the vm are distinct */
	if (ctx_vm(ctx_a) == ctx_vm(ctx_b))
		goto out_file;

	vm_total = ctx_vm(ctx_a)->total;
	GEM_BUG_ON(ctx_vm(ctx_b)->total != vm_total);
	vm_total -= I915_GTT_PAGE_SIZE;

	count = 0;
	num_engines = 0;
	for_each_uabi_engine(engine, i915) {
		IGT_TIMEOUT(end_time);
		unsigned long this = 0;

		if (!intel_engine_can_store_dword(engine))
			continue;

		while (!__igt_timeout(end_time, NULL)) {
			u32 value = 0xc5c5c5c5;
			u64 offset;

			div64_u64_rem(i915_prandom_u64_state(&prng),
				      vm_total, &offset);
			offset = round_down(offset, alignof_dword);
			offset += I915_GTT_PAGE_SIZE;

			err = write_to_scratch(ctx_a, engine,
					       offset, 0xdeadbeef);
			if (err == 0)
				err = read_from_scratch(ctx_b, engine,
							offset, &value);
			if (err)
				goto out_file;

			if (value) {
				pr_err("%s: Read %08x from scratch (offset 0x%08x_%08x), after %lu reads!\n",
				       engine->name, value,
				       upper_32_bits(offset),
				       lower_32_bits(offset),
				       this);
				err = -EINVAL;
				goto out_file;
			}

			this++;
		}
		count += this;
		num_engines++;
	}
	pr_info("Checked %lu scratch offsets across %lu engines\n",
		count, num_engines);

out_file:
	if (igt_live_test_end(&t))
		err = -EIO;
	fput(file);
	return err;
}

static bool skip_unused_engines(struct intel_context *ce, void *data)
{
	return !ce->state;
}

static void mock_barrier_task(void *data)
{
	unsigned int *counter = data;

	++*counter;
}

static int mock_context_barrier(void *arg)
{
#undef pr_fmt
#define pr_fmt(x) "context_barrier_task():" # x
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx;
	struct i915_request *rq;
	unsigned int counter;
	int err;

	/*
	 * The context barrier provides us with a callback after it emits
	 * a request; useful for retiring old state after loading new.
	 */

	ctx = mock_context(i915, "mock");
	if (!ctx)
		return -ENOMEM;

	counter = 0;
	err = context_barrier_task(ctx, 0,
				   NULL, NULL, mock_barrier_task, &counter);
	if (err) {
		pr_err("Failed at line %d, err=%d\n", __LINE__, err);
		goto out;
	}
	if (counter == 0) {
		pr_err("Did not retire immediately with 0 engines\n");
		err = -EINVAL;
		goto out;
	}

	counter = 0;
	err = context_barrier_task(ctx, ALL_ENGINES,
				   skip_unused_engines,
				   NULL, mock_barrier_task, &counter);
	if (err) {
		pr_err("Failed at line %d, err=%d\n", __LINE__, err);
		goto out;
	}
	if (counter == 0) {
		pr_err("Did not retire immediately for all unused engines\n");
		err = -EINVAL;
		goto out;
	}

	rq = igt_request_alloc(ctx, i915->engine[RCS0]);
	if (IS_ERR(rq)) {
		pr_err("Request allocation failed!\n");
		goto out;
	}
	i915_request_add(rq);

	counter = 0;
	context_barrier_inject_fault = BIT(RCS0);
	err = context_barrier_task(ctx, ALL_ENGINES,
				   NULL, NULL, mock_barrier_task, &counter);
	context_barrier_inject_fault = 0;
	if (err == -ENXIO)
		err = 0;
	else
		pr_err("Did not hit fault injection!\n");
	if (counter != 0) {
		pr_err("Invoked callback on error!\n");
		err = -EINVAL;
	}
	if (err)
		goto out;

	counter = 0;
	err = context_barrier_task(ctx, ALL_ENGINES,
				   skip_unused_engines,
				   NULL, mock_barrier_task, &counter);
	if (err) {
		pr_err("Failed at line %d, err=%d\n", __LINE__, err);
		goto out;
	}
	mock_device_flush(i915);
	if (counter == 0) {
		pr_err("Did not retire on each active engine\n");
		err = -EINVAL;
		goto out;
	}

out:
	mock_context_close(ctx);
	return err;
#undef pr_fmt
#define pr_fmt(x) x
}

int i915_gem_context_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(mock_context_barrier),
	};
	struct drm_i915_private *i915;
	int err;

	i915 = mock_gem_device();
	if (!i915)
		return -ENOMEM;

	err = i915_subtests(tests, i915);

	drm_dev_put(&i915->drm);
	return err;
}

int i915_gem_context_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_nop_switch),
		SUBTEST(live_parallel_switch),
		SUBTEST(igt_ctx_exec),
		SUBTEST(igt_ctx_readonly),
		SUBTEST(igt_ctx_sseu),
		SUBTEST(igt_shared_ctx_exec),
		SUBTEST(igt_vm_isolation),
	};

	if (intel_gt_is_wedged(&i915->gt))
		return 0;

	return i915_live_subtests(tests, i915);
}