/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include "gt/intel_gt.h"

#include "gem/selftests/igt_gem_utils.h"

#include "igt_spinner.h"
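
/*
 * igt_spinner is a selftest utility: it builds a batch that spins on the
 * GPU until explicitly ended, so tests can keep an engine busy on demand.
 *
 * igt_spinner_init() allocates and maps the two backing pages: a HWS page
 * used as per-context seqno scratch space, and the page that will hold
 * the spinning batch itself.
 */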
int igt_spinner_init(struct igt_spinner *spin, struct intel_gt *gt)
{
	unsigned int mode;
	void *vaddr;
	int err;

	memset(spin, 0, sizeof(*spin));
	spin->gt = gt;

	spin->hws = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
	if (IS_ERR(spin->hws)) {
		err = PTR_ERR(spin->hws);
		goto err;
	}

	spin->obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
	if (IS_ERR(spin->obj)) {
		err = PTR_ERR(spin->obj);
		goto err_hws;
	}

	i915_gem_object_set_cache_coherency(spin->hws, I915_CACHE_LLC);
	vaddr = i915_gem_object_pin_map(spin->hws, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto err_obj;
	}
	spin->seqno = memset(vaddr, 0xff, PAGE_SIZE);

	mode = i915_coherent_map_type(gt->i915);
	vaddr = i915_gem_object_pin_map(spin->obj, mode);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto err_unpin_hws;
	}
	spin->batch = vaddr;

	return 0;

err_unpin_hws:
	i915_gem_object_unpin_map(spin->hws);
err_obj:
	i915_gem_object_put(spin->obj);
err_hws:
	i915_gem_object_put(spin->hws);
err:
	return err;
}
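
/*
 * Each fence context gets its own u32 slot in the HWS page; the spinner
 * batch writes the request's seqno there so the CPU can tell when the
 * spinner has actually started executing.
 */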
static unsigned int seqno_offset(u64 fence)
{
	return offset_in_page(sizeof(u32) * fence);
}

static u64 hws_address(const struct i915_vma *hws,
		       const struct i915_request *rq)
{
	return hws->node.start + seqno_offset(rq->fence.context);
}
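
/*
 * Track the vma on the request: await any prior fences on the backing
 * object and mark the vma as active so it stays resident until the
 * request retires.
 */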
static int move_to_active(struct i915_vma *vma,
			  struct i915_request *rq,
			  unsigned int flags)
{
	int err;

	i915_vma_lock(vma);
	err = i915_request_await_object(rq, vma->obj,
					flags & EXEC_OBJECT_WRITE);
	if (err == 0)
		err = i915_vma_move_to_active(vma, rq, flags);
	i915_vma_unlock(vma);

	return err;
}
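
/*
 * Build a request whose batch first writes the request's seqno into this
 * context's HWS slot and then branches back to its own start, spinning
 * until preempted (depending on arbitration_command) or until
 * igt_spinner_end() rewrites the first batch dword to MI_BATCH_BUFFER_END.
 */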
struct i915_request *
igt_spinner_create_request(struct igt_spinner *spin,
			   struct intel_context *ce,
			   u32 arbitration_command)
{
	struct intel_engine_cs *engine = ce->engine;
	struct i915_request *rq = NULL;
	struct i915_vma *hws, *vma;
	unsigned int flags;
	u32 *batch;
	int err;

	GEM_BUG_ON(spin->gt != ce->vm->gt);

	if (!intel_engine_can_store_dword(ce->engine))
		return ERR_PTR(-ENODEV);

	vma = i915_vma_instance(spin->obj, ce->vm, NULL);
	if (IS_ERR(vma))
		return ERR_CAST(vma);

	hws = i915_vma_instance(spin->hws, ce->vm, NULL);
	if (IS_ERR(hws))
		return ERR_CAST(hws);

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		return ERR_PTR(err);

	err = i915_vma_pin(hws, 0, 0, PIN_USER);
	if (err)
		goto unpin_vma;

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto unpin_hws;
	}

	err = move_to_active(vma, rq, 0);
	if (err)
		goto cancel_rq;

	err = move_to_active(hws, rq, 0);
	if (err)
		goto cancel_rq;

	batch = spin->batch;

	if (INTEL_GEN(rq->engine->i915) >= 8) {
		*batch++ = MI_STORE_DWORD_IMM_GEN4;
		*batch++ = lower_32_bits(hws_address(hws, rq));
		*batch++ = upper_32_bits(hws_address(hws, rq));
	} else if (INTEL_GEN(rq->engine->i915) >= 6) {
		*batch++ = MI_STORE_DWORD_IMM_GEN4;
		*batch++ = 0;
		*batch++ = hws_address(hws, rq);
	} else if (INTEL_GEN(rq->engine->i915) >= 4) {
		*batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
		*batch++ = 0;
		*batch++ = hws_address(hws, rq);
	} else {
		*batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
		*batch++ = hws_address(hws, rq);
	}
	*batch++ = rq->fence.seqno;

	*batch++ = arbitration_command;

	if (INTEL_GEN(rq->engine->i915) >= 8)
		*batch++ = MI_BATCH_BUFFER_START | BIT(8) | 1;
	else if (IS_HASWELL(rq->engine->i915))
		*batch++ = MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW;
	else if (INTEL_GEN(rq->engine->i915) >= 6)
		*batch++ = MI_BATCH_BUFFER_START;
	else
		*batch++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
	*batch++ = lower_32_bits(vma->node.start);
	*batch++ = upper_32_bits(vma->node.start);

	*batch++ = MI_BATCH_BUFFER_END; /* not reached */

	intel_gt_chipset_flush(engine->gt);

	if (engine->emit_init_breadcrumb) {
		err = engine->emit_init_breadcrumb(rq);
		if (err)
			goto cancel_rq;
	}

	flags = 0;
	if (INTEL_GEN(rq->engine->i915) <= 5)
		flags |= I915_DISPATCH_SECURE;
	err = engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags);

cancel_rq:
	if (err) {
		i915_request_set_error_once(rq, err);
		i915_request_add(rq);
	}
unpin_hws:
	i915_vma_unpin(hws);
unpin_vma:
	i915_vma_unpin(vma);
	return err ? ERR_PTR(err) : rq;
}

static u32
hws_seqno(const struct igt_spinner *spin, const struct i915_request *rq)
{
	u32 *seqno = spin->seqno + seqno_offset(rq->fence.context);

	return READ_ONCE(*seqno);
}
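
/* Terminate the spin by turning the first batch dword into a batch end. */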
void igt_spinner_end(struct igt_spinner *spin)
{
	*spin->batch = MI_BATCH_BUFFER_END;
	intel_gt_chipset_flush(spin->gt);
}

void igt_spinner_fini(struct igt_spinner *spin)
{
	igt_spinner_end(spin);

	i915_gem_object_unpin_map(spin->obj);
	i915_gem_object_put(spin->obj);

	i915_gem_object_unpin_map(spin->hws);
	i915_gem_object_put(spin->hws);
}
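
/*
 * Wait for the spinner batch to report its seqno via the HWS page,
 * i.e. for the spinner to have actually started running on the GPU.
 */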
bool igt_wait_for_spinner(struct igt_spinner *spin, struct i915_request *rq)
{
	return !(wait_for_us(i915_seqno_passed(hws_seqno(spin, rq),
					       rq->fence.seqno),
			     10) &&
		 wait_for(i915_seqno_passed(hws_seqno(spin, rq),
					    rq->fence.seqno),
			  1000));
}