drivers/gpu/drm/i915/selftests/igt_spinner.c
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */
#include "gt/intel_gt.h"

#include "gem/selftests/igt_gem_utils.h"

#include "igt_spinner.h"
int igt_spinner_init(struct igt_spinner *spin, struct intel_gt *gt)
{
	unsigned int mode;
	void *vaddr;
	int err;

	memset(spin, 0, sizeof(*spin));
	spin->gt = gt;

	/* One page for the seqno scratch (HWS), one page for the batch. */
	spin->hws = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
	if (IS_ERR(spin->hws)) {
		err = PTR_ERR(spin->hws);
		goto err;
	}

	spin->obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
	if (IS_ERR(spin->obj)) {
		err = PTR_ERR(spin->obj);
		goto err_hws;
	}

	i915_gem_object_set_cache_coherency(spin->hws, I915_CACHE_LLC);
	vaddr = i915_gem_object_pin_map(spin->hws, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto err_obj;
	}
	spin->seqno = memset(vaddr, 0xff, PAGE_SIZE);

	mode = i915_coherent_map_type(gt->i915);
	vaddr = i915_gem_object_pin_map(spin->obj, mode);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto err_unpin_hws;
	}
	spin->batch = vaddr;

	return 0;

err_unpin_hws:
	i915_gem_object_unpin_map(spin->hws);
err_obj:
	i915_gem_object_put(spin->obj);
err_hws:
	i915_gem_object_put(spin->hws);
err:
	return err;
}
static unsigned int seqno_offset(u64 fence)
{
	return offset_in_page(sizeof(u32) * fence);
}
static u64 hws_address(const struct i915_vma *hws,
		       const struct i915_request *rq)
{
	return hws->node.start + seqno_offset(rq->fence.context);
}
static int move_to_active(struct i915_vma *vma,
			  struct i915_request *rq,
			  unsigned int flags)
{
	int err;

	i915_vma_lock(vma);
	err = i915_request_await_object(rq, vma->obj,
					flags & EXEC_OBJECT_WRITE);
	if (err == 0)
		err = i915_vma_move_to_active(vma, rq, flags);
	i915_vma_unlock(vma);

	return err;
}
struct i915_request *
igt_spinner_create_request(struct igt_spinner *spin,
			   struct intel_context *ce,
			   u32 arbitration_command)
{
	struct intel_engine_cs *engine = ce->engine;
	struct i915_request *rq = NULL;
	struct i915_vma *hws, *vma;
	unsigned int flags;
	u32 *batch;
	int err;

	GEM_BUG_ON(spin->gt != ce->vm->gt);

	if (!intel_engine_can_store_dword(ce->engine))
		return ERR_PTR(-ENODEV);

	vma = i915_vma_instance(spin->obj, ce->vm, NULL);
	if (IS_ERR(vma))
		return ERR_CAST(vma);

	hws = i915_vma_instance(spin->hws, ce->vm, NULL);
	if (IS_ERR(hws))
		return ERR_CAST(hws);

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		return ERR_PTR(err);

	err = i915_vma_pin(hws, 0, 0, PIN_USER);
	if (err)
		goto unpin_vma;

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto unpin_hws;
	}

	err = move_to_active(vma, rq, 0);
	if (err)
		goto cancel_rq;

	err = move_to_active(hws, rq, 0);
	if (err)
		goto cancel_rq;

	batch = spin->batch;

	/* Report the request's seqno into its slot in the scratch page. */
	if (INTEL_GEN(rq->engine->i915) >= 8) {
		*batch++ = MI_STORE_DWORD_IMM_GEN4;
		*batch++ = lower_32_bits(hws_address(hws, rq));
		*batch++ = upper_32_bits(hws_address(hws, rq));
	} else if (INTEL_GEN(rq->engine->i915) >= 6) {
		*batch++ = MI_STORE_DWORD_IMM_GEN4;
		*batch++ = 0;
		*batch++ = hws_address(hws, rq);
	} else if (INTEL_GEN(rq->engine->i915) >= 4) {
		*batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
		*batch++ = 0;
		*batch++ = hws_address(hws, rq);
	} else {
		*batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
		*batch++ = hws_address(hws, rq);
	}
	*batch++ = rq->fence.seqno;

	*batch++ = arbitration_command;

	/* Branch back to the start of the batch, spinning indefinitely. */
	if (INTEL_GEN(rq->engine->i915) >= 8)
		*batch++ = MI_BATCH_BUFFER_START | BIT(8) | 1;
	else if (IS_HASWELL(rq->engine->i915))
		*batch++ = MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW;
	else if (INTEL_GEN(rq->engine->i915) >= 6)
		*batch++ = MI_BATCH_BUFFER_START;
	else
		*batch++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
	*batch++ = lower_32_bits(vma->node.start);
	*batch++ = upper_32_bits(vma->node.start);

	*batch++ = MI_BATCH_BUFFER_END; /* not reached */

	intel_gt_chipset_flush(engine->gt);

	if (engine->emit_init_breadcrumb) {
		err = engine->emit_init_breadcrumb(rq);
		if (err)
			goto cancel_rq;
	}

	flags = 0;
	if (INTEL_GEN(rq->engine->i915) <= 5)
		flags |= I915_DISPATCH_SECURE;
	err = engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags);

cancel_rq:
	if (err) {
		i915_request_set_error_once(rq, err);
		i915_request_add(rq);
	}
unpin_hws:
	i915_vma_unpin(hws);
unpin_vma:
	i915_vma_unpin(vma);
	return err ? ERR_PTR(err) : rq;
}
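
/*
 * For reference, an illustrative view of the batch emitted above (gen8+
 * form shown; earlier gens use narrower encodings of the same commands):
 *
 *	MI_STORE_DWORD_IMM_GEN4		; write rq->fence.seqno into the
 *	<hws address, lo/hi dwords>	;   per-context slot of spin->hws
 *	<rq->fence.seqno>
 *	<arbitration_command>		; callers pass e.g. MI_ARB_CHECK to
 *					;   allow preemption, MI_NOOP to deny
 *	MI_BATCH_BUFFER_START		; jump back to the top of the batch
 *	<batch address, lo/hi dwords>
 *	MI_BATCH_BUFFER_END		; safety terminator, never reached:
 *					;   igt_spinner_end() breaks the loop
 *					;   by rewriting the first dword
 */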
static u32
hws_seqno(const struct igt_spinner *spin, const struct i915_request *rq)
{
	u32 *seqno = spin->seqno + seqno_offset(rq->fence.context);

	return READ_ONCE(*seqno);
}
void igt_spinner_end(struct igt_spinner *spin)
{
	*spin->batch = MI_BATCH_BUFFER_END;
	intel_gt_chipset_flush(spin->gt);
}
void igt_spinner_fini(struct igt_spinner *spin)
{
	igt_spinner_end(spin);

	i915_gem_object_unpin_map(spin->obj);
	i915_gem_object_put(spin->obj);

	i915_gem_object_unpin_map(spin->hws);
	i915_gem_object_put(spin->hws);
}
bool igt_wait_for_spinner(struct igt_spinner *spin, struct i915_request *rq)
{
	/*
	 * The spinner has started once its seqno lands in the scratch page:
	 * poll quickly for up to 100us, then back off and poll for up to
	 * a further 50ms before declaring the spinner dead on arrival.
	 */
	return !(wait_for_us(i915_seqno_passed(hws_seqno(spin, rq),
					       rq->fence.seqno),
			     100) &&
		 wait_for(i915_seqno_passed(hws_seqno(spin, rq),
					    rq->fence.seqno),
			  50));
}
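
/*
 * Usage sketch (illustrative only, not part of the original file): a typical
 * selftest drives the spinner roughly as below. The function name is
 * hypothetical, error paths are abbreviated, and the gt/context are assumed
 * to be supplied by the caller.
 */
#if 0
static int example_spinner_usage(struct intel_gt *gt, struct intel_context *ce)
{
	struct igt_spinner spin;
	struct i915_request *rq;
	int err;

	err = igt_spinner_init(&spin, gt);
	if (err)
		return err;

	/* MI_ARB_CHECK leaves the spinner preemptible; MI_NOOP does not. */
	rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto out;
	}

	i915_request_get(rq);
	i915_request_add(rq);

	/* Wait for the batch to report its seqno, i.e. to start spinning. */
	if (!igt_wait_for_spinner(&spin, rq)) {
		err = -ETIME;
		goto out_rq;
	}

	/* ... exercise preemption, resets, heartbeats, etc. ... */

out_rq:
	igt_spinner_end(&spin);
	i915_request_put(rq);
out:
	igt_spinner_fini(&spin);
	return err;
}
#endif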