/*
 * SPDX-License-Identifier: GPL-2.0
 *
 * Copyright © 2018 Intel Corporation
 */

#include <linux/sort.h>

#include "intel_gt_pm.h"
#include "intel_rps.h"

#include "i915_selftest.h"
#include "selftests/igt_flush_test.h"

#define COUNT 5

static int cmp_u32(const void *A, const void *B)
{
	const u32 *a = A, *b = B;

	return *a - *b;
}
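
/*
 * perf_begin/perf_end bracket each measurement pass: taking a waitboost
 * reference (rps.num_waiters) pins the GPU at its maximum frequency, so
 * the CS_TIMESTAMP deltas sampled below are comparable across iterations.
 */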
static void perf_begin(struct intel_gt *gt)
{
	intel_gt_pm_get(gt);

	/* Boost gpufreq to max [waitboost] and keep it fixed */
	atomic_inc(&gt->rps.num_waiters);
	schedule_work(&gt->rps.work);
	flush_work(&gt->rps.work);
}

static int perf_end(struct intel_gt *gt)
{
	atomic_dec(&gt->rps.num_waiters);
	intel_gt_pm_put(gt);

	return igt_flush_test(gt->i915);
}
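
/*
 * Emit an MI_STORE_REGISTER_MEM into the ring to copy the engine's
 * CS_TIMESTAMP register into the nominated u32 slot of the request's
 * HWSP, where it can be read back once the request has completed.
 */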
static int write_timestamp(struct i915_request *rq, int slot)
{
	u32 cmd;
	u32 *cs;

	cs = intel_ring_begin(rq, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	cmd = MI_STORE_REGISTER_MEM | MI_USE_GGTT;
	if (INTEL_GEN(rq->engine->i915) >= 8)
		cmd++;
	*cs++ = cmd;
	*cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(rq->engine->mmio_base));
	*cs++ = i915_request_timeline(rq)->hwsp_offset + slot * sizeof(u32);
	*cs++ = 0;

	intel_ring_advance(rq, cs);

	return 0;
}
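
/*
 * Build the smallest possible batch: a single page whose first dword is
 * MI_BATCH_BUFFER_END, so the only cost measured is the dispatch itself.
 */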
static struct i915_vma *create_empty_batch(struct intel_context *ce)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	u32 *cs;
	int err;

	obj = i915_gem_object_create_internal(ce->engine->i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	cs = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_put;
	}

	cs[0] = MI_BATCH_BUFFER_END;

	i915_gem_object_flush_map(obj);

	vma = i915_vma_instance(obj, ce->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_unpin;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto err_unpin;

	i915_gem_object_unpin_map(obj);
	return vma;

err_unpin:
	i915_gem_object_unpin_map(obj);
err_put:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}
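
/*
 * trifilter() reduces COUNT samples to a single value: sort them, drop the
 * extremes and return a weighted average of the middle three (the median
 * counted twice), filtering out one-off outliers in either direction.
 */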
static u32 trifilter(u32 *a)
{
	u64 sum;

	sort(a, COUNT, sizeof(*a), cmp_u32, NULL);

	sum = mul_u32_u32(a[2], 2);
	sum += a[1];
	sum += a[3];

	return sum >> 2;
}
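
/*
 * Measure the latency of MI_BB_START on each engine: timestamps are taken
 * immediately before and after dispatching the empty batch, and the
 * filtered delta is reported in CS_TIMESTAMP cycles.
 */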
static int perf_mi_bb_start(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = 0;

	if (INTEL_GEN(gt->i915) < 7) /* for per-engine CS_TIMESTAMP */
		return 0;

	perf_begin(gt);
	for_each_engine(engine, gt, id) {
		struct intel_context *ce = engine->kernel_context;
		struct i915_vma *batch;
		u32 cycles[COUNT];
		int i;

		intel_engine_pm_get(engine);

		batch = create_empty_batch(ce);
		if (IS_ERR(batch)) {
			err = PTR_ERR(batch);
			intel_engine_pm_put(engine);
			break;
		}

		err = i915_vma_sync(batch);
		if (err) {
			intel_engine_pm_put(engine);
			i915_vma_put(batch);
			break;
		}

		for (i = 0; i < ARRAY_SIZE(cycles); i++) {
			struct i915_request *rq;

			rq = i915_request_create(ce);
			if (IS_ERR(rq)) {
				err = PTR_ERR(rq);
				break;
			}

			err = write_timestamp(rq, 2);
			if (err)
				goto out;

			err = rq->engine->emit_bb_start(rq,
							batch->node.start, 8,
							0);
			if (err)
				goto out;

			err = write_timestamp(rq, 3);
			if (err)
				goto out;

out:
			i915_request_get(rq);
			i915_request_add(rq);

			if (i915_request_wait(rq, 0, HZ / 5) < 0)
				err = -EIO;
			i915_request_put(rq);
			if (err)
				break;

			cycles[i] = rq->hwsp_seqno[3] - rq->hwsp_seqno[2];
		}
		i915_vma_put(batch);
		intel_engine_pm_put(engine);
		if (err)
			break;

		pr_info("%s: MI_BB_START cycles: %u\n",
			engine->name, trifilter(cycles));
	}
	if (perf_end(gt))
		err = -EIO;

	return err;
}
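
/*
 * Build a 64KiB batch consisting almost entirely of MI_NOOP (a zeroed
 * dword decodes as MI_NOOP), terminated by MI_BATCH_BUFFER_END in the
 * final dword.
 */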
static struct i915_vma *create_nop_batch(struct intel_context *ce)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	u32 *cs;
	int err;

	obj = i915_gem_object_create_internal(ce->engine->i915, SZ_64K);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	cs = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_put;
	}

	memset(cs, 0, SZ_64K);
	cs[SZ_64K / sizeof(*cs) - 1] = MI_BATCH_BUFFER_END;

	i915_gem_object_flush_map(obj);

	vma = i915_vma_instance(obj, ce->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_unpin;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto err_unpin;

	i915_gem_object_unpin_map(obj);
	return vma;

err_unpin:
	i915_gem_object_unpin_map(obj);
err_put:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}
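
/*
 * Measure the cost of executing a long run of MI_NOOP: time both the empty
 * batch and the 64KiB nop batch within the same request, then subtract the
 * empty-batch (dispatch) time to isolate the MI_NOOP execution itself.
 */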
static int perf_mi_noop(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = 0;

	if (INTEL_GEN(gt->i915) < 7) /* for per-engine CS_TIMESTAMP */
		return 0;

	perf_begin(gt);
	for_each_engine(engine, gt, id) {
		struct intel_context *ce = engine->kernel_context;
		struct i915_vma *base, *nop;
		u32 cycles[COUNT];
		int i;

		intel_engine_pm_get(engine);

		base = create_empty_batch(ce);
		if (IS_ERR(base)) {
			err = PTR_ERR(base);
			intel_engine_pm_put(engine);
			break;
		}

		err = i915_vma_sync(base);
		if (err) {
			i915_vma_put(base);
			intel_engine_pm_put(engine);
			break;
		}

		nop = create_nop_batch(ce);
		if (IS_ERR(nop)) {
			err = PTR_ERR(nop);
			i915_vma_put(base);
			intel_engine_pm_put(engine);
			break;
		}

		err = i915_vma_sync(nop);
		if (err) {
			i915_vma_put(nop);
			i915_vma_put(base);
			intel_engine_pm_put(engine);
			break;
		}

		for (i = 0; i < ARRAY_SIZE(cycles); i++) {
			struct i915_request *rq;

			rq = i915_request_create(ce);
			if (IS_ERR(rq)) {
				err = PTR_ERR(rq);
				break;
			}

			err = write_timestamp(rq, 2);
			if (err)
				goto out;

			err = rq->engine->emit_bb_start(rq,
							base->node.start, 8,
							0);
			if (err)
				goto out;

			err = write_timestamp(rq, 3);
			if (err)
				goto out;

			err = rq->engine->emit_bb_start(rq,
							nop->node.start,
							nop->node.size,
							0);
			if (err)
				goto out;

			err = write_timestamp(rq, 4);
			if (err)
				goto out;

out:
			i915_request_get(rq);
			i915_request_add(rq);

			if (i915_request_wait(rq, 0, HZ / 5) < 0)
				err = -EIO;
			i915_request_put(rq);
			if (err)
				break;

			cycles[i] =
				(rq->hwsp_seqno[4] - rq->hwsp_seqno[3]) -
				(rq->hwsp_seqno[3] - rq->hwsp_seqno[2]);
		}
		i915_vma_put(nop);
		i915_vma_put(base);
		intel_engine_pm_put(engine);
		if (err)
			break;

		pr_info("%s: 16K MI_NOOP cycles: %u\n",
			engine->name, trifilter(cycles));
	}
	if (perf_end(gt))
		err = -EIO;

	return err;
}
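
/*
 * The perf selftests exercise live hardware; skip them entirely if the GPU
 * is already wedged, as no meaningful timings could be collected.
 */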
int intel_engine_cs_perf_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(perf_mi_bb_start),
		SUBTEST(perf_mi_noop),
	};

	if (intel_gt_is_wedged(&i915->gt))
		return 0;

	return intel_gt_live_subtests(tests, &i915->gt);
}
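
/*
 * Sanity-check the static engine tables: each engine's mmio_bases[] must be
 * sorted by gen in descending order, and every populated entry must carry a
 * non-zero mmio base.
 */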
static int intel_mmio_bases_check(void *arg)
{
	int i, j;

	for (i = 0; i < ARRAY_SIZE(intel_engines); i++) {
		const struct engine_info *info = &intel_engines[i];
		u8 prev = U8_MAX;

		for (j = 0; j < MAX_MMIO_BASES; j++) {
			u8 gen = info->mmio_bases[j].gen;
			u32 base = info->mmio_bases[j].base;

			if (gen >= prev) {
				pr_err("%s(%s, class:%d, instance:%d): mmio base for gen %x is before the one for gen %x\n",
				       __func__,
				       intel_engine_class_repr(info->class),
				       info->class, info->instance,
				       prev, gen);
				return -EINVAL;
			}

			if (gen == 0)
				break;

			if (!base) {
				pr_err("%s(%s, class:%d, instance:%d): invalid mmio base (%x) for gen %x at entry %u\n",
				       __func__,
				       intel_engine_class_repr(info->class),
				       info->class, info->instance,
				       base, gen, j);
				return -EINVAL;
			}

			prev = gen;
		}

		pr_debug("%s: min gen supported for %s%d is %d\n",
			 __func__,
			 intel_engine_class_repr(info->class),
			 info->instance,
			 prev);
	}

	return 0;
}

int intel_engine_cs_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(intel_mmio_bases_check),
	};

	return i915_subtests(tests, NULL);
}