/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */
#include <linux/nospec.h>

#include "i915_drv.h"
#include "i915_perf.h"
#include "i915_query.h"
#include <uapi/drm/i915_drm.h>
14 static int copy_query_item(void *query_hdr
, size_t query_sz
,
16 struct drm_i915_query_item
*query_item
)
18 if (query_item
->length
== 0)
21 if (query_item
->length
< total_length
)
24 if (copy_from_user(query_hdr
, u64_to_user_ptr(query_item
->data_ptr
),
28 if (!access_ok(u64_to_user_ptr(query_item
->data_ptr
),
35 static int query_topology_info(struct drm_i915_private
*dev_priv
,
36 struct drm_i915_query_item
*query_item
)
38 const struct sseu_dev_info
*sseu
= &RUNTIME_INFO(dev_priv
)->sseu
;
39 struct drm_i915_query_topology_info topo
;
40 u32 slice_length
, subslice_length
, eu_length
, total_length
;
43 if (query_item
->flags
!= 0)
46 if (sseu
->max_slices
== 0)
49 BUILD_BUG_ON(sizeof(u8
) != sizeof(sseu
->slice_mask
));
51 slice_length
= sizeof(sseu
->slice_mask
);
52 subslice_length
= sseu
->max_slices
* sseu
->ss_stride
;
53 eu_length
= sseu
->max_slices
* sseu
->max_subslices
* sseu
->eu_stride
;
54 total_length
= sizeof(topo
) + slice_length
+ subslice_length
+
57 ret
= copy_query_item(&topo
, sizeof(topo
), total_length
,
65 memset(&topo
, 0, sizeof(topo
));
66 topo
.max_slices
= sseu
->max_slices
;
67 topo
.max_subslices
= sseu
->max_subslices
;
68 topo
.max_eus_per_subslice
= sseu
->max_eus_per_subslice
;
70 topo
.subslice_offset
= slice_length
;
71 topo
.subslice_stride
= sseu
->ss_stride
;
72 topo
.eu_offset
= slice_length
+ subslice_length
;
73 topo
.eu_stride
= sseu
->eu_stride
;
75 if (__copy_to_user(u64_to_user_ptr(query_item
->data_ptr
),
79 if (__copy_to_user(u64_to_user_ptr(query_item
->data_ptr
+ sizeof(topo
)),
80 &sseu
->slice_mask
, slice_length
))
83 if (__copy_to_user(u64_to_user_ptr(query_item
->data_ptr
+
84 sizeof(topo
) + slice_length
),
85 sseu
->subslice_mask
, subslice_length
))
88 if (__copy_to_user(u64_to_user_ptr(query_item
->data_ptr
+
90 slice_length
+ subslice_length
),
91 sseu
->eu_mask
, eu_length
))
98 query_engine_info(struct drm_i915_private
*i915
,
99 struct drm_i915_query_item
*query_item
)
101 struct drm_i915_query_engine_info __user
*query_ptr
=
102 u64_to_user_ptr(query_item
->data_ptr
);
103 struct drm_i915_engine_info __user
*info_ptr
;
104 struct drm_i915_query_engine_info query
;
105 struct drm_i915_engine_info info
= { };
106 unsigned int num_uabi_engines
= 0;
107 struct intel_engine_cs
*engine
;
110 if (query_item
->flags
)
113 for_each_uabi_engine(engine
, i915
)
116 len
= sizeof(struct drm_i915_query_engine_info
) +
117 num_uabi_engines
* sizeof(struct drm_i915_engine_info
);
119 ret
= copy_query_item(&query
, sizeof(query
), len
, query_item
);
123 if (query
.num_engines
|| query
.rsvd
[0] || query
.rsvd
[1] ||
127 info_ptr
= &query_ptr
->engines
[0];
129 for_each_uabi_engine(engine
, i915
) {
130 info
.engine
.engine_class
= engine
->uabi_class
;
131 info
.engine
.engine_instance
= engine
->uabi_instance
;
132 info
.capabilities
= engine
->uabi_capabilities
;
134 if (__copy_to_user(info_ptr
, &info
, sizeof(info
)))
141 if (__copy_to_user(query_ptr
, &query
, sizeof(query
)))
147 static int can_copy_perf_config_registers_or_number(u32 user_n_regs
,
152 * We'll just put the number of registers, and won't copy the
155 if (user_n_regs
== 0)
158 if (user_n_regs
< kernel_n_regs
)
161 if (!access_ok(u64_to_user_ptr(user_regs_ptr
),
162 2 * sizeof(u32
) * kernel_n_regs
))
168 static int copy_perf_config_registers_or_number(const struct i915_oa_reg
*kernel_regs
,
175 if (*user_n_regs
== 0) {
176 *user_n_regs
= kernel_n_regs
;
180 *user_n_regs
= kernel_n_regs
;
182 for (r
= 0; r
< kernel_n_regs
; r
++) {
183 u32 __user
*user_reg_ptr
=
184 u64_to_user_ptr(user_regs_ptr
+ sizeof(u32
) * r
* 2);
185 u32 __user
*user_val_ptr
=
186 u64_to_user_ptr(user_regs_ptr
+ sizeof(u32
) * r
* 2 +
190 ret
= __put_user(i915_mmio_reg_offset(kernel_regs
[r
].addr
),
195 ret
= __put_user(kernel_regs
[r
].value
, user_val_ptr
);
203 static int query_perf_config_data(struct drm_i915_private
*i915
,
204 struct drm_i915_query_item
*query_item
,
207 struct drm_i915_query_perf_config __user
*user_query_config_ptr
=
208 u64_to_user_ptr(query_item
->data_ptr
);
209 struct drm_i915_perf_oa_config __user
*user_config_ptr
=
210 u64_to_user_ptr(query_item
->data_ptr
+
211 sizeof(struct drm_i915_query_perf_config
));
212 struct drm_i915_perf_oa_config user_config
;
213 struct i915_perf
*perf
= &i915
->perf
;
214 struct i915_oa_config
*oa_config
;
215 char uuid
[UUID_STRING_LEN
+ 1];
217 u32 flags
, total_size
;
224 sizeof(struct drm_i915_query_perf_config
) +
225 sizeof(struct drm_i915_perf_oa_config
);
227 if (query_item
->length
== 0)
230 if (query_item
->length
< total_size
) {
231 DRM_DEBUG("Invalid query config data item size=%u expected=%u\n",
232 query_item
->length
, total_size
);
236 if (!access_ok(user_query_config_ptr
, total_size
))
239 if (__get_user(flags
, &user_query_config_ptr
->flags
))
246 struct i915_oa_config
*tmp
;
249 BUILD_BUG_ON(sizeof(user_query_config_ptr
->uuid
) >= sizeof(uuid
));
251 memset(&uuid
, 0, sizeof(uuid
));
252 if (__copy_from_user(uuid
, user_query_config_ptr
->uuid
,
253 sizeof(user_query_config_ptr
->uuid
)))
258 idr_for_each_entry(&perf
->metrics_idr
, tmp
, id
) {
259 if (!strcmp(tmp
->uuid
, uuid
)) {
260 oa_config
= i915_oa_config_get(tmp
);
266 if (__get_user(config_id
, &user_query_config_ptr
->config
))
269 oa_config
= i915_perf_get_oa_config(perf
, config_id
);
274 if (__copy_from_user(&user_config
, user_config_ptr
,
275 sizeof(user_config
))) {
280 ret
= can_copy_perf_config_registers_or_number(user_config
.n_boolean_regs
,
281 user_config
.boolean_regs_ptr
,
282 oa_config
->b_counter_regs_len
);
286 ret
= can_copy_perf_config_registers_or_number(user_config
.n_flex_regs
,
287 user_config
.flex_regs_ptr
,
288 oa_config
->flex_regs_len
);
292 ret
= can_copy_perf_config_registers_or_number(user_config
.n_mux_regs
,
293 user_config
.mux_regs_ptr
,
294 oa_config
->mux_regs_len
);
298 ret
= copy_perf_config_registers_or_number(oa_config
->b_counter_regs
,
299 oa_config
->b_counter_regs_len
,
300 user_config
.boolean_regs_ptr
,
301 &user_config
.n_boolean_regs
);
305 ret
= copy_perf_config_registers_or_number(oa_config
->flex_regs
,
306 oa_config
->flex_regs_len
,
307 user_config
.flex_regs_ptr
,
308 &user_config
.n_flex_regs
);
312 ret
= copy_perf_config_registers_or_number(oa_config
->mux_regs
,
313 oa_config
->mux_regs_len
,
314 user_config
.mux_regs_ptr
,
315 &user_config
.n_mux_regs
);
319 memcpy(user_config
.uuid
, oa_config
->uuid
, sizeof(user_config
.uuid
));
321 if (__copy_to_user(user_config_ptr
, &user_config
,
322 sizeof(user_config
))) {
330 i915_oa_config_put(oa_config
);
334 static size_t sizeof_perf_config_list(size_t count
)
336 return sizeof(struct drm_i915_query_perf_config
) + sizeof(u64
) * count
;
339 static size_t sizeof_perf_metrics(struct i915_perf
*perf
)
341 struct i915_oa_config
*tmp
;
347 idr_for_each_entry(&perf
->metrics_idr
, tmp
, id
)
351 return sizeof_perf_config_list(i
);
354 static int query_perf_config_list(struct drm_i915_private
*i915
,
355 struct drm_i915_query_item
*query_item
)
357 struct drm_i915_query_perf_config __user
*user_query_config_ptr
=
358 u64_to_user_ptr(query_item
->data_ptr
);
359 struct i915_perf
*perf
= &i915
->perf
;
360 u64
*oa_config_ids
= NULL
;
361 int alloc
, n_configs
;
368 if (query_item
->length
== 0)
369 return sizeof_perf_metrics(perf
);
371 if (get_user(flags
, &user_query_config_ptr
->flags
))
379 struct i915_oa_config
*tmp
;
383 ids
= krealloc(oa_config_ids
,
384 n_configs
* sizeof(*oa_config_ids
),
389 alloc
= fetch_and_zero(&n_configs
);
391 ids
[n_configs
++] = 1ull; /* reserved for test_config */
393 idr_for_each_entry(&perf
->metrics_idr
, tmp
, id
) {
394 if (n_configs
< alloc
)
401 } while (n_configs
> alloc
);
403 if (query_item
->length
< sizeof_perf_config_list(n_configs
)) {
404 DRM_DEBUG("Invalid query config list item size=%u expected=%zu\n",
406 sizeof_perf_config_list(n_configs
));
407 kfree(oa_config_ids
);
411 if (put_user(n_configs
, &user_query_config_ptr
->config
)) {
412 kfree(oa_config_ids
);
416 ret
= copy_to_user(user_query_config_ptr
+ 1,
418 n_configs
* sizeof(*oa_config_ids
));
419 kfree(oa_config_ids
);
423 return sizeof_perf_config_list(n_configs
);
426 static int query_perf_config(struct drm_i915_private
*i915
,
427 struct drm_i915_query_item
*query_item
)
429 switch (query_item
->flags
) {
430 case DRM_I915_QUERY_PERF_CONFIG_LIST
:
431 return query_perf_config_list(i915
, query_item
);
432 case DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID
:
433 return query_perf_config_data(i915
, query_item
, true);
434 case DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID
:
435 return query_perf_config_data(i915
, query_item
, false);
441 static int (* const i915_query_funcs
[])(struct drm_i915_private
*dev_priv
,
442 struct drm_i915_query_item
*query_item
) = {
448 int i915_query_ioctl(struct drm_device
*dev
, void *data
, struct drm_file
*file
)
450 struct drm_i915_private
*dev_priv
= to_i915(dev
);
451 struct drm_i915_query
*args
= data
;
452 struct drm_i915_query_item __user
*user_item_ptr
=
453 u64_to_user_ptr(args
->items_ptr
);
456 if (args
->flags
!= 0)
459 for (i
= 0; i
< args
->num_items
; i
++, user_item_ptr
++) {
460 struct drm_i915_query_item item
;
461 unsigned long func_idx
;
464 if (copy_from_user(&item
, user_item_ptr
, sizeof(item
)))
467 if (item
.query_id
== 0)
470 if (overflows_type(item
.query_id
- 1, unsigned long))
473 func_idx
= item
.query_id
- 1;
476 if (func_idx
< ARRAY_SIZE(i915_query_funcs
)) {
477 func_idx
= array_index_nospec(func_idx
,
478 ARRAY_SIZE(i915_query_funcs
));
479 ret
= i915_query_funcs
[func_idx
](dev_priv
, &item
);
482 /* Only write the length back to userspace if they differ. */
483 if (ret
!= item
.length
&& put_user(ret
, &user_item_ptr
->length
))