/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */
#include <linux/nospec.h>

#include "i915_drv.h"
#include "i915_perf.h"
#include "i915_query.h"
#include <uapi/drm/i915_drm.h>
14 static int copy_query_item(void *query_hdr
, size_t query_sz
,
16 struct drm_i915_query_item
*query_item
)
18 if (query_item
->length
== 0)
21 if (query_item
->length
< total_length
)
24 if (copy_from_user(query_hdr
, u64_to_user_ptr(query_item
->data_ptr
),
31 static int query_topology_info(struct drm_i915_private
*dev_priv
,
32 struct drm_i915_query_item
*query_item
)
34 const struct sseu_dev_info
*sseu
= &dev_priv
->gt
.info
.sseu
;
35 struct drm_i915_query_topology_info topo
;
36 u32 slice_length
, subslice_length
, eu_length
, total_length
;
39 if (query_item
->flags
!= 0)
42 if (sseu
->max_slices
== 0)
45 BUILD_BUG_ON(sizeof(u8
) != sizeof(sseu
->slice_mask
));
47 slice_length
= sizeof(sseu
->slice_mask
);
48 subslice_length
= sseu
->max_slices
* sseu
->ss_stride
;
49 eu_length
= sseu
->max_slices
* sseu
->max_subslices
* sseu
->eu_stride
;
50 total_length
= sizeof(topo
) + slice_length
+ subslice_length
+
53 ret
= copy_query_item(&topo
, sizeof(topo
), total_length
,
61 memset(&topo
, 0, sizeof(topo
));
62 topo
.max_slices
= sseu
->max_slices
;
63 topo
.max_subslices
= sseu
->max_subslices
;
64 topo
.max_eus_per_subslice
= sseu
->max_eus_per_subslice
;
66 topo
.subslice_offset
= slice_length
;
67 topo
.subslice_stride
= sseu
->ss_stride
;
68 topo
.eu_offset
= slice_length
+ subslice_length
;
69 topo
.eu_stride
= sseu
->eu_stride
;
71 if (copy_to_user(u64_to_user_ptr(query_item
->data_ptr
),
75 if (copy_to_user(u64_to_user_ptr(query_item
->data_ptr
+ sizeof(topo
)),
76 &sseu
->slice_mask
, slice_length
))
79 if (copy_to_user(u64_to_user_ptr(query_item
->data_ptr
+
80 sizeof(topo
) + slice_length
),
81 sseu
->subslice_mask
, subslice_length
))
84 if (copy_to_user(u64_to_user_ptr(query_item
->data_ptr
+
86 slice_length
+ subslice_length
),
87 sseu
->eu_mask
, eu_length
))
94 query_engine_info(struct drm_i915_private
*i915
,
95 struct drm_i915_query_item
*query_item
)
97 struct drm_i915_query_engine_info __user
*query_ptr
=
98 u64_to_user_ptr(query_item
->data_ptr
);
99 struct drm_i915_engine_info __user
*info_ptr
;
100 struct drm_i915_query_engine_info query
;
101 struct drm_i915_engine_info info
= { };
102 unsigned int num_uabi_engines
= 0;
103 struct intel_engine_cs
*engine
;
106 if (query_item
->flags
)
109 for_each_uabi_engine(engine
, i915
)
112 len
= struct_size(query_ptr
, engines
, num_uabi_engines
);
114 ret
= copy_query_item(&query
, sizeof(query
), len
, query_item
);
118 if (query
.num_engines
|| query
.rsvd
[0] || query
.rsvd
[1] ||
122 info_ptr
= &query_ptr
->engines
[0];
124 for_each_uabi_engine(engine
, i915
) {
125 info
.engine
.engine_class
= engine
->uabi_class
;
126 info
.engine
.engine_instance
= engine
->uabi_instance
;
127 info
.capabilities
= engine
->uabi_capabilities
;
129 if (copy_to_user(info_ptr
, &info
, sizeof(info
)))
136 if (copy_to_user(query_ptr
, &query
, sizeof(query
)))
142 static int can_copy_perf_config_registers_or_number(u32 user_n_regs
,
147 * We'll just put the number of registers, and won't copy the
150 if (user_n_regs
== 0)
153 if (user_n_regs
< kernel_n_regs
)
159 static int copy_perf_config_registers_or_number(const struct i915_oa_reg
*kernel_regs
,
164 u32 __user
*p
= u64_to_user_ptr(user_regs_ptr
);
167 if (*user_n_regs
== 0) {
168 *user_n_regs
= kernel_n_regs
;
172 *user_n_regs
= kernel_n_regs
;
174 if (!user_write_access_begin(p
, 2 * sizeof(u32
) * kernel_n_regs
))
177 for (r
= 0; r
< kernel_n_regs
; r
++, p
+= 2) {
178 unsafe_put_user(i915_mmio_reg_offset(kernel_regs
[r
].addr
),
180 unsafe_put_user(kernel_regs
[r
].value
, p
+ 1, Efault
);
182 user_write_access_end();
185 user_write_access_end();
189 static int query_perf_config_data(struct drm_i915_private
*i915
,
190 struct drm_i915_query_item
*query_item
,
193 struct drm_i915_query_perf_config __user
*user_query_config_ptr
=
194 u64_to_user_ptr(query_item
->data_ptr
);
195 struct drm_i915_perf_oa_config __user
*user_config_ptr
=
196 u64_to_user_ptr(query_item
->data_ptr
+
197 sizeof(struct drm_i915_query_perf_config
));
198 struct drm_i915_perf_oa_config user_config
;
199 struct i915_perf
*perf
= &i915
->perf
;
200 struct i915_oa_config
*oa_config
;
201 char uuid
[UUID_STRING_LEN
+ 1];
203 u32 flags
, total_size
;
210 sizeof(struct drm_i915_query_perf_config
) +
211 sizeof(struct drm_i915_perf_oa_config
);
213 if (query_item
->length
== 0)
216 if (query_item
->length
< total_size
) {
217 DRM_DEBUG("Invalid query config data item size=%u expected=%u\n",
218 query_item
->length
, total_size
);
222 if (get_user(flags
, &user_query_config_ptr
->flags
))
229 struct i915_oa_config
*tmp
;
232 BUILD_BUG_ON(sizeof(user_query_config_ptr
->uuid
) >= sizeof(uuid
));
234 memset(&uuid
, 0, sizeof(uuid
));
235 if (copy_from_user(uuid
, user_query_config_ptr
->uuid
,
236 sizeof(user_query_config_ptr
->uuid
)))
241 idr_for_each_entry(&perf
->metrics_idr
, tmp
, id
) {
242 if (!strcmp(tmp
->uuid
, uuid
)) {
243 oa_config
= i915_oa_config_get(tmp
);
249 if (get_user(config_id
, &user_query_config_ptr
->config
))
252 oa_config
= i915_perf_get_oa_config(perf
, config_id
);
257 if (copy_from_user(&user_config
, user_config_ptr
, sizeof(user_config
))) {
262 ret
= can_copy_perf_config_registers_or_number(user_config
.n_boolean_regs
,
263 user_config
.boolean_regs_ptr
,
264 oa_config
->b_counter_regs_len
);
268 ret
= can_copy_perf_config_registers_or_number(user_config
.n_flex_regs
,
269 user_config
.flex_regs_ptr
,
270 oa_config
->flex_regs_len
);
274 ret
= can_copy_perf_config_registers_or_number(user_config
.n_mux_regs
,
275 user_config
.mux_regs_ptr
,
276 oa_config
->mux_regs_len
);
280 ret
= copy_perf_config_registers_or_number(oa_config
->b_counter_regs
,
281 oa_config
->b_counter_regs_len
,
282 user_config
.boolean_regs_ptr
,
283 &user_config
.n_boolean_regs
);
287 ret
= copy_perf_config_registers_or_number(oa_config
->flex_regs
,
288 oa_config
->flex_regs_len
,
289 user_config
.flex_regs_ptr
,
290 &user_config
.n_flex_regs
);
294 ret
= copy_perf_config_registers_or_number(oa_config
->mux_regs
,
295 oa_config
->mux_regs_len
,
296 user_config
.mux_regs_ptr
,
297 &user_config
.n_mux_regs
);
301 memcpy(user_config
.uuid
, oa_config
->uuid
, sizeof(user_config
.uuid
));
303 if (copy_to_user(user_config_ptr
, &user_config
, sizeof(user_config
))) {
311 i915_oa_config_put(oa_config
);
315 static size_t sizeof_perf_config_list(size_t count
)
317 return sizeof(struct drm_i915_query_perf_config
) + sizeof(u64
) * count
;
320 static size_t sizeof_perf_metrics(struct i915_perf
*perf
)
322 struct i915_oa_config
*tmp
;
328 idr_for_each_entry(&perf
->metrics_idr
, tmp
, id
)
332 return sizeof_perf_config_list(i
);
335 static int query_perf_config_list(struct drm_i915_private
*i915
,
336 struct drm_i915_query_item
*query_item
)
338 struct drm_i915_query_perf_config __user
*user_query_config_ptr
=
339 u64_to_user_ptr(query_item
->data_ptr
);
340 struct i915_perf
*perf
= &i915
->perf
;
341 u64
*oa_config_ids
= NULL
;
342 int alloc
, n_configs
;
349 if (query_item
->length
== 0)
350 return sizeof_perf_metrics(perf
);
352 if (get_user(flags
, &user_query_config_ptr
->flags
))
360 struct i915_oa_config
*tmp
;
364 ids
= krealloc(oa_config_ids
,
365 n_configs
* sizeof(*oa_config_ids
),
370 alloc
= fetch_and_zero(&n_configs
);
372 ids
[n_configs
++] = 1ull; /* reserved for test_config */
374 idr_for_each_entry(&perf
->metrics_idr
, tmp
, id
) {
375 if (n_configs
< alloc
)
382 } while (n_configs
> alloc
);
384 if (query_item
->length
< sizeof_perf_config_list(n_configs
)) {
385 DRM_DEBUG("Invalid query config list item size=%u expected=%zu\n",
387 sizeof_perf_config_list(n_configs
));
388 kfree(oa_config_ids
);
392 if (put_user(n_configs
, &user_query_config_ptr
->config
)) {
393 kfree(oa_config_ids
);
397 ret
= copy_to_user(user_query_config_ptr
+ 1,
399 n_configs
* sizeof(*oa_config_ids
));
400 kfree(oa_config_ids
);
404 return sizeof_perf_config_list(n_configs
);
407 static int query_perf_config(struct drm_i915_private
*i915
,
408 struct drm_i915_query_item
*query_item
)
410 switch (query_item
->flags
) {
411 case DRM_I915_QUERY_PERF_CONFIG_LIST
:
412 return query_perf_config_list(i915
, query_item
);
413 case DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID
:
414 return query_perf_config_data(i915
, query_item
, true);
415 case DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID
:
416 return query_perf_config_data(i915
, query_item
, false);
422 static int (* const i915_query_funcs
[])(struct drm_i915_private
*dev_priv
,
423 struct drm_i915_query_item
*query_item
) = {
429 int i915_query_ioctl(struct drm_device
*dev
, void *data
, struct drm_file
*file
)
431 struct drm_i915_private
*dev_priv
= to_i915(dev
);
432 struct drm_i915_query
*args
= data
;
433 struct drm_i915_query_item __user
*user_item_ptr
=
434 u64_to_user_ptr(args
->items_ptr
);
437 if (args
->flags
!= 0)
440 for (i
= 0; i
< args
->num_items
; i
++, user_item_ptr
++) {
441 struct drm_i915_query_item item
;
442 unsigned long func_idx
;
445 if (copy_from_user(&item
, user_item_ptr
, sizeof(item
)))
448 if (item
.query_id
== 0)
451 if (overflows_type(item
.query_id
- 1, unsigned long))
454 func_idx
= item
.query_id
- 1;
457 if (func_idx
< ARRAY_SIZE(i915_query_funcs
)) {
458 func_idx
= array_index_nospec(func_idx
,
459 ARRAY_SIZE(i915_query_funcs
));
460 ret
= i915_query_funcs
[func_idx
](dev_priv
, &item
);
463 /* Only write the length back to userspace if they differ. */
464 if (ret
!= item
.length
&& put_user(ret
, &user_item_ptr
->length
))