drm/tests: hdmi: Fix memory leaks in drm_display_mode_from_cea_vic()
[drm/drm-misc.git] / drivers / accel / ivpu / ivpu_jsm_msg.c
blob46ef16c3c06910a43c7dd504e3955d6ca9ee33f0
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) 2020-2024 Intel Corporation
4 */
6 #include "ivpu_drv.h"
7 #include "ivpu_hw.h"
8 #include "ivpu_ipc.h"
9 #include "ivpu_jsm_msg.h"
/*
 * ivpu_jsm_msg_type_to_str() - Map a JSM message type to its printable name.
 * @type: JSM IPC message type
 *
 * Used by logging/tracing code. Each known enum value returns its own
 * stringified identifier; anything outside the switch falls through to a
 * generic placeholder string.
 */
const char *ivpu_jsm_msg_type_to_str(enum vpu_ipc_msg_type type)
{
	/* Expand each enum value into "case X: return "X";" to avoid 70 hand-written arms. */
#define IVPU_CASE_TO_STR(x) case x: return #x
	switch (type) {
	IVPU_CASE_TO_STR(VPU_JSM_MSG_UNKNOWN);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_ENGINE_RESET);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_ENGINE_PREEMPT);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_REGISTER_DB);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_UNREGISTER_DB);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_QUERY_ENGINE_HB);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_GET_POWER_LEVEL_COUNT);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_GET_POWER_LEVEL);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_POWER_LEVEL);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_OPEN);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_CLOSE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_SET_CONFIG);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_GET_CONFIG);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_GET_CAPABILITY);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_GET_NAME);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SSID_RELEASE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_START);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_STOP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_UPDATE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_INFO);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_CREATE_CMD_QUEUE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_DESTROY_CMD_QUEUE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_REGISTER_DB);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_RESUME_CMDQ);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_SUSPEND_CMDQ);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_RESUME_CMDQ_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_SUSPEND_CMDQ_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_SCHEDULING_LOG_NOTIFICATION);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_ENGINE_RESUME);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_RESUME_ENGINE_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_STATE_DUMP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_STATE_DUMP_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_BLOB_DEINIT);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_DYNDBG_CONTROL);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_JOB_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_ENGINE_RESET_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_ENGINE_PREEMPT_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_REGISTER_DB_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_UNREGISTER_DB_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_QUERY_ENGINE_HB_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_GET_POWER_LEVEL_COUNT_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_GET_POWER_LEVEL_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_POWER_LEVEL_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_OPEN_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_CLOSE_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_SET_CONFIG_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_GET_CONFIG_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_GET_CAPABILITY_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_GET_NAME_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SSID_RELEASE_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_START_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_STOP_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_UPDATE_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_INFO_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_NOTIFICATION);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_CREATE_CMD_QUEUE_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_DESTROY_CMD_QUEUE_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_BLOB_DEINIT_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_DYNDBG_CONTROL_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_PWR_D0I3_ENTER);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_PWR_D0I3_ENTER_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_DCT_ENABLE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_DCT_ENABLE_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_DCT_DISABLE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_DCT_DISABLE_DONE);
	}
#undef IVPU_CASE_TO_STR

	/* Value not covered by the switch — keep logs readable rather than fail. */
	return "Unknown JSM message type";
}
92 int ivpu_jsm_register_db(struct ivpu_device *vdev, u32 ctx_id, u32 db_id,
93 u64 jobq_base, u32 jobq_size)
95 struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_REGISTER_DB };
96 struct vpu_jsm_msg resp;
97 int ret = 0;
99 req.payload.register_db.db_idx = db_id;
100 req.payload.register_db.jobq_base = jobq_base;
101 req.payload.register_db.jobq_size = jobq_size;
102 req.payload.register_db.host_ssid = ctx_id;
104 ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_REGISTER_DB_DONE, &resp,
105 VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
106 if (ret)
107 ivpu_err_ratelimited(vdev, "Failed to register doorbell %u: %d\n", db_id, ret);
109 return ret;
112 int ivpu_jsm_unregister_db(struct ivpu_device *vdev, u32 db_id)
114 struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_UNREGISTER_DB };
115 struct vpu_jsm_msg resp;
116 int ret = 0;
118 req.payload.unregister_db.db_idx = db_id;
120 ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_UNREGISTER_DB_DONE, &resp,
121 VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
122 if (ret)
123 ivpu_warn_ratelimited(vdev, "Failed to unregister doorbell %u: %d\n", db_id, ret);
125 return ret;
128 int ivpu_jsm_get_heartbeat(struct ivpu_device *vdev, u32 engine, u64 *heartbeat)
130 struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_QUERY_ENGINE_HB };
131 struct vpu_jsm_msg resp;
132 int ret;
134 if (engine > VPU_ENGINE_COPY)
135 return -EINVAL;
137 req.payload.query_engine_hb.engine_idx = engine;
139 ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_QUERY_ENGINE_HB_DONE, &resp,
140 VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
141 if (ret) {
142 ivpu_err_ratelimited(vdev, "Failed to get heartbeat from engine %d: %d\n",
143 engine, ret);
144 return ret;
147 *heartbeat = resp.payload.query_engine_hb_done.heartbeat;
148 return ret;
151 int ivpu_jsm_reset_engine(struct ivpu_device *vdev, u32 engine)
153 struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_ENGINE_RESET };
154 struct vpu_jsm_msg resp;
155 int ret;
157 if (engine > VPU_ENGINE_COPY)
158 return -EINVAL;
160 req.payload.engine_reset.engine_idx = engine;
162 ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_ENGINE_RESET_DONE, &resp,
163 VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
164 if (ret)
165 ivpu_err_ratelimited(vdev, "Failed to reset engine %d: %d\n", engine, ret);
167 return ret;
170 int ivpu_jsm_preempt_engine(struct ivpu_device *vdev, u32 engine, u32 preempt_id)
172 struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_ENGINE_PREEMPT };
173 struct vpu_jsm_msg resp;
174 int ret;
176 if (engine > VPU_ENGINE_COPY)
177 return -EINVAL;
179 req.payload.engine_preempt.engine_idx = engine;
180 req.payload.engine_preempt.preempt_id = preempt_id;
182 ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_ENGINE_PREEMPT_DONE, &resp,
183 VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
184 if (ret)
185 ivpu_err_ratelimited(vdev, "Failed to preempt engine %d: %d\n", engine, ret);
187 return ret;
190 int ivpu_jsm_dyndbg_control(struct ivpu_device *vdev, char *command, size_t size)
192 struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_DYNDBG_CONTROL };
193 struct vpu_jsm_msg resp;
194 int ret;
196 strscpy(req.payload.dyndbg_control.dyndbg_cmd, command, VPU_DYNDBG_CMD_MAX_LEN);
198 ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_DYNDBG_CONTROL_RSP, &resp,
199 VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
200 if (ret)
201 ivpu_warn_ratelimited(vdev, "Failed to send command \"%s\": ret %d\n",
202 command, ret);
204 return ret;
207 int ivpu_jsm_trace_get_capability(struct ivpu_device *vdev, u32 *trace_destination_mask,
208 u64 *trace_hw_component_mask)
210 struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_TRACE_GET_CAPABILITY };
211 struct vpu_jsm_msg resp;
212 int ret;
214 ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_TRACE_GET_CAPABILITY_RSP, &resp,
215 VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
216 if (ret) {
217 ivpu_warn_ratelimited(vdev, "Failed to get trace capability: %d\n", ret);
218 return ret;
221 *trace_destination_mask = resp.payload.trace_capability.trace_destination_mask;
222 *trace_hw_component_mask = resp.payload.trace_capability.trace_hw_component_mask;
224 return ret;
227 int ivpu_jsm_trace_set_config(struct ivpu_device *vdev, u32 trace_level, u32 trace_destination_mask,
228 u64 trace_hw_component_mask)
230 struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_TRACE_SET_CONFIG };
231 struct vpu_jsm_msg resp;
232 int ret;
234 req.payload.trace_config.trace_level = trace_level;
235 req.payload.trace_config.trace_destination_mask = trace_destination_mask;
236 req.payload.trace_config.trace_hw_component_mask = trace_hw_component_mask;
238 ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_TRACE_SET_CONFIG_RSP, &resp,
239 VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
240 if (ret)
241 ivpu_warn_ratelimited(vdev, "Failed to set config: %d\n", ret);
243 return ret;
246 int ivpu_jsm_context_release(struct ivpu_device *vdev, u32 host_ssid)
248 struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_SSID_RELEASE };
249 struct vpu_jsm_msg resp;
250 int ret;
252 req.payload.ssid_release.host_ssid = host_ssid;
254 ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_SSID_RELEASE_DONE, &resp,
255 VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
256 if (ret)
257 ivpu_warn_ratelimited(vdev, "Failed to release context: %d\n", ret);
259 return ret;
262 int ivpu_jsm_pwr_d0i3_enter(struct ivpu_device *vdev)
264 struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_PWR_D0I3_ENTER };
265 struct vpu_jsm_msg resp;
266 int ret;
268 if (IVPU_WA(disable_d0i3_msg))
269 return 0;
271 req.payload.pwr_d0i3_enter.send_response = 1;
273 ret = ivpu_ipc_send_receive_active(vdev, &req, VPU_JSM_MSG_PWR_D0I3_ENTER_DONE,
274 &resp, VPU_IPC_CHAN_GEN_CMD,
275 vdev->timeout.d0i3_entry_msg);
276 if (ret)
277 return ret;
279 return ivpu_hw_wait_for_idle(vdev);
282 int ivpu_jsm_hws_create_cmdq(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_group, u32 cmdq_id,
283 u32 pid, u32 engine, u64 cmdq_base, u32 cmdq_size)
285 struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_CREATE_CMD_QUEUE };
286 struct vpu_jsm_msg resp;
287 int ret;
289 req.payload.hws_create_cmdq.host_ssid = ctx_id;
290 req.payload.hws_create_cmdq.process_id = pid;
291 req.payload.hws_create_cmdq.engine_idx = engine;
292 req.payload.hws_create_cmdq.cmdq_group = cmdq_group;
293 req.payload.hws_create_cmdq.cmdq_id = cmdq_id;
294 req.payload.hws_create_cmdq.cmdq_base = cmdq_base;
295 req.payload.hws_create_cmdq.cmdq_size = cmdq_size;
297 ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_CREATE_CMD_QUEUE_RSP, &resp,
298 VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
299 if (ret)
300 ivpu_warn_ratelimited(vdev, "Failed to create command queue: %d\n", ret);
302 return ret;
305 int ivpu_jsm_hws_destroy_cmdq(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_id)
307 struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_DESTROY_CMD_QUEUE };
308 struct vpu_jsm_msg resp;
309 int ret;
311 req.payload.hws_destroy_cmdq.host_ssid = ctx_id;
312 req.payload.hws_destroy_cmdq.cmdq_id = cmdq_id;
314 ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_DESTROY_CMD_QUEUE_RSP, &resp,
315 VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
316 if (ret)
317 ivpu_warn_ratelimited(vdev, "Failed to destroy command queue: %d\n", ret);
319 return ret;
322 int ivpu_jsm_hws_register_db(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_id, u32 db_id,
323 u64 cmdq_base, u32 cmdq_size)
325 struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_HWS_REGISTER_DB };
326 struct vpu_jsm_msg resp;
327 int ret = 0;
329 req.payload.hws_register_db.db_id = db_id;
330 req.payload.hws_register_db.host_ssid = ctx_id;
331 req.payload.hws_register_db.cmdq_id = cmdq_id;
332 req.payload.hws_register_db.cmdq_base = cmdq_base;
333 req.payload.hws_register_db.cmdq_size = cmdq_size;
335 ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_REGISTER_DB_DONE, &resp,
336 VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
337 if (ret)
338 ivpu_err_ratelimited(vdev, "Failed to register doorbell %u: %d\n", db_id, ret);
340 return ret;
343 int ivpu_jsm_hws_resume_engine(struct ivpu_device *vdev, u32 engine)
345 struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_HWS_ENGINE_RESUME };
346 struct vpu_jsm_msg resp;
347 int ret;
349 if (engine >= VPU_ENGINE_NB)
350 return -EINVAL;
352 req.payload.hws_resume_engine.engine_idx = engine;
354 ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_HWS_RESUME_ENGINE_DONE, &resp,
355 VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
356 if (ret)
357 ivpu_err_ratelimited(vdev, "Failed to resume engine %d: %d\n", engine, ret);
359 return ret;
362 int ivpu_jsm_hws_set_context_sched_properties(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_id,
363 u32 priority)
365 struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES };
366 struct vpu_jsm_msg resp;
367 int ret;
369 req.payload.hws_set_context_sched_properties.host_ssid = ctx_id;
370 req.payload.hws_set_context_sched_properties.cmdq_id = cmdq_id;
371 req.payload.hws_set_context_sched_properties.priority_band = priority;
372 req.payload.hws_set_context_sched_properties.realtime_priority_level = 0;
373 req.payload.hws_set_context_sched_properties.in_process_priority = 0;
374 req.payload.hws_set_context_sched_properties.context_quantum = 20000;
375 req.payload.hws_set_context_sched_properties.grace_period_same_priority = 10000;
376 req.payload.hws_set_context_sched_properties.grace_period_lower_priority = 0;
378 ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES_RSP, &resp,
379 VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
380 if (ret)
381 ivpu_warn_ratelimited(vdev, "Failed to set context sched properties: %d\n", ret);
383 return ret;
386 int ivpu_jsm_hws_set_scheduling_log(struct ivpu_device *vdev, u32 engine_idx, u32 host_ssid,
387 u64 vpu_log_buffer_va)
389 struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG };
390 struct vpu_jsm_msg resp;
391 int ret;
393 req.payload.hws_set_scheduling_log.engine_idx = engine_idx;
394 req.payload.hws_set_scheduling_log.host_ssid = host_ssid;
395 req.payload.hws_set_scheduling_log.vpu_log_buffer_va = vpu_log_buffer_va;
396 req.payload.hws_set_scheduling_log.notify_index = 0;
397 req.payload.hws_set_scheduling_log.enable_extra_events =
398 ivpu_test_mode & IVPU_TEST_MODE_HWS_EXTRA_EVENTS;
400 ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG_RSP, &resp,
401 VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
402 if (ret)
403 ivpu_warn_ratelimited(vdev, "Failed to set scheduling log: %d\n", ret);
405 return ret;
/*
 * ivpu_jsm_hws_setup_priority_bands() - Program the HW scheduler priority bands.
 * @vdev: VPU device
 *
 * Configures grace periods and process quanta for the four priority bands
 * (indices 0..3: Idle, Normal, Focus, Realtime). Values are driver-fixed
 * defaults; units presumably microseconds — TODO confirm against the JSM API.
 *
 * Return: 0 on success, negative errno from the IPC layer on failure.
 */
int ivpu_jsm_hws_setup_priority_bands(struct ivpu_device *vdev)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP };
	struct vpu_jsm_msg resp;
	int ret;

	/* Idle */
	req.payload.hws_priority_band_setup.grace_period[0] = 0;
	req.payload.hws_priority_band_setup.process_grace_period[0] = 50000;
	req.payload.hws_priority_band_setup.process_quantum[0] = 160000;
	/* Normal */
	req.payload.hws_priority_band_setup.grace_period[1] = 50000;
	req.payload.hws_priority_band_setup.process_grace_period[1] = 50000;
	req.payload.hws_priority_band_setup.process_quantum[1] = 300000;
	/* Focus */
	req.payload.hws_priority_band_setup.grace_period[2] = 50000;
	req.payload.hws_priority_band_setup.process_grace_period[2] = 50000;
	req.payload.hws_priority_band_setup.process_quantum[2] = 200000;
	/* Realtime */
	req.payload.hws_priority_band_setup.grace_period[3] = 0;
	req.payload.hws_priority_band_setup.process_grace_period[3] = 50000;
	req.payload.hws_priority_band_setup.process_quantum[3] = 200000;

	/* Share of bandwidth reserved for the Normal band. */
	req.payload.hws_priority_band_setup.normal_band_percentage = 10;

	ret = ivpu_ipc_send_receive_active(vdev, &req, VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP_RSP,
					   &resp, VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to set priority bands: %d\n", ret);

	return ret;
}
441 int ivpu_jsm_metric_streamer_start(struct ivpu_device *vdev, u64 metric_group_mask,
442 u64 sampling_rate, u64 buffer_addr, u64 buffer_size)
444 struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_METRIC_STREAMER_START };
445 struct vpu_jsm_msg resp;
446 int ret;
448 req.payload.metric_streamer_start.metric_group_mask = metric_group_mask;
449 req.payload.metric_streamer_start.sampling_rate = sampling_rate;
450 req.payload.metric_streamer_start.buffer_addr = buffer_addr;
451 req.payload.metric_streamer_start.buffer_size = buffer_size;
453 ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_METRIC_STREAMER_START_DONE, &resp,
454 VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
455 if (ret) {
456 ivpu_warn_ratelimited(vdev, "Failed to start metric streamer: ret %d\n", ret);
457 return ret;
460 return ret;
463 int ivpu_jsm_metric_streamer_stop(struct ivpu_device *vdev, u64 metric_group_mask)
465 struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_METRIC_STREAMER_STOP };
466 struct vpu_jsm_msg resp;
467 int ret;
469 req.payload.metric_streamer_stop.metric_group_mask = metric_group_mask;
471 ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_METRIC_STREAMER_STOP_DONE, &resp,
472 VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
473 if (ret)
474 ivpu_warn_ratelimited(vdev, "Failed to stop metric streamer: ret %d\n", ret);
476 return ret;
479 int ivpu_jsm_metric_streamer_update(struct ivpu_device *vdev, u64 metric_group_mask,
480 u64 buffer_addr, u64 buffer_size, u64 *bytes_written)
482 struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_METRIC_STREAMER_UPDATE };
483 struct vpu_jsm_msg resp;
484 int ret;
486 req.payload.metric_streamer_update.metric_group_mask = metric_group_mask;
487 req.payload.metric_streamer_update.buffer_addr = buffer_addr;
488 req.payload.metric_streamer_update.buffer_size = buffer_size;
490 ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_METRIC_STREAMER_UPDATE_DONE, &resp,
491 VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
492 if (ret) {
493 ivpu_warn_ratelimited(vdev, "Failed to update metric streamer: ret %d\n", ret);
494 return ret;
497 if (buffer_size && resp.payload.metric_streamer_done.bytes_written > buffer_size) {
498 ivpu_warn_ratelimited(vdev, "MS buffer overflow: bytes_written %#llx > buffer_size %#llx\n",
499 resp.payload.metric_streamer_done.bytes_written, buffer_size);
500 return -EOVERFLOW;
503 *bytes_written = resp.payload.metric_streamer_done.bytes_written;
505 return ret;
508 int ivpu_jsm_metric_streamer_info(struct ivpu_device *vdev, u64 metric_group_mask, u64 buffer_addr,
509 u64 buffer_size, u32 *sample_size, u64 *info_size)
511 struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_METRIC_STREAMER_INFO };
512 struct vpu_jsm_msg resp;
513 int ret;
515 req.payload.metric_streamer_start.metric_group_mask = metric_group_mask;
516 req.payload.metric_streamer_start.buffer_addr = buffer_addr;
517 req.payload.metric_streamer_start.buffer_size = buffer_size;
519 ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_METRIC_STREAMER_INFO_DONE, &resp,
520 VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
521 if (ret) {
522 ivpu_warn_ratelimited(vdev, "Failed to get metric streamer info: ret %d\n", ret);
523 return ret;
526 if (!resp.payload.metric_streamer_done.sample_size) {
527 ivpu_warn_ratelimited(vdev, "Invalid sample size\n");
528 return -EBADMSG;
531 if (sample_size)
532 *sample_size = resp.payload.metric_streamer_done.sample_size;
533 if (info_size)
534 *info_size = resp.payload.metric_streamer_done.bytes_written;
536 return ret;
539 int ivpu_jsm_dct_enable(struct ivpu_device *vdev, u32 active_us, u32 inactive_us)
541 struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_DCT_ENABLE };
542 struct vpu_jsm_msg resp;
544 req.payload.pwr_dct_control.dct_active_us = active_us;
545 req.payload.pwr_dct_control.dct_inactive_us = inactive_us;
547 return ivpu_ipc_send_receive_active(vdev, &req, VPU_JSM_MSG_DCT_ENABLE_DONE,
548 &resp, VPU_IPC_CHAN_ASYNC_CMD,
549 vdev->timeout.jsm);
552 int ivpu_jsm_dct_disable(struct ivpu_device *vdev)
554 struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_DCT_DISABLE };
555 struct vpu_jsm_msg resp;
557 return ivpu_ipc_send_receive_active(vdev, &req, VPU_JSM_MSG_DCT_DISABLE_DONE,
558 &resp, VPU_IPC_CHAN_ASYNC_CMD,
559 vdev->timeout.jsm);