// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2024 Intel Corporation
 */

#include "ivpu_drv.h"
#include "ivpu_hw.h"
#include "ivpu_ipc.h"
#include "ivpu_jsm_msg.h"

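/*
 * Helpers for building JSM messages and exchanging them with the VPU
 * firmware over IPC. Unless noted otherwise, each helper fills in a
 * struct vpu_jsm_msg request, sends it with ivpu_ipc_send_receive() and
 * blocks until the matching *_DONE / *_RSP reply arrives or
 * vdev->timeout.jsm expires.
 */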
const char *ivpu_jsm_msg_type_to_str(enum vpu_ipc_msg_type type)
{
	#define IVPU_CASE_TO_STR(x) case x: return #x
	switch (type) {
	IVPU_CASE_TO_STR(VPU_JSM_MSG_UNKNOWN);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_ENGINE_RESET);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_ENGINE_PREEMPT);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_REGISTER_DB);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_UNREGISTER_DB);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_QUERY_ENGINE_HB);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_GET_POWER_LEVEL_COUNT);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_GET_POWER_LEVEL);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_POWER_LEVEL);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_OPEN);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_CLOSE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_SET_CONFIG);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_GET_CONFIG);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_GET_CAPABILITY);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_GET_NAME);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SSID_RELEASE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_START);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_STOP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_UPDATE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_INFO);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_CREATE_CMD_QUEUE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_DESTROY_CMD_QUEUE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_REGISTER_DB);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_RESUME_CMDQ);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_SUSPEND_CMDQ);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_RESUME_CMDQ_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_SUSPEND_CMDQ_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_SCHEDULING_LOG_NOTIFICATION);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_ENGINE_RESUME);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_RESUME_ENGINE_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_STATE_DUMP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_STATE_DUMP_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_BLOB_DEINIT);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_DYNDBG_CONTROL);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_JOB_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_ENGINE_RESET_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_ENGINE_PREEMPT_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_REGISTER_DB_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_UNREGISTER_DB_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_QUERY_ENGINE_HB_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_GET_POWER_LEVEL_COUNT_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_GET_POWER_LEVEL_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_POWER_LEVEL_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_OPEN_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_CLOSE_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_SET_CONFIG_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_GET_CONFIG_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_GET_CAPABILITY_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_GET_NAME_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SSID_RELEASE_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_START_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_STOP_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_UPDATE_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_INFO_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_NOTIFICATION);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_CREATE_CMD_QUEUE_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_DESTROY_CMD_QUEUE_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_BLOB_DEINIT_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_DYNDBG_CONTROL_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_PWR_D0I3_ENTER);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_PWR_D0I3_ENTER_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_DCT_ENABLE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_DCT_ENABLE_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_DCT_DISABLE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_DCT_DISABLE_DONE);
	}
	#undef IVPU_CASE_TO_STR

	return "Unknown JSM message type";
}

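/*
 * Once a doorbell is registered, ringing db_id tells the firmware to scan
 * the job queue at jobq_base for new entries. A minimal caller sketch;
 * names like file_priv and jobq are illustrative, not from this file:
 *
 *	ret = ivpu_jsm_register_db(vdev, file_priv->ctx.id, db_id,
 *				   jobq->vpu_addr, jobq->size);
 *	if (ret)
 *		return ret;
 */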
int ivpu_jsm_register_db(struct ivpu_device *vdev, u32 ctx_id, u32 db_id,
			 u64 jobq_base, u32 jobq_size)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_REGISTER_DB };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.register_db.db_idx = db_id;
	req.payload.register_db.jobq_base = jobq_base;
	req.payload.register_db.jobq_size = jobq_size;
	req.payload.register_db.host_ssid = ctx_id;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_REGISTER_DB_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_err_ratelimited(vdev, "Failed to register doorbell %u: %d\n", db_id, ret);

	return ret;
}

int ivpu_jsm_unregister_db(struct ivpu_device *vdev, u32 db_id)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_UNREGISTER_DB };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.unregister_db.db_idx = db_id;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_UNREGISTER_DB_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to unregister doorbell %u: %d\n", db_id, ret);

	return ret;
}

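/*
 * The heartbeat is a counter the firmware advances while an engine makes
 * progress, so callers can use it as a liveness indicator for hang
 * detection. A hedged sketch of such a check; prev_hb is a caller-side
 * variable, not part of this API:
 *
 *	u64 hb;
 *
 *	ret = ivpu_jsm_get_heartbeat(vdev, VPU_ENGINE_COMPUTE, &hb);
 *	if (!ret && hb == prev_hb)
 *		ivpu_err(vdev, "Engine appears stuck\n");
 */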
int ivpu_jsm_get_heartbeat(struct ivpu_device *vdev, u32 engine, u64 *heartbeat)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_QUERY_ENGINE_HB };
	struct vpu_jsm_msg resp;
	int ret;

	if (engine > VPU_ENGINE_COPY)
		return -EINVAL;

	req.payload.query_engine_hb.engine_idx = engine;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_QUERY_ENGINE_HB_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret) {
		ivpu_err_ratelimited(vdev, "Failed to get heartbeat from engine %d: %d\n",
				     engine, ret);
		return ret;
	}

	*heartbeat = resp.payload.query_engine_hb_done.heartbeat;
	return ret;
}

int ivpu_jsm_reset_engine(struct ivpu_device *vdev, u32 engine)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_ENGINE_RESET };
	struct vpu_jsm_msg resp;
	int ret;

	if (engine > VPU_ENGINE_COPY)
		return -EINVAL;

	req.payload.engine_reset.engine_idx = engine;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_ENGINE_RESET_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_err_ratelimited(vdev, "Failed to reset engine %d: %d\n", engine, ret);

	return ret;
}

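/*
 * Requests preemption of the job currently running on the given engine.
 * preempt_id is a caller-chosen tag carried in the request payload, which
 * lets the host correlate the eventual VPU_JSM_MSG_ENGINE_PREEMPT_DONE
 * notification with this particular request.
 */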
int ivpu_jsm_preempt_engine(struct ivpu_device *vdev, u32 engine, u32 preempt_id)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_ENGINE_PREEMPT };
	struct vpu_jsm_msg resp;
	int ret;

	if (engine > VPU_ENGINE_COPY)
		return -EINVAL;

	req.payload.engine_preempt.engine_idx = engine;
	req.payload.engine_preempt.preempt_id = preempt_id;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_ENGINE_PREEMPT_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_err_ratelimited(vdev, "Failed to preempt engine %d: %d\n", engine, ret);

	return ret;
}

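/*
 * Forwards a dynamic-debug command string to the firmware. strscpy()
 * truncates the command to VPU_DYNDBG_CMD_MAX_LEN bytes, so the 'size'
 * argument is not used by the body itself. A hedged sketch of a debugfs
 * write handler calling this; the buffer handling is illustrative:
 *
 *	char cmd[VPU_DYNDBG_CMD_MAX_LEN] = {};
 *
 *	if (copy_from_user(cmd, user_buf, min(size, sizeof(cmd) - 1)))
 *		return -EFAULT;
 *	ivpu_jsm_dyndbg_control(vdev, cmd, size);
 */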
int ivpu_jsm_dyndbg_control(struct ivpu_device *vdev, char *command, size_t size)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_DYNDBG_CONTROL };
	struct vpu_jsm_msg resp;
	int ret;

	strscpy(req.payload.dyndbg_control.dyndbg_cmd, command, VPU_DYNDBG_CMD_MAX_LEN);

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_DYNDBG_CONTROL_RSP, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to send command \"%s\": ret %d\n",
				      command, ret);

	return ret;
}

int ivpu_jsm_trace_get_capability(struct ivpu_device *vdev, u32 *trace_destination_mask,
				  u64 *trace_hw_component_mask)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_TRACE_GET_CAPABILITY };
	struct vpu_jsm_msg resp;
	int ret;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_TRACE_GET_CAPABILITY_RSP, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret) {
		ivpu_warn_ratelimited(vdev, "Failed to get trace capability: %d\n", ret);
		return ret;
	}

	*trace_destination_mask = resp.payload.trace_capability.trace_destination_mask;
	*trace_hw_component_mask = resp.payload.trace_capability.trace_hw_component_mask;

	return ret;
}

int ivpu_jsm_trace_set_config(struct ivpu_device *vdev, u32 trace_level, u32 trace_destination_mask,
			      u64 trace_hw_component_mask)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_TRACE_SET_CONFIG };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.trace_config.trace_level = trace_level;
	req.payload.trace_config.trace_destination_mask = trace_destination_mask;
	req.payload.trace_config.trace_hw_component_mask = trace_hw_component_mask;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_TRACE_SET_CONFIG_RSP, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to set config: %d\n", ret);

	return ret;
}

int ivpu_jsm_context_release(struct ivpu_device *vdev, u32 host_ssid)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_SSID_RELEASE };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.ssid_release.host_ssid = host_ssid;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_SSID_RELEASE_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to release context: %d\n", ret);

	return ret;
}

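/*
 * D0i3 entry is a power-down handshake: the message goes out on the
 * general command channel while the hardware is still active, the firmware
 * is asked to confirm (send_response = 1), and the host then waits for the
 * hardware to report idle before the device is power gated. The
 * disable_d0i3_msg workaround turns the whole exchange into a no-op.
 */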
int ivpu_jsm_pwr_d0i3_enter(struct ivpu_device *vdev)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_PWR_D0I3_ENTER };
	struct vpu_jsm_msg resp;
	int ret;

	if (IVPU_WA(disable_d0i3_msg))
		return 0;

	req.payload.pwr_d0i3_enter.send_response = 1;

	ret = ivpu_ipc_send_receive_active(vdev, &req, VPU_JSM_MSG_PWR_D0I3_ENTER_DONE,
					   &resp, VPU_IPC_CHAN_GEN_CMD,
					   vdev->timeout.d0i3_entry_msg);
	if (ret)
		return ret;

	return ivpu_hw_wait_for_idle(vdev);
}

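/*
 * The HWS (hardware scheduler) helpers below manage firmware-side command
 * queues. A hedged sketch of the setup order a submission path might use;
 * the IDs, the cmdq object and its fields are illustrative:
 *
 *	ret = ivpu_jsm_hws_create_cmdq(vdev, ctx_id, cmdq_group, cmdq_id,
 *				       task_pid_nr(current), engine,
 *				       cmdq->vpu_addr, cmdq->size);
 *	if (!ret)
 *		ret = ivpu_jsm_hws_register_db(vdev, ctx_id, cmdq_id, db_id,
 *					       cmdq->vpu_addr, cmdq->size);
 *	if (!ret)
 *		ret = ivpu_jsm_hws_set_context_sched_properties(vdev, ctx_id,
 *								cmdq_id, priority);
 */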
int ivpu_jsm_hws_create_cmdq(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_group, u32 cmdq_id,
			     u32 pid, u32 engine, u64 cmdq_base, u32 cmdq_size)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_CREATE_CMD_QUEUE };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.hws_create_cmdq.host_ssid = ctx_id;
	req.payload.hws_create_cmdq.process_id = pid;
	req.payload.hws_create_cmdq.engine_idx = engine;
	req.payload.hws_create_cmdq.cmdq_group = cmdq_group;
	req.payload.hws_create_cmdq.cmdq_id = cmdq_id;
	req.payload.hws_create_cmdq.cmdq_base = cmdq_base;
	req.payload.hws_create_cmdq.cmdq_size = cmdq_size;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_CREATE_CMD_QUEUE_RSP, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to create command queue: %d\n", ret);

	return ret;
}

int ivpu_jsm_hws_destroy_cmdq(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_id)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_DESTROY_CMD_QUEUE };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.hws_destroy_cmdq.host_ssid = ctx_id;
	req.payload.hws_destroy_cmdq.cmdq_id = cmdq_id;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_DESTROY_CMD_QUEUE_RSP, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to destroy command queue: %d\n", ret);

	return ret;
}

int ivpu_jsm_hws_register_db(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_id, u32 db_id,
			     u64 cmdq_base, u32 cmdq_size)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_HWS_REGISTER_DB };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.hws_register_db.db_id = db_id;
	req.payload.hws_register_db.host_ssid = ctx_id;
	req.payload.hws_register_db.cmdq_id = cmdq_id;
	req.payload.hws_register_db.cmdq_base = cmdq_base;
	req.payload.hws_register_db.cmdq_size = cmdq_size;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_REGISTER_DB_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_err_ratelimited(vdev, "Failed to register doorbell %u: %d\n", db_id, ret);

	return ret;
}

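/*
 * Resumes job dispatch on an engine that the hardware scheduler has
 * stopped, typically paired with an engine reset in HWS mode. The index
 * is validated against VPU_ENGINE_NB, the total number of engines.
 */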
int ivpu_jsm_hws_resume_engine(struct ivpu_device *vdev, u32 engine)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_HWS_ENGINE_RESUME };
	struct vpu_jsm_msg resp;
	int ret;

	if (engine >= VPU_ENGINE_NB)
		return -EINVAL;

	req.payload.hws_resume_engine.engine_idx = engine;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_HWS_RESUME_ENGINE_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_err_ratelimited(vdev, "Failed to resume engine %d: %d\n", engine, ret);

	return ret;
}

int ivpu_jsm_hws_set_context_sched_properties(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_id,
					      u32 priority)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.hws_set_context_sched_properties.host_ssid = ctx_id;
	req.payload.hws_set_context_sched_properties.cmdq_id = cmdq_id;
	req.payload.hws_set_context_sched_properties.priority_band = priority;
	req.payload.hws_set_context_sched_properties.realtime_priority_level = 0;
	req.payload.hws_set_context_sched_properties.in_process_priority = 0;
	req.payload.hws_set_context_sched_properties.context_quantum = 20000;
	req.payload.hws_set_context_sched_properties.grace_period_same_priority = 10000;
	req.payload.hws_set_context_sched_properties.grace_period_lower_priority = 0;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES_RSP, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to set context sched properties: %d\n", ret);

	return ret;
}

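/*
 * Points the hardware scheduler at a per-context log buffer in VPU address
 * space where it can report scheduling events. Extra (verbose) events are
 * only requested when the IVPU_TEST_MODE_HWS_EXTRA_EVENTS bit is set in
 * the ivpu_test_mode module parameter.
 */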
int ivpu_jsm_hws_set_scheduling_log(struct ivpu_device *vdev, u32 engine_idx, u32 host_ssid,
				    u64 vpu_log_buffer_va)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.hws_set_scheduling_log.engine_idx = engine_idx;
	req.payload.hws_set_scheduling_log.host_ssid = host_ssid;
	req.payload.hws_set_scheduling_log.vpu_log_buffer_va = vpu_log_buffer_va;
	req.payload.hws_set_scheduling_log.notify_index = 0;
	req.payload.hws_set_scheduling_log.enable_extra_events =
		ivpu_test_mode & IVPU_TEST_MODE_HWS_EXTRA_EVENTS;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG_RSP, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to set scheduling log: %d\n", ret);

	return ret;
}

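/*
 * Programs the four HWS priority bands (indices 0-3; the comments below
 * label them idle, normal, focus and realtime) with fixed host-side grace
 * periods and process quanta. normal_band_percentage is set to 10,
 * presumably reserving a minimum execution share for the normal band.
 */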
int ivpu_jsm_hws_setup_priority_bands(struct ivpu_device *vdev)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP };
	struct vpu_jsm_msg resp;
	int ret;

	/* Idle */
	req.payload.hws_priority_band_setup.grace_period[0] = 0;
	req.payload.hws_priority_band_setup.process_grace_period[0] = 50000;
	req.payload.hws_priority_band_setup.process_quantum[0] = 160000;
	/* Normal */
	req.payload.hws_priority_band_setup.grace_period[1] = 50000;
	req.payload.hws_priority_band_setup.process_grace_period[1] = 50000;
	req.payload.hws_priority_band_setup.process_quantum[1] = 300000;
	/* Focus */
	req.payload.hws_priority_band_setup.grace_period[2] = 50000;
	req.payload.hws_priority_band_setup.process_grace_period[2] = 50000;
	req.payload.hws_priority_band_setup.process_quantum[2] = 200000;
	/* Realtime */
	req.payload.hws_priority_band_setup.grace_period[3] = 0;
	req.payload.hws_priority_band_setup.process_grace_period[3] = 50000;
	req.payload.hws_priority_band_setup.process_quantum[3] = 200000;

	req.payload.hws_priority_band_setup.normal_band_percentage = 10;

	ret = ivpu_ipc_send_receive_active(vdev, &req, VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP_RSP,
					   &resp, VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to set priority bands: %d\n", ret);

	return ret;
}

int ivpu_jsm_metric_streamer_start(struct ivpu_device *vdev, u64 metric_group_mask,
				   u64 sampling_rate, u64 buffer_addr, u64 buffer_size)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_METRIC_STREAMER_START };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.metric_streamer_start.metric_group_mask = metric_group_mask;
	req.payload.metric_streamer_start.sampling_rate = sampling_rate;
	req.payload.metric_streamer_start.buffer_addr = buffer_addr;
	req.payload.metric_streamer_start.buffer_size = buffer_size;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_METRIC_STREAMER_START_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret) {
		ivpu_warn_ratelimited(vdev, "Failed to start metric streamer: ret %d\n", ret);
		return ret;
	}

	return ret;
}

int ivpu_jsm_metric_streamer_stop(struct ivpu_device *vdev, u64 metric_group_mask)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_METRIC_STREAMER_STOP };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.metric_streamer_stop.metric_group_mask = metric_group_mask;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_METRIC_STREAMER_STOP_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to stop metric streamer: ret %d\n", ret);

	return ret;
}

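/*
 * Asks the firmware to flush sampled metric data into the supplied buffer.
 * If the firmware reports more bytes written than the buffer can hold, the
 * helper flags the overflow and returns -EOVERFLOW rather than trusting
 * the response.
 */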
int ivpu_jsm_metric_streamer_update(struct ivpu_device *vdev, u64 metric_group_mask,
				    u64 buffer_addr, u64 buffer_size, u64 *bytes_written)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_METRIC_STREAMER_UPDATE };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.metric_streamer_update.metric_group_mask = metric_group_mask;
	req.payload.metric_streamer_update.buffer_addr = buffer_addr;
	req.payload.metric_streamer_update.buffer_size = buffer_size;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_METRIC_STREAMER_UPDATE_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret) {
		ivpu_warn_ratelimited(vdev, "Failed to update metric streamer: ret %d\n", ret);
		return ret;
	}

	if (buffer_size && resp.payload.metric_streamer_done.bytes_written > buffer_size) {
		ivpu_warn_ratelimited(vdev, "MS buffer overflow: bytes_written %#llx > buffer_size %#llx\n",
				      resp.payload.metric_streamer_done.bytes_written, buffer_size);
		return -EOVERFLOW;
	}

	*bytes_written = resp.payload.metric_streamer_done.bytes_written;

	return ret;
}

int ivpu_jsm_metric_streamer_info(struct ivpu_device *vdev, u64 metric_group_mask, u64 buffer_addr,
				  u64 buffer_size, u32 *sample_size, u64 *info_size)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_METRIC_STREAMER_INFO };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.metric_streamer_start.metric_group_mask = metric_group_mask;
	req.payload.metric_streamer_start.buffer_addr = buffer_addr;
	req.payload.metric_streamer_start.buffer_size = buffer_size;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_METRIC_STREAMER_INFO_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret) {
		ivpu_warn_ratelimited(vdev, "Failed to get metric streamer info: ret %d\n", ret);
		return ret;
	}

	if (!resp.payload.metric_streamer_done.sample_size) {
		ivpu_warn_ratelimited(vdev, "Invalid sample size\n");
		return -EBADMSG;
	}

	if (sample_size)
		*sample_size = resp.payload.metric_streamer_done.sample_size;
	if (info_size)
		*info_size = resp.payload.metric_streamer_done.bytes_written;

	return ret;
}

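/*
 * DCT (duty cycle throttling) limits power draw by alternating between
 * active and inactive phases of the given durations in microseconds. A
 * hedged example: a 30% duty cycle could be requested with
 *
 *	ret = ivpu_jsm_dct_enable(vdev, 15000, 35000);
 *
 * and lifted again with ivpu_jsm_dct_disable(vdev).
 */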
int ivpu_jsm_dct_enable(struct ivpu_device *vdev, u32 active_us, u32 inactive_us)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_DCT_ENABLE };
	struct vpu_jsm_msg resp;

	req.payload.pwr_dct_control.dct_active_us = active_us;
	req.payload.pwr_dct_control.dct_inactive_us = inactive_us;

	return ivpu_ipc_send_receive_active(vdev, &req, VPU_JSM_MSG_DCT_ENABLE_DONE,
					    &resp, VPU_IPC_CHAN_ASYNC_CMD,
					    vdev->timeout.jsm);
}

int ivpu_jsm_dct_disable(struct ivpu_device *vdev)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_DCT_DISABLE };
	struct vpu_jsm_msg resp;

	return ivpu_ipc_send_receive_active(vdev, &req, VPU_JSM_MSG_DCT_DISABLE_DONE,
					    &resp, VPU_IPC_CHAN_ASYNC_CMD,
					    vdev->timeout.jsm);
}