// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2024 Intel Corporation
 */

#include <linux/genalloc.h>
#include <linux/highmem.h>
#include <linux/pm_runtime.h>
#include <linux/wait.h>

#include "ivpu_drv.h"
#include "ivpu_gem.h"
#include "ivpu_hw.h"
#include "ivpu_hw_reg_io.h"
#include "ivpu_ipc.h"
#include "ivpu_jsm_msg.h"
#include "ivpu_pm.h"
#include "ivpu_trace.h"

#define IPC_MAX_RX_MSG 128

struct ivpu_ipc_tx_buf {
	struct ivpu_ipc_hdr ipc;
	struct vpu_jsm_msg jsm;
};

static void ivpu_ipc_msg_dump(struct ivpu_device *vdev, char *c,
			      struct ivpu_ipc_hdr *ipc_hdr, u32 vpu_addr)
{
	ivpu_dbg(vdev, IPC,
		 "%s: vpu:0x%x (data_addr:0x%08x, data_size:0x%x, channel:0x%x, src_node:0x%x, dst_node:0x%x, status:0x%x)",
		 c, vpu_addr, ipc_hdr->data_addr, ipc_hdr->data_size, ipc_hdr->channel,
		 ipc_hdr->src_node, ipc_hdr->dst_node, ipc_hdr->status);
}

static void ivpu_jsm_msg_dump(struct ivpu_device *vdev, char *c,
			      struct vpu_jsm_msg *jsm_msg, u32 vpu_addr)
{
	u32 *payload = (u32 *)&jsm_msg->payload;

	ivpu_dbg(vdev, JSM,
		 "%s: vpu:0x%08x (type:%s, status:0x%x, id: 0x%x, result: 0x%x, payload:0x%x 0x%x 0x%x 0x%x 0x%x)\n",
		 c, vpu_addr, ivpu_jsm_msg_type_to_str(jsm_msg->type),
		 jsm_msg->status, jsm_msg->request_id, jsm_msg->result,
		 payload[0], payload[1], payload[2], payload[3], payload[4]);
}

static void
ivpu_ipc_rx_mark_free(struct ivpu_device *vdev, struct ivpu_ipc_hdr *ipc_hdr,
		      struct vpu_jsm_msg *jsm_msg)
{
	ipc_hdr->status = IVPU_IPC_HDR_FREE;
	if (jsm_msg)
		jsm_msg->status = VPU_JSM_MSG_FREE;
	wmb(); /* Flush WC buffers for message statuses */
}

static void ivpu_ipc_mem_fini(struct ivpu_device *vdev)
{
	struct ivpu_ipc_info *ipc = vdev->ipc;

	ivpu_bo_free(ipc->mem_rx);
	ivpu_bo_free(ipc->mem_tx);
}
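
/*
 * TX path: each request is staged in a struct ivpu_ipc_tx_buf carved out of
 * the write-combined TX buffer. The IPC header's data_addr points at the
 * embedded JSM message, and the allocated request_id is recorded in the
 * consumer so the RX path can match the firmware response.
 */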

static int
ivpu_ipc_tx_prepare(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
		    struct vpu_jsm_msg *req)
{
	struct ivpu_ipc_info *ipc = vdev->ipc;
	struct ivpu_ipc_tx_buf *tx_buf;
	u32 tx_buf_vpu_addr;
	u32 jsm_vpu_addr;

	tx_buf_vpu_addr = gen_pool_alloc(ipc->mm_tx, sizeof(*tx_buf));
	if (!tx_buf_vpu_addr) {
		ivpu_err_ratelimited(vdev, "Failed to reserve IPC buffer, size %ld\n",
				     sizeof(*tx_buf));
		return -ENOMEM;
	}

	tx_buf = ivpu_to_cpu_addr(ipc->mem_tx, tx_buf_vpu_addr);
	if (drm_WARN_ON(&vdev->drm, !tx_buf)) {
		gen_pool_free(ipc->mm_tx, tx_buf_vpu_addr, sizeof(*tx_buf));
		return -EIO;
	}

	jsm_vpu_addr = tx_buf_vpu_addr + offsetof(struct ivpu_ipc_tx_buf, jsm);

	if (tx_buf->ipc.status != IVPU_IPC_HDR_FREE)
		ivpu_warn_ratelimited(vdev, "IPC message vpu:0x%x not released by firmware\n",
				      tx_buf_vpu_addr);

	if (tx_buf->jsm.status != VPU_JSM_MSG_FREE)
		ivpu_warn_ratelimited(vdev, "JSM message vpu:0x%x not released by firmware\n",
				      jsm_vpu_addr);

	memset(tx_buf, 0, sizeof(*tx_buf));
	tx_buf->ipc.data_addr = jsm_vpu_addr;
	/* TODO: Set data_size to actual JSM message size, not union of all messages */
	tx_buf->ipc.data_size = sizeof(*req);
	tx_buf->ipc.channel = cons->channel;
	tx_buf->ipc.src_node = 0;
	tx_buf->ipc.dst_node = 1;
	tx_buf->ipc.status = IVPU_IPC_HDR_ALLOCATED;
	tx_buf->jsm.type = req->type;
	tx_buf->jsm.status = VPU_JSM_MSG_ALLOCATED;
	tx_buf->jsm.payload = req->payload;

	req->request_id = atomic_inc_return(&ipc->request_id);
	tx_buf->jsm.request_id = req->request_id;
	cons->request_id = req->request_id;
	wmb(); /* Flush WC buffers for IPC, JSM msgs */

	cons->tx_vpu_addr = tx_buf_vpu_addr;

	ivpu_jsm_msg_dump(vdev, "TX", &tx_buf->jsm, jsm_vpu_addr);
	ivpu_ipc_msg_dump(vdev, "TX", &tx_buf->ipc, tx_buf_vpu_addr);

	return 0;
}

static void ivpu_ipc_tx_release(struct ivpu_device *vdev, u32 vpu_addr)
{
	struct ivpu_ipc_info *ipc = vdev->ipc;

	if (vpu_addr)
		gen_pool_free(ipc->mm_tx, vpu_addr, sizeof(struct ivpu_ipc_tx_buf));
}

static void ivpu_ipc_tx(struct ivpu_device *vdev, u32 vpu_addr)
{
	ivpu_hw_ipc_tx_set(vdev, vpu_addr);
}
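
/*
 * RX bookkeeping: received messages are wrapped in struct ivpu_ipc_rx_msg and
 * queued either on the global callback list (consumers with an rx_callback)
 * or on the consumer's own list (synchronous consumers waiting in
 * ivpu_ipc_receive()). ivpu_ipc_rx_msg_add() runs under ipc->cons_lock with
 * interrupts disabled, see the lockdep assertions.
 */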

static void
ivpu_ipc_rx_msg_add(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
		    struct ivpu_ipc_hdr *ipc_hdr, struct vpu_jsm_msg *jsm_msg)
{
	struct ivpu_ipc_info *ipc = vdev->ipc;
	struct ivpu_ipc_rx_msg *rx_msg;

	lockdep_assert_held(&ipc->cons_lock);
	lockdep_assert_irqs_disabled();

	rx_msg = kzalloc(sizeof(*rx_msg), GFP_ATOMIC);
	if (!rx_msg) {
		ivpu_ipc_rx_mark_free(vdev, ipc_hdr, jsm_msg);
		return;
	}

	atomic_inc(&ipc->rx_msg_count);

	rx_msg->ipc_hdr = ipc_hdr;
	rx_msg->jsm_msg = jsm_msg;
	rx_msg->callback = cons->rx_callback;

	if (rx_msg->callback) {
		list_add_tail(&rx_msg->link, &ipc->cb_msg_list);
	} else {
		spin_lock(&cons->rx_lock);
		list_add_tail(&rx_msg->link, &cons->rx_msg_list);
		spin_unlock(&cons->rx_lock);
		wake_up(&cons->rx_msg_wq);
	}
}

static void
ivpu_ipc_rx_msg_del(struct ivpu_device *vdev, struct ivpu_ipc_rx_msg *rx_msg)
{
	list_del(&rx_msg->link);
	ivpu_ipc_rx_mark_free(vdev, rx_msg->ipc_hdr, rx_msg->jsm_msg);
	atomic_dec(&vdev->ipc->rx_msg_count);
	kfree(rx_msg);
}
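
/*
 * Consumers bind to an IPC channel. A consumer registered with an rx_callback
 * has its responses dispatched from the IRQ work handler; a consumer without
 * a callback collects responses synchronously via ivpu_ipc_receive().
 */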

void ivpu_ipc_consumer_add(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
			   u32 channel, ivpu_ipc_rx_callback_t rx_callback)
{
	struct ivpu_ipc_info *ipc = vdev->ipc;

	INIT_LIST_HEAD(&cons->link);
	cons->channel = channel;
	cons->tx_vpu_addr = 0;
	cons->request_id = 0;
	cons->aborted = false;
	cons->rx_callback = rx_callback;
	spin_lock_init(&cons->rx_lock);
	INIT_LIST_HEAD(&cons->rx_msg_list);
	init_waitqueue_head(&cons->rx_msg_wq);

	spin_lock_irq(&ipc->cons_lock);
	list_add_tail(&cons->link, &ipc->cons_list);
	spin_unlock_irq(&ipc->cons_lock);
}

void ivpu_ipc_consumer_del(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons)
{
	struct ivpu_ipc_info *ipc = vdev->ipc;
	struct ivpu_ipc_rx_msg *rx_msg, *r;

	spin_lock_irq(&ipc->cons_lock);
	list_del(&cons->link);
	spin_unlock_irq(&ipc->cons_lock);

	spin_lock_irq(&cons->rx_lock);
	list_for_each_entry_safe(rx_msg, r, &cons->rx_msg_list, link)
		ivpu_ipc_rx_msg_del(vdev, rx_msg);
	spin_unlock_irq(&cons->rx_lock);

	ivpu_ipc_tx_release(vdev, cons->tx_vpu_addr);
}
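
/*
 * Stage a JSM request for the given consumer and hand its VPU address to the
 * hardware via ivpu_ipc_tx(). TX submission is serialized with ipc->lock.
 */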

int ivpu_ipc_send(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, struct vpu_jsm_msg *req)
{
	struct ivpu_ipc_info *ipc = vdev->ipc;
	int ret;

	mutex_lock(&ipc->lock);

	if (!ipc->on) {
		ret = -EAGAIN;
		goto unlock;
	}

	ret = ivpu_ipc_tx_prepare(vdev, cons, req);
	if (ret)
		goto unlock;

	ivpu_ipc_tx(vdev, cons->tx_vpu_addr);
	trace_jsm("[tx]", req);

unlock:
	mutex_unlock(&ipc->lock);
	return ret;
}

static bool ivpu_ipc_rx_need_wakeup(struct ivpu_ipc_consumer *cons)
{
	bool ret;

	spin_lock_irq(&cons->rx_lock);
	ret = !list_empty(&cons->rx_msg_list) || cons->aborted;
	spin_unlock_irq(&cons->rx_lock);

	return ret;
}
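
/*
 * Block until a response arrives on the consumer's RX list, the consumer is
 * aborted, or timeout_ms expires. Only synchronous (callback-less) consumers
 * may call this; the IPC header and JSM payload are copied out before the
 * internal RX message is released.
 */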

int ivpu_ipc_receive(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
		     struct ivpu_ipc_hdr *ipc_buf,
		     struct vpu_jsm_msg *jsm_msg, unsigned long timeout_ms)
{
	struct ivpu_ipc_rx_msg *rx_msg;
	int wait_ret, ret = 0;

	if (drm_WARN_ONCE(&vdev->drm, cons->rx_callback, "Consumer works only in async mode\n"))
		return -EINVAL;

	wait_ret = wait_event_timeout(cons->rx_msg_wq,
				      ivpu_ipc_rx_need_wakeup(cons),
				      msecs_to_jiffies(timeout_ms));

	if (wait_ret == 0)
		return -ETIMEDOUT;

	spin_lock_irq(&cons->rx_lock);
	if (cons->aborted) {
		spin_unlock_irq(&cons->rx_lock);
		return -ECANCELED;
	}
	rx_msg = list_first_entry_or_null(&cons->rx_msg_list, struct ivpu_ipc_rx_msg, link);
	if (!rx_msg) {
		spin_unlock_irq(&cons->rx_lock);
		return -EAGAIN;
	}

	if (ipc_buf)
		memcpy(ipc_buf, rx_msg->ipc_hdr, sizeof(*ipc_buf));
	if (rx_msg->jsm_msg) {
		u32 size = min_t(int, rx_msg->ipc_hdr->data_size, sizeof(*jsm_msg));

		if (rx_msg->jsm_msg->result != VPU_JSM_STATUS_SUCCESS) {
			ivpu_err(vdev, "IPC resp result error: %d\n", rx_msg->jsm_msg->result);
			ret = -EBADMSG;
		}

		if (jsm_msg) {
			memcpy(jsm_msg, rx_msg->jsm_msg, size);
			trace_jsm("[rx]", rx_msg->jsm_msg);
		}
	}

	ivpu_ipc_rx_msg_del(vdev, rx_msg);
	spin_unlock_irq(&cons->rx_lock);
	return ret;
}
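
/*
 * Send a request and synchronously wait for a response of the expected type
 * using a temporary, callback-less consumer on the given channel. The caller
 * is expected to hold a runtime PM reference (see the drm_WARN_ON below).
 */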

static int
ivpu_ipc_send_receive_internal(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
			       enum vpu_ipc_msg_type expected_resp_type,
			       struct vpu_jsm_msg *resp, u32 channel, unsigned long timeout_ms)
{
	struct ivpu_ipc_consumer cons;
	int ret;

	drm_WARN_ON(&vdev->drm, pm_runtime_status_suspended(vdev->drm.dev));

	ivpu_ipc_consumer_add(vdev, &cons, channel, NULL);

	ret = ivpu_ipc_send(vdev, &cons, req);
	if (ret) {
		ivpu_warn_ratelimited(vdev, "IPC send failed: %d\n", ret);
		goto consumer_del;
	}

	ret = ivpu_ipc_receive(vdev, &cons, NULL, resp, timeout_ms);
	if (ret) {
		ivpu_warn_ratelimited(vdev, "IPC receive failed: type %s, ret %d\n",
				      ivpu_jsm_msg_type_to_str(req->type), ret);
		goto consumer_del;
	}

	if (resp->type != expected_resp_type) {
		ivpu_warn_ratelimited(vdev, "Invalid JSM response type: 0x%x\n", resp->type);
		ret = -EBADE;
	}

consumer_del:
	ivpu_ipc_consumer_del(vdev, &cons);
	return ret;
}
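
/*
 * Runtime-PM-aware request/response helper. On a timeout it additionally
 * sends an engine heartbeat query; if the heartbeat also times out, recovery
 * is triggered.
 *
 * Illustrative sketch of a hypothetical caller (the request type and channel
 * names are taken from this file; the timeout value is an arbitrary example):
 *
 *	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_QUERY_ENGINE_HB };
 *	struct vpu_jsm_msg resp;
 *	int ret;
 *
 *	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_QUERY_ENGINE_HB_DONE,
 *				    &resp, VPU_IPC_CHAN_ASYNC_CMD, 100);
 *	if (ret)
 *		return ret;
 */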

int ivpu_ipc_send_receive(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
			  enum vpu_ipc_msg_type expected_resp, struct vpu_jsm_msg *resp,
			  u32 channel, unsigned long timeout_ms)
{
	struct vpu_jsm_msg hb_req = { .type = VPU_JSM_MSG_QUERY_ENGINE_HB };
	struct vpu_jsm_msg hb_resp;
	int ret, hb_ret;

	ret = ivpu_rpm_get(vdev);
	if (ret < 0)
		return ret;

	ret = ivpu_ipc_send_receive_internal(vdev, req, expected_resp, resp, channel, timeout_ms);
	if (ret != -ETIMEDOUT)
		goto rpm_put;

	hb_ret = ivpu_ipc_send_receive_internal(vdev, &hb_req, VPU_JSM_MSG_QUERY_ENGINE_HB_DONE,
						&hb_resp, VPU_IPC_CHAN_ASYNC_CMD,
						vdev->timeout.jsm);
	if (hb_ret == -ETIMEDOUT)
		ivpu_pm_trigger_recovery(vdev, "IPC timeout");

rpm_put:
	ivpu_rpm_put(vdev);
	return ret;
}

int ivpu_ipc_send_and_wait(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
			   u32 channel, unsigned long timeout_ms)
{
	struct ivpu_ipc_consumer cons;
	int ret;

	ret = ivpu_rpm_get(vdev);
	if (ret < 0)
		return ret;

	ivpu_ipc_consumer_add(vdev, &cons, channel, NULL);

	ret = ivpu_ipc_send(vdev, &cons, req);
	if (ret) {
		ivpu_warn_ratelimited(vdev, "IPC send failed: %d\n", ret);
		goto consumer_del;
	}

	msleep(timeout_ms);

consumer_del:
	ivpu_ipc_consumer_del(vdev, &cons);
	ivpu_rpm_put(vdev);
	return ret;
}

static bool
ivpu_ipc_match_consumer(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
			struct ivpu_ipc_hdr *ipc_hdr, struct vpu_jsm_msg *jsm_msg)
{
	if (cons->channel != ipc_hdr->channel)
		return false;

	if (!jsm_msg || jsm_msg->request_id == cons->request_id)
		return true;

	return false;
}
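
/*
 * IRQ-context RX: drain the hardware RX FIFO, validate each IPC header and
 * JSM message, and hand matching messages to the first consumer on the right
 * channel. Callback processing itself is deferred to ivpu_ipc_irq_work_fn().
 */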

void ivpu_ipc_irq_handler(struct ivpu_device *vdev)
{
	struct ivpu_ipc_info *ipc = vdev->ipc;
	struct ivpu_ipc_consumer *cons;
	struct ivpu_ipc_hdr *ipc_hdr;
	struct vpu_jsm_msg *jsm_msg;
	unsigned long flags;
	bool dispatched;
	u32 vpu_addr;

	/*
	 * Driver needs to purge all messages from IPC FIFO to clear IPC interrupt.
	 * Without purge IPC FIFO to 0 next IPC interrupts won't be generated.
	 */
	while (ivpu_hw_ipc_rx_count_get(vdev)) {
		vpu_addr = ivpu_hw_ipc_rx_addr_get(vdev);
		if (vpu_addr == REG_IO_ERROR) {
			ivpu_err_ratelimited(vdev, "Failed to read IPC rx addr register\n");
			return;
		}

		ipc_hdr = ivpu_to_cpu_addr(ipc->mem_rx, vpu_addr);
		if (!ipc_hdr) {
			ivpu_warn_ratelimited(vdev, "IPC msg 0x%x out of range\n", vpu_addr);
			continue;
		}
		ivpu_ipc_msg_dump(vdev, "RX", ipc_hdr, vpu_addr);

		jsm_msg = NULL;
		if (ipc_hdr->channel != IVPU_IPC_CHAN_BOOT_MSG) {
			jsm_msg = ivpu_to_cpu_addr(ipc->mem_rx, ipc_hdr->data_addr);
			if (!jsm_msg) {
				ivpu_warn_ratelimited(vdev, "JSM msg 0x%x out of range\n",
						      ipc_hdr->data_addr);
				ivpu_ipc_rx_mark_free(vdev, ipc_hdr, NULL);
				continue;
			}
			ivpu_jsm_msg_dump(vdev, "RX", jsm_msg, ipc_hdr->data_addr);
		}

		if (atomic_read(&ipc->rx_msg_count) > IPC_MAX_RX_MSG) {
			ivpu_warn_ratelimited(vdev, "IPC RX msg dropped, msg count %d\n",
					      IPC_MAX_RX_MSG);
			ivpu_ipc_rx_mark_free(vdev, ipc_hdr, jsm_msg);
			continue;
		}

		dispatched = false;
		spin_lock_irqsave(&ipc->cons_lock, flags);
		list_for_each_entry(cons, &ipc->cons_list, link) {
			if (ivpu_ipc_match_consumer(vdev, cons, ipc_hdr, jsm_msg)) {
				ivpu_ipc_rx_msg_add(vdev, cons, ipc_hdr, jsm_msg);
				dispatched = true;
				break;
			}
		}
		spin_unlock_irqrestore(&ipc->cons_lock, flags);

		if (!dispatched) {
			ivpu_dbg(vdev, IPC, "IPC RX msg 0x%x dropped (no consumer)\n", vpu_addr);
			ivpu_ipc_rx_mark_free(vdev, ipc_hdr, jsm_msg);
		}
	}

	queue_work(system_wq, &vdev->irq_ipc_work);
}
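
/*
 * Process-context half of RX handling: run the rx_callback of every message
 * that the IRQ handler queued on ipc->cb_msg_list, then release the messages.
 */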

void ivpu_ipc_irq_work_fn(struct work_struct *work)
{
	struct ivpu_device *vdev = container_of(work, struct ivpu_device, irq_ipc_work);
	struct ivpu_ipc_info *ipc = vdev->ipc;
	struct ivpu_ipc_rx_msg *rx_msg, *r;
	struct list_head cb_msg_list;

	INIT_LIST_HEAD(&cb_msg_list);

	spin_lock_irq(&ipc->cons_lock);
	list_splice_tail_init(&ipc->cb_msg_list, &cb_msg_list);
	spin_unlock_irq(&ipc->cons_lock);

	list_for_each_entry_safe(rx_msg, r, &cb_msg_list, link) {
		rx_msg->callback(vdev, rx_msg->ipc_hdr, rx_msg->jsm_msg);
		ivpu_ipc_rx_msg_del(vdev, rx_msg);
	}
}
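
/*
 * Allocate the TX/RX buffers shared with the firmware and the gen_pool
 * allocator used to carve TX slots out of the TX buffer, then initialize the
 * locks and lists used by the consumers.
 */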

int ivpu_ipc_init(struct ivpu_device *vdev)
{
	struct ivpu_ipc_info *ipc = vdev->ipc;
	int ret;

	ipc->mem_tx = ivpu_bo_create_global(vdev, SZ_16K, DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE);
	if (!ipc->mem_tx) {
		ivpu_err(vdev, "Failed to allocate mem_tx\n");
		return -ENOMEM;
	}

	ipc->mem_rx = ivpu_bo_create_global(vdev, SZ_16K, DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE);
	if (!ipc->mem_rx) {
		ivpu_err(vdev, "Failed to allocate mem_rx\n");
		ret = -ENOMEM;
		goto err_free_tx;
	}

	ipc->mm_tx = devm_gen_pool_create(vdev->drm.dev, __ffs(IVPU_IPC_ALIGNMENT),
					  -1, "TX_IPC_JSM");
	if (IS_ERR(ipc->mm_tx)) {
		ret = PTR_ERR(ipc->mm_tx);
		ivpu_err(vdev, "Failed to create gen pool, %pe\n", ipc->mm_tx);
		goto err_free_rx;
	}

	ret = gen_pool_add(ipc->mm_tx, ipc->mem_tx->vpu_addr, ivpu_bo_size(ipc->mem_tx), -1);
	if (ret) {
		ivpu_err(vdev, "gen_pool_add failed, ret %d\n", ret);
		goto err_free_rx;
	}

	spin_lock_init(&ipc->cons_lock);
	INIT_LIST_HEAD(&ipc->cons_list);
	INIT_LIST_HEAD(&ipc->cb_msg_list);
	ret = drmm_mutex_init(&vdev->drm, &ipc->lock);
	if (ret) {
		ivpu_err(vdev, "Failed to initialize ipc->lock, ret %d\n", ret);
		goto err_free_rx;
	}
	ivpu_ipc_reset(vdev);
	return 0;

err_free_rx:
	ivpu_bo_free(ipc->mem_rx);
err_free_tx:
	ivpu_bo_free(ipc->mem_tx);
	return ret;
}

void ivpu_ipc_fini(struct ivpu_device *vdev)
{
	struct ivpu_ipc_info *ipc = vdev->ipc;

	drm_WARN_ON(&vdev->drm, !list_empty(&ipc->cons_list));
	drm_WARN_ON(&vdev->drm, !list_empty(&ipc->cb_msg_list));
	drm_WARN_ON(&vdev->drm, atomic_read(&ipc->rx_msg_count) > 0);

	ivpu_ipc_mem_fini(vdev);
}

void ivpu_ipc_enable(struct ivpu_device *vdev)
{
	struct ivpu_ipc_info *ipc = vdev->ipc;

	mutex_lock(&ipc->lock);
	ipc->on = true;
	mutex_unlock(&ipc->lock);
}
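
/*
 * Stop accepting new TX requests, abort synchronous consumers so their
 * waiters in ivpu_ipc_receive() return, and drop any queued RX messages.
 */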

void ivpu_ipc_disable(struct ivpu_device *vdev)
{
	struct ivpu_ipc_info *ipc = vdev->ipc;
	struct ivpu_ipc_consumer *cons, *c;
	struct ivpu_ipc_rx_msg *rx_msg, *r;

	drm_WARN_ON(&vdev->drm, !list_empty(&ipc->cb_msg_list));

	mutex_lock(&ipc->lock);
	ipc->on = false;
	mutex_unlock(&ipc->lock);

	spin_lock_irq(&ipc->cons_lock);
	list_for_each_entry_safe(cons, c, &ipc->cons_list, link) {
		spin_lock(&cons->rx_lock);
		if (!cons->rx_callback)
			cons->aborted = true;
		list_for_each_entry_safe(rx_msg, r, &cons->rx_msg_list, link)
			ivpu_ipc_rx_msg_del(vdev, rx_msg);
		spin_unlock(&cons->rx_lock);
		wake_up(&cons->rx_msg_wq);
	}
	spin_unlock_irq(&ipc->cons_lock);

	drm_WARN_ON(&vdev->drm, atomic_read(&ipc->rx_msg_count) > 0);
}
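
/*
 * Zero the TX and RX buffers while IPC is disabled so no stale headers or JSM
 * messages are left behind for the next boot of the firmware.
 */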

void ivpu_ipc_reset(struct ivpu_device *vdev)
{
	struct ivpu_ipc_info *ipc = vdev->ipc;

	mutex_lock(&ipc->lock);
	drm_WARN_ON(&vdev->drm, ipc->on);

	memset(ivpu_bo_vaddr(ipc->mem_tx), 0, ivpu_bo_size(ipc->mem_tx));
	memset(ivpu_bo_vaddr(ipc->mem_rx), 0, ivpu_bo_size(ipc->mem_rx));
	wmb(); /* Flush WC buffers for TX and RX rings */

	mutex_unlock(&ipc->lock);
}