// SPDX-License-Identifier: GPL-2.0
//
// Copyright (c) 2018 MediaTek Inc.
#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/mailbox_controller.h>
#include <linux/soc/mediatek/mtk-cmdq.h>
#define CMDQ_ARG_A_WRITE_MASK	0xffff
#define CMDQ_WRITE_ENABLE_MASK	BIT(0)
#define CMDQ_EOC_IRQ_EN		BIT(0)
#define CMDQ_EOC_CMD		((u64)((CMDQ_CODE_EOC << CMDQ_OP_CODE_SHIFT)) \
				<< 32 | CMDQ_EOC_IRQ_EN)
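
/*
 * Each CMDQ instruction is a single 64-bit word: the op code and arg_a sit
 * in the high 32 bits, arg_b in the low 32 bits (see
 * cmdq_pkt_append_command() below). CMDQ_EOC_CMD is the pre-encoded
 * end-of-command instruction with its IRQ-enable flag set.
 */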
static void cmdq_client_timeout(struct timer_list *t)
{
	struct cmdq_client *client = from_timer(client, t, timer);

	dev_err(client->client.dev, "cmdq timeout!\n");
}
struct cmdq_client *cmdq_mbox_create(struct device *dev, int index,
				     u32 timeout)
{
	struct cmdq_client *client;

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return ERR_PTR(-ENOMEM);

	client->timeout_ms = timeout;
	if (timeout != CMDQ_NO_TIMEOUT) {
		spin_lock_init(&client->lock);
		timer_setup(&client->timer, cmdq_client_timeout, 0);
	}
	client->pkt_cnt = 0;
	client->client.dev = dev;
	client->client.tx_block = false;
	client->chan = mbox_request_channel(&client->client, index);

	if (IS_ERR(client->chan)) {
		long err;

		dev_err(dev, "failed to request channel\n");
		err = PTR_ERR(client->chan);
		kfree(client);

		return ERR_PTR(err);
	}

	return client;
}
EXPORT_SYMBOL(cmdq_mbox_create);
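
/*
 * Illustrative usage sketch (the platform device, channel index and timeout
 * value are hypothetical): a client driver typically creates one cmdq_client
 * per mailbox channel in its probe path:
 *
 *	struct cmdq_client *cl;
 *
 *	cl = cmdq_mbox_create(&pdev->dev, 0, 1000);
 *	if (IS_ERR(cl))
 *		return PTR_ERR(cl);
 */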
void cmdq_mbox_destroy(struct cmdq_client *client)
{
	if (client->timeout_ms != CMDQ_NO_TIMEOUT) {
		spin_lock(&client->lock);
		del_timer_sync(&client->timer);
		spin_unlock(&client->lock);
	}
	mbox_free_channel(client->chan);
	kfree(client);
}
EXPORT_SYMBOL(cmdq_mbox_destroy);
struct cmdq_pkt *cmdq_pkt_create(struct cmdq_client *client, size_t size)
{
	struct cmdq_pkt *pkt;
	struct device *dev;
	dma_addr_t dma_addr;

	pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
	if (!pkt)
		return ERR_PTR(-ENOMEM);
	pkt->va_base = kzalloc(size, GFP_KERNEL);
	if (!pkt->va_base) {
		kfree(pkt);
		return ERR_PTR(-ENOMEM);
	}
	pkt->buf_size = size;
	pkt->cl = (void *)client;

	dev = client->chan->mbox->dev;
	dma_addr = dma_map_single(dev, pkt->va_base, pkt->buf_size,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		dev_err(dev, "dma map failed, size=%u\n", (u32)(u64)size);
		kfree(pkt->va_base);
		kfree(pkt);
		return ERR_PTR(-ENOMEM);
	}

	pkt->pa_base = dma_addr;

	return pkt;
}
EXPORT_SYMBOL(cmdq_pkt_create);
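
/*
 * Sizing note: cmdq_pkt_finalize() appends two trailing instructions (EOC
 * and JUMP) at flush time, so a packet needs room for at least
 * 2 * CMDQ_INST_SIZE beyond the caller's own commands.
 */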
void cmdq_pkt_destroy(struct cmdq_pkt *pkt)
{
	struct cmdq_client *client = (struct cmdq_client *)pkt->cl;

	dma_unmap_single(client->chan->mbox->dev, pkt->pa_base, pkt->buf_size,
			 DMA_TO_DEVICE);
	kfree(pkt->va_base);
	kfree(pkt);
}
EXPORT_SYMBOL(cmdq_pkt_destroy);
static int cmdq_pkt_append_command(struct cmdq_pkt *pkt, enum cmdq_code code,
				   u32 arg_a, u32 arg_b)
{
	u64 *cmd_ptr;

	if (unlikely(pkt->cmd_buf_size + CMDQ_INST_SIZE > pkt->buf_size)) {
		/*
		 * In case the allocated buffer size (pkt->buf_size) is used
		 * up, the real required size (pkt->cmd_buf_size) is still
		 * increased, so that the user knows how much memory should
		 * ultimately be allocated after appending all commands and
		 * flushing the command packet. Therefore, the user can call
		 * cmdq_pkt_create() again with the real required buffer size.
		 */
		pkt->cmd_buf_size += CMDQ_INST_SIZE;
		WARN_ONCE(1, "%s: buffer size %u is too small!\n",
			  __func__, (u32)pkt->buf_size);
		return -ENOMEM;
	}
	cmd_ptr = pkt->va_base + pkt->cmd_buf_size;
	(*cmd_ptr) = (u64)((code << CMDQ_OP_CODE_SHIFT) | arg_a) << 32 | arg_b;
	pkt->cmd_buf_size += CMDQ_INST_SIZE;

	return 0;
}
int cmdq_pkt_write(struct cmdq_pkt *pkt, u32 value, u32 subsys, u32 offset)
{
	u32 arg_a = (offset & CMDQ_ARG_A_WRITE_MASK) |
		    (subsys << CMDQ_SUBSYS_SHIFT);

	return cmdq_pkt_append_command(pkt, CMDQ_CODE_WRITE, arg_a, value);
}
EXPORT_SYMBOL(cmdq_pkt_write);
int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u32 value,
			u32 subsys, u32 offset, u32 mask)
{
	u32 offset_mask = offset;
	int err = 0;

	if (mask != 0xffffffff) {
		err = cmdq_pkt_append_command(pkt, CMDQ_CODE_MASK, 0, ~mask);
		offset_mask |= CMDQ_WRITE_ENABLE_MASK;
	}
	err |= cmdq_pkt_write(pkt, value, subsys, offset_mask);

	return err;
}
EXPORT_SYMBOL(cmdq_pkt_write_mask);
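
/*
 * Illustrative example (the subsys and offset values are hypothetical):
 * set only bit 0 of a register while leaving the remaining bits untouched
 * by passing a single-bit mask:
 *
 *	err = cmdq_pkt_write_mask(pkt, 0x1, subsys, offset, BIT(0));
 */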
int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u32 event)
{
	u32 arg_b;

	if (event >= CMDQ_MAX_EVENT)
		return -EINVAL;

	/*
	 * WFE arg_b
	 * bit 0-11: wait value
	 * bit 15: 1 - wait, 0 - no wait
	 * bit 16-27: update value
	 * bit 31: 1 - update, 0 - no update
	 */
	arg_b = CMDQ_WFE_UPDATE | CMDQ_WFE_WAIT | CMDQ_WFE_WAIT_VALUE;

	return cmdq_pkt_append_command(pkt, CMDQ_CODE_WFE, event, arg_b);
}
EXPORT_SYMBOL(cmdq_pkt_wfe);
int cmdq_pkt_clear_event(struct cmdq_pkt *pkt, u32 event)
{
	if (event >= CMDQ_MAX_EVENT)
		return -EINVAL;

	return cmdq_pkt_append_command(pkt, CMDQ_CODE_WFE, event,
				       CMDQ_WFE_UPDATE);
}
EXPORT_SYMBOL(cmdq_pkt_clear_event);
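
/*
 * Illustrative example (the event number is hypothetical): clearing an
 * event before waiting on it makes the wait complete on the next
 * occurrence rather than a stale one, so subsequent writes land after
 * that occurrence:
 *
 *	cmdq_pkt_clear_event(pkt, event);
 *	cmdq_pkt_wfe(pkt, event);
 *	cmdq_pkt_write(pkt, value, subsys, offset);
 */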
static int cmdq_pkt_finalize(struct cmdq_pkt *pkt)
{
	int err;

	/* insert EOC and generate IRQ for each command iteration */
	err = cmdq_pkt_append_command(pkt, CMDQ_CODE_EOC, 0, CMDQ_EOC_IRQ_EN);

	/* JUMP to end */
	err |= cmdq_pkt_append_command(pkt, CMDQ_CODE_JUMP, 0, CMDQ_JUMP_PASS);

	return err;
}
static void cmdq_pkt_flush_async_cb(struct cmdq_cb_data data)
{
	struct cmdq_pkt *pkt = (struct cmdq_pkt *)data.data;
	struct cmdq_task_cb *cb = &pkt->cb;
	struct cmdq_client *client = (struct cmdq_client *)pkt->cl;

	if (client->timeout_ms != CMDQ_NO_TIMEOUT) {
		unsigned long flags = 0;

		spin_lock_irqsave(&client->lock, flags);
		if (--client->pkt_cnt == 0)
			del_timer(&client->timer);
		else
			mod_timer(&client->timer, jiffies +
				  msecs_to_jiffies(client->timeout_ms));
		spin_unlock_irqrestore(&client->lock, flags);
	}

	dma_sync_single_for_cpu(client->chan->mbox->dev, pkt->pa_base,
				pkt->cmd_buf_size, DMA_TO_DEVICE);
	if (cb->cb) {
		data.data = cb->data;
		cb->cb(data);
	}
}
int cmdq_pkt_flush_async(struct cmdq_pkt *pkt, cmdq_async_flush_cb cb,
			 void *data)
{
	int err;
	unsigned long flags = 0;
	struct cmdq_client *client = (struct cmdq_client *)pkt->cl;

	err = cmdq_pkt_finalize(pkt);
	if (err < 0)
		return err;

	pkt->cb.cb = cb;
	pkt->cb.data = data;
	pkt->async_cb.cb = cmdq_pkt_flush_async_cb;
	pkt->async_cb.data = pkt;

	dma_sync_single_for_device(client->chan->mbox->dev, pkt->pa_base,
				   pkt->cmd_buf_size, DMA_TO_DEVICE);

	if (client->timeout_ms != CMDQ_NO_TIMEOUT) {
		spin_lock_irqsave(&client->lock, flags);
		if (client->pkt_cnt++ == 0)
			mod_timer(&client->timer, jiffies +
				  msecs_to_jiffies(client->timeout_ms));
		spin_unlock_irqrestore(&client->lock, flags);
	}

	mbox_send_message(client->chan, pkt);
	/* We can send next packet immediately, so just call txdone. */
	mbox_client_txdone(client->chan, 0);

	return 0;
}
EXPORT_SYMBOL(cmdq_pkt_flush_async);
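
/*
 * Illustrative async usage (my_cb and my_data are hypothetical): the
 * callback runs once the controller reports the packet done, with data.sta
 * indicating success (CMDQ_CB_NORMAL) or failure:
 *
 *	static void my_cb(struct cmdq_cb_data data)
 *	{
 *		if (data.sta != CMDQ_CB_NORMAL)
 *			pr_warn("cmdq: async flush failed\n");
 *	}
 *
 *	err = cmdq_pkt_flush_async(pkt, my_cb, my_data);
 */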
struct cmdq_flush_completion {
	struct completion cmplt;
	bool err;
};
static void cmdq_pkt_flush_cb(struct cmdq_cb_data data)
{
	struct cmdq_flush_completion *cmplt;

	cmplt = (struct cmdq_flush_completion *)data.data;
	if (data.sta != CMDQ_CB_NORMAL)
		cmplt->err = true;
	else
		cmplt->err = false;
	complete(&cmplt->cmplt);
}
int cmdq_pkt_flush(struct cmdq_pkt *pkt)
{
	struct cmdq_flush_completion cmplt;
	int err;

	init_completion(&cmplt.cmplt);
	err = cmdq_pkt_flush_async(pkt, cmdq_pkt_flush_cb, &cmplt);
	if (err < 0)
		return err;
	wait_for_completion(&cmplt.cmplt);

	return cmplt.err ? -EFAULT : 0;
}
EXPORT_SYMBOL(cmdq_pkt_flush);
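
/*
 * Putting it all together (illustrative sketch; cl, value, subsys and
 * offset are hypothetical): build a packet, flush it synchronously, then
 * release it:
 *
 *	struct cmdq_pkt *pkt;
 *	int err;
 *
 *	pkt = cmdq_pkt_create(cl, PAGE_SIZE);
 *	if (IS_ERR(pkt))
 *		return PTR_ERR(pkt);
 *	cmdq_pkt_write(pkt, value, subsys, offset);
 *	err = cmdq_pkt_flush(pkt);
 *	cmdq_pkt_destroy(pkt);
 */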
MODULE_LICENSE("GPL v2");