// SPDX-License-Identifier: GPL-2.0
//
// Copyright (c) 2018 MediaTek Inc.

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/mailbox_controller.h>
#include <linux/soc/mediatek/mtk-cmdq.h>

#define CMDQ_WRITE_ENABLE_MASK	BIT(0)
#define CMDQ_POLL_ENABLE_MASK	BIT(0)
#define CMDQ_EOC_IRQ_EN		BIT(0)
#define CMDQ_EOC_CMD		((u64)((CMDQ_CODE_EOC << CMDQ_OP_CODE_SHIFT)) \
				<< 32 | CMDQ_EOC_IRQ_EN)
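/*
 * Each GCE instruction is 64 bits: the low word holds the value to be
 * written (or the mask of a CMDQ_CODE_MASK instruction) and the high word
 * packs the register offset, subsys ID and opcode, as the CMDQ_EOC_CMD
 * encoding above shows. The struct below mirrors this layout, so one
 * struct assignment emits one complete instruction.
 */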
struct cmdq_instruction {
	union {
		u32 value;
		u32 mask;
	};
	union {
		u16 offset;
		u16 event;
	};
	u8 subsys;
	u8 op;
};
int cmdq_dev_get_client_reg(struct device *dev,
			    struct cmdq_client_reg *client_reg, int idx)
{
	struct of_phandle_args spec;
	int err;

	if (!client_reg)
		return -ENOENT;

	err = of_parse_phandle_with_fixed_args(dev->of_node,
					       "mediatek,gce-client-reg",
					       3, idx, &spec);
	if (err < 0) {
		dev_err(dev,
			"error %d can't parse gce-client-reg property (%d)",
			err, idx);

		return err;
	}

	client_reg->subsys = (u8)spec.args[0];
	client_reg->offset = (u16)spec.args[1];
	client_reg->size = (u16)spec.args[2];
	of_node_put(spec.np);

	return 0;
}
EXPORT_SYMBOL(cmdq_dev_get_client_reg);
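/*
 * Watchdog callback: fires if a flushed packet has not completed within
 * client->timeout_ms; the timer is armed in cmdq_pkt_flush_async() and
 * re-armed or stopped in cmdq_pkt_flush_async_cb().
 */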
static void cmdq_client_timeout(struct timer_list *t)
{
	struct cmdq_client *client = from_timer(client, t, timer);

	dev_err(client->client.dev, "cmdq timeout!\n");
}
struct cmdq_client *cmdq_mbox_create(struct device *dev, int index, u32 timeout)
{
	struct cmdq_client *client;

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return (struct cmdq_client *)-ENOMEM;

	client->timeout_ms = timeout;
	if (timeout != CMDQ_NO_TIMEOUT) {
		spin_lock_init(&client->lock);
		timer_setup(&client->timer, cmdq_client_timeout, 0);
	}
	client->pkt_cnt = 0;
	client->client.dev = dev;
	client->client.tx_block = false;
	client->chan = mbox_request_channel(&client->client, index);

	if (IS_ERR(client->chan)) {
		long err;

		dev_err(dev, "failed to request channel\n");
		err = PTR_ERR(client->chan);
		kfree(client);

		return ERR_PTR(err);
	}

	return client;
}
EXPORT_SYMBOL(cmdq_mbox_create);
void cmdq_mbox_destroy(struct cmdq_client *client)
{
	if (client->timeout_ms != CMDQ_NO_TIMEOUT) {
		spin_lock(&client->lock);
		del_timer_sync(&client->timer);
		spin_unlock(&client->lock);
	}
	mbox_free_channel(client->chan);
	kfree(client);
}
EXPORT_SYMBOL(cmdq_mbox_destroy);
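/*
 * Allocate a packet with a @size-byte command buffer and map it
 * DMA_TO_DEVICE: the CPU appends instructions at va_base while the GCE
 * hardware fetches them from pa_base, with explicit syncs around each
 * flush.
 */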
struct cmdq_pkt *cmdq_pkt_create(struct cmdq_client *client, size_t size)
{
	struct cmdq_pkt *pkt;
	struct device *dev;
	dma_addr_t dma_addr;

	pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
	if (!pkt)
		return ERR_PTR(-ENOMEM);
	pkt->va_base = kzalloc(size, GFP_KERNEL);
	if (!pkt->va_base) {
		kfree(pkt);
		return ERR_PTR(-ENOMEM);
	}
	pkt->buf_size = size;
	pkt->cl = (void *)client;

	dev = client->chan->mbox->dev;
	dma_addr = dma_map_single(dev, pkt->va_base, pkt->buf_size,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		dev_err(dev, "dma map failed, size=%u\n", (u32)(u64)size);
		kfree(pkt->va_base);
		kfree(pkt);
		return ERR_PTR(-ENOMEM);
	}

	pkt->pa_base = dma_addr;

	return pkt;
}
EXPORT_SYMBOL(cmdq_pkt_create);
void cmdq_pkt_destroy(struct cmdq_pkt *pkt)
{
	struct cmdq_client *client = (struct cmdq_client *)pkt->cl;

	dma_unmap_single(client->chan->mbox->dev, pkt->pa_base, pkt->buf_size,
			 DMA_TO_DEVICE);
	kfree(pkt->va_base);
	kfree(pkt);
}
EXPORT_SYMBOL(cmdq_pkt_destroy);
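/*
 * Append one instruction to the packet. On overflow the buffer is not
 * grown: cmd_buf_size keeps advancing so the caller can learn the real
 * size required (see the comment below), and -ENOMEM is returned.
 */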
static int cmdq_pkt_append_command(struct cmdq_pkt *pkt,
				   struct cmdq_instruction inst)
{
	struct cmdq_instruction *cmd_ptr;

	if (unlikely(pkt->cmd_buf_size + CMDQ_INST_SIZE > pkt->buf_size)) {
		/*
		 * Even after the allocated buffer size (pkt->buf_size) is
		 * used up, the real required size (pkt->cmd_buf_size) keeps
		 * being increased, so that the user knows how much memory
		 * should ultimately be allocated after appending all commands
		 * and flushing the command packet. Therefore, the user can
		 * call cmdq_pkt_create() again with the real required buffer
		 * size.
		 */
		pkt->cmd_buf_size += CMDQ_INST_SIZE;
		WARN_ONCE(1, "%s: buffer size %u is too small!\n",
			  __func__, (u32)pkt->buf_size);
		return -ENOMEM;
	}

	cmd_ptr = pkt->va_base + pkt->cmd_buf_size;
	*cmd_ptr = inst;
	pkt->cmd_buf_size += CMDQ_INST_SIZE;

	return 0;
}
int cmdq_pkt_write(struct cmdq_pkt *pkt, u8 subsys, u16 offset, u32 value)
{
	struct cmdq_instruction inst;

	inst.op = CMDQ_CODE_WRITE;
	inst.value = value;
	inst.offset = offset;
	inst.subsys = subsys;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_write);
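/*
 * Masked write: if @mask leaves any bit uncovered, a CMDQ_CODE_MASK
 * instruction carrying ~mask is emitted first and the write offset is
 * tagged with CMDQ_WRITE_ENABLE_MASK, telling the hardware to apply
 * the mask to the write that follows.
 */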
int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u8 subsys,
			u16 offset, u32 value, u32 mask)
{
	struct cmdq_instruction inst = { {0} };
	u16 offset_mask = offset;
	int err;

	if (mask != 0xffffffff) {
		inst.op = CMDQ_CODE_MASK;
		inst.mask = ~mask;
		err = cmdq_pkt_append_command(pkt, inst);
		if (err < 0)
			return err;

		offset_mask |= CMDQ_WRITE_ENABLE_MASK;
	}
	err = cmdq_pkt_write(pkt, subsys, offset_mask, value);

	return err;
}
EXPORT_SYMBOL(cmdq_pkt_write_mask);
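/*
 * Append a wait-for-event instruction: the GCE thread stalls until
 * hardware event token @event is signalled.
 */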
int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event)
{
	struct cmdq_instruction inst = { {0} };

	if (event >= CMDQ_MAX_EVENT)
		return -EINVAL;

	inst.op = CMDQ_CODE_WFE;
	inst.value = CMDQ_WFE_OPTION;
	inst.event = event;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_wfe);
int cmdq_pkt_clear_event(struct cmdq_pkt *pkt, u16 event)
{
	struct cmdq_instruction inst = { {0} };

	if (event >= CMDQ_MAX_EVENT)
		return -EINVAL;

	inst.op = CMDQ_CODE_WFE;
	inst.value = CMDQ_WFE_UPDATE;
	inst.event = event;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_clear_event);
int cmdq_pkt_poll(struct cmdq_pkt *pkt, u8 subsys,
		  u16 offset, u32 value)
{
	struct cmdq_instruction inst = { {0} };
	int err;

	inst.op = CMDQ_CODE_POLL;
	inst.value = value;
	inst.offset = offset;
	inst.subsys = subsys;
	err = cmdq_pkt_append_command(pkt, inst);

	return err;
}
EXPORT_SYMBOL(cmdq_pkt_poll);
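/*
 * Masked poll: as in cmdq_pkt_write_mask(), a CMDQ_CODE_MASK instruction
 * carrying ~mask precedes the poll, and CMDQ_POLL_ENABLE_MASK in the
 * offset marks the poll as masked.
 */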
int cmdq_pkt_poll_mask(struct cmdq_pkt *pkt, u8 subsys,
		       u16 offset, u32 value, u32 mask)
{
	struct cmdq_instruction inst = { {0} };
	int err;

	inst.op = CMDQ_CODE_MASK;
	inst.mask = ~mask;
	err = cmdq_pkt_append_command(pkt, inst);
	if (err < 0)
		return err;

	offset = offset | CMDQ_POLL_ENABLE_MASK;
	err = cmdq_pkt_poll(pkt, subsys, offset, value);

	return err;
}
EXPORT_SYMBOL(cmdq_pkt_poll_mask);
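/*
 * Terminate the packet: an EOC (end-of-command) instruction with
 * CMDQ_EOC_IRQ_EN raises the completion interrupt, followed by a jump
 * instruction closing out the command buffer.
 */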
static int cmdq_pkt_finalize(struct cmdq_pkt *pkt)
{
	struct cmdq_instruction inst = { {0} };
	int err;

	/* insert EOC and generate IRQ for each command iteration */
	inst.op = CMDQ_CODE_EOC;
	inst.value = CMDQ_EOC_IRQ_EN;
	err = cmdq_pkt_append_command(pkt, inst);
	if (err < 0)
		return err;

	/* JUMP to end */
	inst.op = CMDQ_CODE_JUMP;
	inst.value = CMDQ_JUMP_PASS;
	err = cmdq_pkt_append_command(pkt, inst);

	return err;
}
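/*
 * Mailbox completion callback: update the watchdog (stop it when no
 * packets remain in flight, otherwise push its deadline out), give the
 * buffer back to the CPU, then chain into the user callback stored in
 * pkt->cb.
 */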
static void cmdq_pkt_flush_async_cb(struct cmdq_cb_data data)
{
	struct cmdq_pkt *pkt = (struct cmdq_pkt *)data.data;
	struct cmdq_task_cb *cb = &pkt->cb;
	struct cmdq_client *client = (struct cmdq_client *)pkt->cl;

	if (client->timeout_ms != CMDQ_NO_TIMEOUT) {
		unsigned long flags = 0;

		spin_lock_irqsave(&client->lock, flags);
		if (--client->pkt_cnt == 0)
			del_timer(&client->timer);
		else
			mod_timer(&client->timer, jiffies +
				  msecs_to_jiffies(client->timeout_ms));
		spin_unlock_irqrestore(&client->lock, flags);
	}

	dma_sync_single_for_cpu(client->chan->mbox->dev, pkt->pa_base,
				pkt->cmd_buf_size, DMA_TO_DEVICE);
	if (cb->cb) {
		data.data = cb->data;
		cb->cb(data);
	}
}
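/*
 * Finalize the packet, sync its buffer for the device, arm the watchdog
 * for the first in-flight packet and post the packet to the mailbox.
 * The channel runs with tx_block = false, so txdone is signalled
 * immediately and completion arrives via cmdq_pkt_flush_async_cb().
 */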
int cmdq_pkt_flush_async(struct cmdq_pkt *pkt, cmdq_async_flush_cb cb,
			 void *data)
{
	int err;
	unsigned long flags = 0;
	struct cmdq_client *client = (struct cmdq_client *)pkt->cl;

	err = cmdq_pkt_finalize(pkt);
	if (err < 0)
		return err;

	pkt->cb.cb = cb;
	pkt->cb.data = data;
	pkt->async_cb.cb = cmdq_pkt_flush_async_cb;
	pkt->async_cb.data = pkt;

	dma_sync_single_for_device(client->chan->mbox->dev, pkt->pa_base,
				   pkt->cmd_buf_size, DMA_TO_DEVICE);

	if (client->timeout_ms != CMDQ_NO_TIMEOUT) {
		spin_lock_irqsave(&client->lock, flags);
		if (client->pkt_cnt++ == 0)
			mod_timer(&client->timer, jiffies +
				  msecs_to_jiffies(client->timeout_ms));
		spin_unlock_irqrestore(&client->lock, flags);
	}

	mbox_send_message(client->chan, pkt);
	/* We can send next packet immediately, so just call txdone. */
	mbox_client_txdone(client->chan, 0);

	return 0;
}
EXPORT_SYMBOL(cmdq_pkt_flush_async);
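/*
 * Synchronous flush support: cmdq_pkt_flush() wraps the async path in a
 * completion, and the callback below records whether the hardware
 * reported an abnormal status.
 */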
struct cmdq_flush_completion {
	struct completion cmplt;
	bool err;
};
static void cmdq_pkt_flush_cb(struct cmdq_cb_data data)
{
	struct cmdq_flush_completion *cmplt;

	cmplt = (struct cmdq_flush_completion *)data.data;
	if (data.sta != CMDQ_CB_NORMAL)
		cmplt->err = true;
	else
		cmplt->err = false;
	complete(&cmplt->cmplt);
}
int cmdq_pkt_flush(struct cmdq_pkt *pkt)
{
	struct cmdq_flush_completion cmplt;
	int err;

	init_completion(&cmplt.cmplt);
	err = cmdq_pkt_flush_async(pkt, cmdq_pkt_flush_cb, &cmplt);
	if (err < 0)
		return err;
	wait_for_completion(&cmplt.cmplt);

	return cmplt.err ? -EFAULT : 0;
}
EXPORT_SYMBOL(cmdq_pkt_flush);
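/*
 * A minimal usage sketch (illustrative only: the device pointer, subsys
 * ID, register offset and value below are hypothetical, and error
 * handling is elided):
 *
 *	struct cmdq_client *cl = cmdq_mbox_create(dev, 0, 1000);
 *	struct cmdq_pkt *pkt = cmdq_pkt_create(cl, PAGE_SIZE);
 *
 *	cmdq_pkt_write(pkt, subsys, 0x10, 0x1);	// queue a register write
 *	cmdq_pkt_flush(pkt);			// run it, wait for EOC IRQ
 *
 *	cmdq_pkt_destroy(pkt);
 *	cmdq_mbox_destroy(cl);
 */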
MODULE_LICENSE("GPL v2");