// SPDX-License-Identifier: GPL-2.0
//
// Copyright (c) 2018 MediaTek Inc.

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/mailbox_controller.h>
#include <linux/soc/mediatek/mtk-cmdq.h>

#define CMDQ_WRITE_ENABLE_MASK	BIT(0)
#define CMDQ_POLL_ENABLE_MASK	BIT(0)
#define CMDQ_EOC_IRQ_EN		BIT(0)
#define CMDQ_REG_TYPE		1
#define CMDQ_JUMP_RELATIVE	1
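
/*
 * One 64-bit GCE instruction. Which union member is meaningful depends on
 * the opcode in 'op'; the layout below simply mirrors how the helpers in
 * this file pack their operands.
 */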
struct cmdq_instruction {
	union {
		u32 value;
		u32 mask;
		struct {
			u16 arg_c;
			u16 src_reg;
		};
	};
	union {
		u16 offset;
		u16 event;
		u16 reg_dst;
	};
	union {
		u8 subsys;
		struct {
			u8 sop:5;
			u8 arg_c_t:1;
			u8 src_t:1;
			u8 dst_t:1;
		};
	};
	u8 op;
};

int cmdq_dev_get_client_reg(struct device *dev,
			    struct cmdq_client_reg *client_reg, int idx)
{
	struct of_phandle_args spec;
	int err;

	if (!client_reg)
		return -ENOENT;

	err = of_parse_phandle_with_fixed_args(dev->of_node,
					       "mediatek,gce-client-reg",
					       3, idx, &spec);
	if (err < 0) {
		dev_err(dev,
			"error %d can't parse gce-client-reg property (%d)",
			err, idx);

		return err;
	}

	client_reg->subsys = (u8)spec.args[0];
	client_reg->offset = (u16)spec.args[1];
	client_reg->size = (u16)spec.args[2];
	of_node_put(spec.np);

	return 0;
}
EXPORT_SYMBOL(cmdq_dev_get_client_reg);

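/*
 * Create a client bound to one GCE mailbox channel. The channel is
 * requested non-blocking (tx_block = false) and with knows_txdone = true,
 * since the flush path below signals tx completion itself via
 * mbox_client_txdone().
 */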
struct cmdq_client *cmdq_mbox_create(struct device *dev, int index)
{
	struct cmdq_client *client;

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return ERR_PTR(-ENOMEM);

	client->client.dev = dev;
	client->client.tx_block = false;
	client->client.knows_txdone = true;
	client->chan = mbox_request_channel(&client->client, index);

	if (IS_ERR(client->chan)) {
		long err;

		dev_err(dev, "failed to request channel\n");
		err = PTR_ERR(client->chan);
		kfree(client);

		return ERR_PTR(err);
	}

	return client;
}
EXPORT_SYMBOL(cmdq_mbox_create);

void cmdq_mbox_destroy(struct cmdq_client *client)
{
	mbox_free_channel(client->chan);
	kfree(client);
}
EXPORT_SYMBOL(cmdq_mbox_destroy);

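/*
 * Allocate a command packet and DMA-map its buffer for the GCE: the CPU
 * writes instructions through pkt->va_base while the hardware fetches them
 * from pkt->pa_base, with the dma_sync_* calls in the flush path keeping
 * the two views coherent.
 */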
struct cmdq_pkt *cmdq_pkt_create(struct cmdq_client *client, size_t size)
{
	struct cmdq_pkt *pkt;
	dma_addr_t dma_addr;
	struct device *dev;

	pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
	if (!pkt)
		return ERR_PTR(-ENOMEM);
	pkt->va_base = kzalloc(size, GFP_KERNEL);
	if (!pkt->va_base) {
		kfree(pkt);
		return ERR_PTR(-ENOMEM);
	}
	pkt->buf_size = size;
	pkt->cl = (void *)client;

	dev = client->chan->mbox->dev;
	dma_addr = dma_map_single(dev, pkt->va_base, pkt->buf_size,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		dev_err(dev, "dma map failed, size=%u\n", (u32)(u64)size);
		kfree(pkt->va_base);
		kfree(pkt);
		return ERR_PTR(-ENOMEM);
	}

	pkt->pa_base = dma_addr;

	return pkt;
}
EXPORT_SYMBOL(cmdq_pkt_create);

void cmdq_pkt_destroy(struct cmdq_pkt *pkt)
{
	struct cmdq_client *client = (struct cmdq_client *)pkt->cl;

	dma_unmap_single(client->chan->mbox->dev, pkt->pa_base, pkt->buf_size,
			 DMA_TO_DEVICE);
	kfree(pkt->va_base);
	kfree(pkt);
}
EXPORT_SYMBOL(cmdq_pkt_destroy);

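/*
 * Append one instruction to the packet buffer. Every cmdq_pkt_* helper
 * below funnels through this function.
 */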
static int cmdq_pkt_append_command(struct cmdq_pkt *pkt,
				   struct cmdq_instruction inst)
{
	struct cmdq_instruction *cmd_ptr;

	if (unlikely(pkt->cmd_buf_size + CMDQ_INST_SIZE > pkt->buf_size)) {
		/*
		 * When the allocated buffer (pkt->buf_size) is used up, the
		 * real required size (pkt->cmd_buf_size) still grows, so
		 * that the user knows how much memory should ultimately be
		 * allocated after appending all commands and flushing the
		 * command packet. Therefore, the user can call
		 * cmdq_pkt_create() again with the real required buffer size.
		 */
		pkt->cmd_buf_size += CMDQ_INST_SIZE;
		WARN_ONCE(1, "%s: buffer size %u is too small!\n",
			  __func__, (u32)pkt->buf_size);
		return -ENOMEM;
	}

	cmd_ptr = pkt->va_base + pkt->cmd_buf_size;
	*cmd_ptr = inst;
	pkt->cmd_buf_size += CMDQ_INST_SIZE;

	return 0;
}

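/* Emit a plain register write: subsys/offset addressing, immediate value. */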
int cmdq_pkt_write(struct cmdq_pkt *pkt, u8 subsys, u16 offset, u32 value)
{
	struct cmdq_instruction inst;

	inst.op = CMDQ_CODE_WRITE;
	inst.value = value;
	inst.offset = offset;
	inst.subsys = subsys;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_write);

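/*
 * Masked register write. For a partial mask, a CMDQ_CODE_MASK instruction
 * carrying the inverted mask is emitted first, and CMDQ_WRITE_ENABLE_MASK
 * in the offset tells the following write to apply it.
 */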
int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u8 subsys,
			u16 offset, u32 value, u32 mask)
{
	struct cmdq_instruction inst = { {0} };
	u16 offset_mask = offset;
	int err;

	if (mask != 0xffffffff) {
		inst.op = CMDQ_CODE_MASK;
		inst.mask = ~mask;
		err = cmdq_pkt_append_command(pkt, inst);
		if (err < 0)
			return err;

		offset_mask |= CMDQ_WRITE_ENABLE_MASK;
	}
	err = cmdq_pkt_write(pkt, subsys, offset_mask, value);

	return err;
}
EXPORT_SYMBOL(cmdq_pkt_write_mask);

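/*
 * Read into an internal GCE register from a full address: the high address
 * bits live in internal register 'high_addr_reg_idx' (sop), the low 16
 * bits are immediate.
 */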
int cmdq_pkt_read_s(struct cmdq_pkt *pkt, u16 high_addr_reg_idx, u16 addr_low,
		    u16 reg_idx)
{
	struct cmdq_instruction inst = {};

	inst.op = CMDQ_CODE_READ_S;
	inst.dst_t = CMDQ_REG_TYPE;
	inst.sop = high_addr_reg_idx;
	inst.reg_dst = reg_idx;
	inst.src_reg = addr_low;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_read_s);

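/*
 * The cmdq_pkt_write_s* helpers below write to a full address assembled
 * the same way; the source is either an internal register (_s, _s_mask) or
 * an immediate value (_s_value, _s_mask_value), optionally with a
 * preceding mask instruction.
 */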
int cmdq_pkt_write_s(struct cmdq_pkt *pkt, u16 high_addr_reg_idx,
		     u16 addr_low, u16 src_reg_idx)
{
	struct cmdq_instruction inst = {};

	inst.op = CMDQ_CODE_WRITE_S;
	inst.src_t = CMDQ_REG_TYPE;
	inst.sop = high_addr_reg_idx;
	inst.offset = addr_low;
	inst.src_reg = src_reg_idx;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_write_s);

int cmdq_pkt_write_s_mask(struct cmdq_pkt *pkt, u16 high_addr_reg_idx,
			  u16 addr_low, u16 src_reg_idx, u32 mask)
{
	struct cmdq_instruction inst = {};
	int err;

	inst.op = CMDQ_CODE_MASK;
	inst.mask = ~mask;
	err = cmdq_pkt_append_command(pkt, inst);
	if (err < 0)
		return err;

	inst.mask = 0;
	inst.op = CMDQ_CODE_WRITE_S_MASK;
	inst.src_t = CMDQ_REG_TYPE;
	inst.sop = high_addr_reg_idx;
	inst.offset = addr_low;
	inst.src_reg = src_reg_idx;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_write_s_mask);

int cmdq_pkt_write_s_value(struct cmdq_pkt *pkt, u8 high_addr_reg_idx,
			   u16 addr_low, u32 value)
{
	struct cmdq_instruction inst = {};

	inst.op = CMDQ_CODE_WRITE_S;
	inst.sop = high_addr_reg_idx;
	inst.offset = addr_low;
	inst.value = value;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_write_s_value);

int cmdq_pkt_write_s_mask_value(struct cmdq_pkt *pkt, u8 high_addr_reg_idx,
				u16 addr_low, u32 value, u32 mask)
{
	struct cmdq_instruction inst = {};
	int err;

	inst.op = CMDQ_CODE_MASK;
	inst.mask = ~mask;
	err = cmdq_pkt_append_command(pkt, inst);
	if (err < 0)
		return err;

	inst.op = CMDQ_CODE_WRITE_S_MASK;
	inst.sop = high_addr_reg_idx;
	inst.offset = addr_low;
	inst.value = value;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_write_s_mask_value);

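/*
 * Event instructions: cmdq_pkt_wfe() blocks the GCE thread until a
 * hardware event fires (optionally clearing it afterwards), while the
 * clear/set helpers only update the event state without waiting.
 */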
int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event, bool clear)
{
	struct cmdq_instruction inst = { {0} };
	u32 clear_option = clear ? CMDQ_WFE_UPDATE : 0;

	if (event >= CMDQ_MAX_EVENT)
		return -EINVAL;

	inst.op = CMDQ_CODE_WFE;
	inst.value = CMDQ_WFE_OPTION | clear_option;
	inst.event = event;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_wfe);

int cmdq_pkt_clear_event(struct cmdq_pkt *pkt, u16 event)
{
	struct cmdq_instruction inst = { {0} };

	if (event >= CMDQ_MAX_EVENT)
		return -EINVAL;

	inst.op = CMDQ_CODE_WFE;
	inst.value = CMDQ_WFE_UPDATE;
	inst.event = event;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_clear_event);

int cmdq_pkt_set_event(struct cmdq_pkt *pkt, u16 event)
{
	struct cmdq_instruction inst = {};

	if (event >= CMDQ_MAX_EVENT)
		return -EINVAL;

	inst.op = CMDQ_CODE_WFE;
	inst.value = CMDQ_WFE_UPDATE | CMDQ_WFE_UPDATE_VALUE;
	inst.event = event;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_set_event);

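/*
 * Poll instructions make the GCE thread busy-wait until the register at
 * subsys/offset matches 'value'; the masked variant emits a mask
 * instruction first and sets CMDQ_POLL_ENABLE_MASK in the offset.
 */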
int cmdq_pkt_poll(struct cmdq_pkt *pkt, u8 subsys,
		  u16 offset, u32 value)
{
	struct cmdq_instruction inst = { {0} };
	int err;

	inst.op = CMDQ_CODE_POLL;
	inst.value = value;
	inst.offset = offset;
	inst.subsys = subsys;
	err = cmdq_pkt_append_command(pkt, inst);

	return err;
}
EXPORT_SYMBOL(cmdq_pkt_poll);

int cmdq_pkt_poll_mask(struct cmdq_pkt *pkt, u8 subsys,
		       u16 offset, u32 value, u32 mask)
{
	struct cmdq_instruction inst = { {0} };
	int err;

	inst.op = CMDQ_CODE_MASK;
	inst.mask = ~mask;
	err = cmdq_pkt_append_command(pkt, inst);
	if (err < 0)
		return err;

	offset = offset | CMDQ_POLL_ENABLE_MASK;
	err = cmdq_pkt_poll(pkt, subsys, offset, value);

	return err;
}
EXPORT_SYMBOL(cmdq_pkt_poll_mask);

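/* Load an immediate value into an internal GCE register (CMDQ_CODE_LOGIC). */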
int cmdq_pkt_assign(struct cmdq_pkt *pkt, u16 reg_idx, u32 value)
{
	struct cmdq_instruction inst = {};

	inst.op = CMDQ_CODE_LOGIC;
	inst.dst_t = CMDQ_REG_TYPE;
	inst.reg_dst = reg_idx;
	inst.value = value;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_assign);

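/*
 * Emit a jump instruction. The target address is pre-shifted right by the
 * controller's PA shift (cmdq_get_shift_pa()) so it fits the instruction's
 * 32-bit value field.
 */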
int cmdq_pkt_jump(struct cmdq_pkt *pkt, dma_addr_t addr)
{
	struct cmdq_instruction inst = {};

	inst.op = CMDQ_CODE_JUMP;
	inst.offset = CMDQ_JUMP_RELATIVE;
	inst.value = addr >>
		cmdq_get_shift_pa(((struct cmdq_client *)pkt->cl)->chan);
	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_jump);

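/*
 * Close the packet: an end-of-command instruction that raises the
 * completion IRQ, followed by a terminating jump.
 */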
int cmdq_pkt_finalize(struct cmdq_pkt *pkt)
{
	struct cmdq_instruction inst = { {0} };
	int err;

	/* insert EOC and generate IRQ for each command iteration */
	inst.op = CMDQ_CODE_EOC;
	inst.value = CMDQ_EOC_IRQ_EN;
	err = cmdq_pkt_append_command(pkt, inst);
	if (err < 0)
		return err;

	/* jump to end */
	inst.op = CMDQ_CODE_JUMP;
	inst.value = CMDQ_JUMP_PASS >>
		cmdq_get_shift_pa(((struct cmdq_client *)pkt->cl)->chan);
	err = cmdq_pkt_append_command(pkt, inst);

	return err;
}
EXPORT_SYMBOL(cmdq_pkt_finalize);

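/*
 * Mailbox completion handler: sync the buffer back to the CPU and chain
 * into the caller-supplied callback, if one was registered.
 */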
static void cmdq_pkt_flush_async_cb(struct cmdq_cb_data data)
{
	struct cmdq_pkt *pkt = (struct cmdq_pkt *)data.data;
	struct cmdq_task_cb *cb = &pkt->cb;
	struct cmdq_client *client = (struct cmdq_client *)pkt->cl;

	dma_sync_single_for_cpu(client->chan->mbox->dev, pkt->pa_base,
				pkt->cmd_buf_size, DMA_TO_DEVICE);
	if (cb->cb) {
		data.data = cb->data;
		cb->cb(data);
	}
}

int cmdq_pkt_flush_async(struct cmdq_pkt *pkt, cmdq_async_flush_cb cb,
			 void *data)
{
	int err;
	struct cmdq_client *client = (struct cmdq_client *)pkt->cl;

	pkt->cb.cb = cb;
	pkt->cb.data = data;
	pkt->async_cb.cb = cmdq_pkt_flush_async_cb;
	pkt->async_cb.data = pkt;

	dma_sync_single_for_device(client->chan->mbox->dev, pkt->pa_base,
				   pkt->cmd_buf_size, DMA_TO_DEVICE);

	err = mbox_send_message(client->chan, pkt);
	if (err < 0)
		return err;
	/* We can send next packet immediately, so just call txdone. */
	mbox_client_txdone(client->chan, 0);

	return 0;
}
EXPORT_SYMBOL(cmdq_pkt_flush_async);

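/*
 * Synchronous flush support: cmdq_pkt_flush() wraps cmdq_pkt_flush_async()
 * with a completion so the caller sleeps until the packet has executed.
 */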
struct cmdq_flush_completion {
	struct completion cmplt;
	bool err;
};

static void cmdq_pkt_flush_cb(struct cmdq_cb_data data)
{
	struct cmdq_flush_completion *cmplt;

	cmplt = (struct cmdq_flush_completion *)data.data;
	if (data.sta != CMDQ_CB_NORMAL)
		cmplt->err = true;
	else
		cmplt->err = false;
	complete(&cmplt->cmplt);
}

int cmdq_pkt_flush(struct cmdq_pkt *pkt)
{
	struct cmdq_flush_completion cmplt;
	int err;

	init_completion(&cmplt.cmplt);
	err = cmdq_pkt_flush_async(pkt, cmdq_pkt_flush_cb, &cmplt);
	if (err < 0)
		return err;
	wait_for_completion(&cmplt.cmplt);

	return cmplt.err ? -EFAULT : 0;
}
EXPORT_SYMBOL(cmdq_pkt_flush);

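/*
 * Typical consumer flow (an illustrative sketch only; the buffer size,
 * subsys number, register offset and value below are placeholders that a
 * real client takes from its device tree binding):
 *
 *	struct cmdq_client *cl = cmdq_mbox_create(dev, 0);
 *	struct cmdq_pkt *pkt = cmdq_pkt_create(cl, PAGE_SIZE);
 *
 *	cmdq_pkt_write(pkt, subsys, offset, value);
 *	cmdq_pkt_finalize(pkt);
 *	cmdq_pkt_flush(pkt);
 *
 *	cmdq_pkt_destroy(pkt);
 *	cmdq_mbox_destroy(cl);
 */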
MODULE_LICENSE("GPL v2");