// SPDX-License-Identifier: GPL-2.0
//
// Copyright (c) 2018 MediaTek Inc.

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/mailbox_controller.h>
#include <linux/mailbox/mtk-cmdq-mailbox.h>

#define CMDQ_MBOX_AUTOSUSPEND_DELAY_MS	100

#define CMDQ_OP_CODE_MASK		(0xff << CMDQ_OP_CODE_SHIFT)
#define CMDQ_NUM_CMD(t)			(t->cmd_buf_size / CMDQ_INST_SIZE)

#define CMDQ_CURR_IRQ_STATUS		0x10
#define CMDQ_SYNC_TOKEN_UPDATE		0x68
#define CMDQ_THR_SLOT_CYCLES		0x30
#define CMDQ_THR_BASE			0x100
#define CMDQ_THR_SIZE			0x80
#define CMDQ_THR_WARM_RESET		0x00
#define CMDQ_THR_ENABLE_TASK		0x04
#define CMDQ_THR_SUSPEND_TASK		0x08
#define CMDQ_THR_CURR_STATUS		0x0c
#define CMDQ_THR_IRQ_STATUS		0x10
#define CMDQ_THR_IRQ_ENABLE		0x14
#define CMDQ_THR_CURR_ADDR		0x20
#define CMDQ_THR_END_ADDR		0x24
#define CMDQ_THR_WAIT_TOKEN		0x30
#define CMDQ_THR_PRIORITY		0x40

#define GCE_GCTL_VALUE			0x48
#define GCE_CTRL_BY_SW			GENMASK(2, 0)
#define GCE_DDR_EN			GENMASK(18, 16)

#define CMDQ_THR_ACTIVE_SLOT_CYCLES	0x3200
#define CMDQ_THR_ENABLED		0x1
#define CMDQ_THR_DISABLED		0x0
#define CMDQ_THR_SUSPEND		0x1
#define CMDQ_THR_RESUME			0x0
#define CMDQ_THR_STATUS_SUSPENDED	BIT(1)
#define CMDQ_THR_DO_WARM_RESET		BIT(0)
#define CMDQ_THR_IRQ_DONE		0x1
#define CMDQ_THR_IRQ_ERROR		0x12
#define CMDQ_THR_IRQ_EN			(CMDQ_THR_IRQ_ERROR | CMDQ_THR_IRQ_DONE)
#define CMDQ_THR_IS_WAITING		BIT(31)

#define CMDQ_JUMP_BY_OFFSET		0x10000000
#define CMDQ_JUMP_BY_PA			0x10000001

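/*
 * Driver-private state: one cmdq_thread per hardware GCE thread (backing a
 * mailbox channel), one cmdq_task per packet queued on a thread, and one
 * cmdq controller instance wrapping the mbox_controller.
 */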
struct cmdq_thread {
        struct mbox_chan        *chan;
        void __iomem            *base;
        struct list_head        task_busy_list;
        u32                     priority;
};

struct cmdq_task {
        struct cmdq             *cmdq;
        struct list_head        list_entry;
        dma_addr_t              pa_base;
        struct cmdq_thread      *thread;
        struct cmdq_pkt         *pkt; /* the packet sent from mailbox client */
};

struct cmdq {
        struct mbox_controller  mbox;
        void __iomem            *base;
        int                     irq;
        u32                     irq_mask;
        const struct gce_plat   *pdata;
        struct cmdq_thread      *thread;
        struct clk_bulk_data    *clocks;
        bool                    suspended;
};

struct gce_plat {
        u32 thread_nr;
        u8 shift;
        bool control_by_sw;
        bool sw_ddr_en;
        u32 gce_num;
};

static void cmdq_sw_ddr_enable(struct cmdq *cmdq, bool enable)
{
        WARN_ON(clk_bulk_enable(cmdq->pdata->gce_num, cmdq->clocks));

        if (enable)
                writel(GCE_DDR_EN | GCE_CTRL_BY_SW, cmdq->base + GCE_GCTL_VALUE);
        else
                writel(GCE_CTRL_BY_SW, cmdq->base + GCE_GCTL_VALUE);

        clk_bulk_disable(cmdq->pdata->gce_num, cmdq->clocks);
}

u8 cmdq_get_shift_pa(struct mbox_chan *chan)
{
        struct cmdq *cmdq = container_of(chan->mbox, struct cmdq, mbox);

        return cmdq->pdata->shift;
}
EXPORT_SYMBOL(cmdq_get_shift_pa);

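/* Request HW suspend of a GCE thread and poll until it reports suspended. */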
static int cmdq_thread_suspend(struct cmdq *cmdq, struct cmdq_thread *thread)
{
        u32 status;

        writel(CMDQ_THR_SUSPEND, thread->base + CMDQ_THR_SUSPEND_TASK);

        /* If already disabled, treat as suspended successfully. */
        if (!(readl(thread->base + CMDQ_THR_ENABLE_TASK) & CMDQ_THR_ENABLED))
                return 0;

        if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_CURR_STATUS,
                        status, status & CMDQ_THR_STATUS_SUSPENDED, 0, 10)) {
                dev_err(cmdq->mbox.dev, "suspend GCE thread 0x%x failed\n",
                        (u32)(thread->base - cmdq->base));
                return -EFAULT;
        }

        return 0;
}

static void cmdq_thread_resume(struct cmdq_thread *thread)
{
        writel(CMDQ_THR_RESUME, thread->base + CMDQ_THR_SUSPEND_TASK);
}

static void cmdq_init(struct cmdq *cmdq)
{
        int i;
        u32 gctl_regval = 0;

        WARN_ON(clk_bulk_enable(cmdq->pdata->gce_num, cmdq->clocks));
        if (cmdq->pdata->control_by_sw)
                gctl_regval = GCE_CTRL_BY_SW;
        if (cmdq->pdata->sw_ddr_en)
                gctl_regval |= GCE_DDR_EN;

        if (gctl_regval)
                writel(gctl_regval, cmdq->base + GCE_GCTL_VALUE);

        writel(CMDQ_THR_ACTIVE_SLOT_CYCLES, cmdq->base + CMDQ_THR_SLOT_CYCLES);
        for (i = 0; i <= CMDQ_MAX_EVENT; i++)
                writel(i, cmdq->base + CMDQ_SYNC_TOKEN_UPDATE);
        clk_bulk_disable(cmdq->pdata->gce_num, cmdq->clocks);
}

static int cmdq_thread_reset(struct cmdq *cmdq, struct cmdq_thread *thread)
{
        u32 warm_reset;

        writel(CMDQ_THR_DO_WARM_RESET, thread->base + CMDQ_THR_WARM_RESET);
        if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_WARM_RESET,
                        warm_reset, !(warm_reset & CMDQ_THR_DO_WARM_RESET),
                        0, 10)) {
                dev_err(cmdq->mbox.dev, "reset GCE thread 0x%x failed\n",
                        (u32)(thread->base - cmdq->base));
                return -EFAULT;
        }

        return 0;
}

static void cmdq_thread_disable(struct cmdq *cmdq, struct cmdq_thread *thread)
{
        cmdq_thread_reset(cmdq, thread);
        writel(CMDQ_THR_DISABLED, thread->base + CMDQ_THR_ENABLE_TASK);
}

/* notify GCE to re-fetch commands by setting GCE thread PC */
static void cmdq_thread_invalidate_fetched_data(struct cmdq_thread *thread)
{
        writel(readl(thread->base + CMDQ_THR_CURR_ADDR),
               thread->base + CMDQ_THR_CURR_ADDR);
}

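/*
 * Chain a new task onto a busy thread: patch the previous task's final
 * instruction into a JUMP (by physical address) to the new task's command
 * buffer, then rewrite the thread PC so the GCE re-fetches the patched jump.
 */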
static void cmdq_task_insert_into_thread(struct cmdq_task *task)
{
        struct device *dev = task->cmdq->mbox.dev;
        struct cmdq_thread *thread = task->thread;
        struct cmdq_task *prev_task = list_last_entry(
                        &thread->task_busy_list, typeof(*task), list_entry);
        u64 *prev_task_base = prev_task->pkt->va_base;

        /* let previous task jump to this task */
        dma_sync_single_for_cpu(dev, prev_task->pa_base,
                                prev_task->pkt->cmd_buf_size, DMA_TO_DEVICE);
        prev_task_base[CMDQ_NUM_CMD(prev_task->pkt) - 1] =
                (u64)CMDQ_JUMP_BY_PA << 32 |
                (task->pa_base >> task->cmdq->pdata->shift);
        dma_sync_single_for_device(dev, prev_task->pa_base,
                                   prev_task->pkt->cmd_buf_size, DMA_TO_DEVICE);

        cmdq_thread_invalidate_fetched_data(thread);
}

static bool cmdq_thread_is_in_wfe(struct cmdq_thread *thread)
{
        return readl(thread->base + CMDQ_THR_WAIT_TOKEN) & CMDQ_THR_IS_WAITING;
}

static void cmdq_task_exec_done(struct cmdq_task *task, int sta)
{
        struct cmdq_cb_data data;

        data.sta = sta;
        data.pkt = task->pkt;
        mbox_chan_received_data(task->thread->chan, &data);

        list_del(&task->list_entry);
}

static void cmdq_task_handle_error(struct cmdq_task *task)
{
        struct cmdq_thread *thread = task->thread;
        struct cmdq_task *next_task;
        struct cmdq *cmdq = task->cmdq;

        dev_err(cmdq->mbox.dev, "task 0x%p error\n", task);
        WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
        next_task = list_first_entry_or_null(&thread->task_busy_list,
                        struct cmdq_task, list_entry);
        if (next_task)
                writel(next_task->pa_base >> cmdq->pdata->shift,
                       thread->base + CMDQ_THR_CURR_ADDR);
        cmdq_thread_resume(thread);
}

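/*
 * Per-thread completion handling, called with the channel lock held: ack the
 * thread's IRQ flags, then walk task_busy_list and complete every task the
 * thread PC has passed, reporting -ENOEXEC for a task that hit an error IRQ.
 */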
static void cmdq_thread_irq_handler(struct cmdq *cmdq,
                                    struct cmdq_thread *thread)
{
        struct cmdq_task *task, *tmp, *curr_task = NULL;
        u32 curr_pa, irq_flag, task_end_pa;
        bool err;

        irq_flag = readl(thread->base + CMDQ_THR_IRQ_STATUS);
        writel(~irq_flag, thread->base + CMDQ_THR_IRQ_STATUS);

        /*
         * When the ISR calls this function, another CPU core could run
         * "release task" right before we acquire the spin lock, and thus
         * reset / disable this GCE thread, so we need to check the enable
         * bit of this GCE thread.
         */
        if (!(readl(thread->base + CMDQ_THR_ENABLE_TASK) & CMDQ_THR_ENABLED))
                return;

        if (irq_flag & CMDQ_THR_IRQ_ERROR)
                err = true;
        else if (irq_flag & CMDQ_THR_IRQ_DONE)
                err = false;
        else
                return;

        curr_pa = readl(thread->base + CMDQ_THR_CURR_ADDR) << cmdq->pdata->shift;

        list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
                                 list_entry) {
                task_end_pa = task->pa_base + task->pkt->cmd_buf_size;
                if (curr_pa >= task->pa_base && curr_pa < task_end_pa)
                        curr_task = task;

                if (!curr_task || curr_pa == task_end_pa - CMDQ_INST_SIZE) {
                        cmdq_task_exec_done(task, 0);
                        kfree(task);
                } else if (err) {
                        cmdq_task_exec_done(task, -ENOEXEC);
                        cmdq_task_handle_error(curr_task);
                        kfree(task);
                }

                if (curr_task)
                        break;
        }

        if (list_empty(&thread->task_busy_list))
                cmdq_thread_disable(cmdq, thread);
}

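/*
 * Top-level ISR. CMDQ_CURR_IRQ_STATUS reports pending threads as cleared
 * bits, hence the for_each_clear_bit() walk below; each pending thread is
 * handled under its channel lock.
 */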
static irqreturn_t cmdq_irq_handler(int irq, void *dev)
{
        struct cmdq *cmdq = dev;
        unsigned long irq_status, flags = 0L;
        int bit;

        irq_status = readl(cmdq->base + CMDQ_CURR_IRQ_STATUS) & cmdq->irq_mask;
        if (!(irq_status ^ cmdq->irq_mask))
                return IRQ_NONE;

        for_each_clear_bit(bit, &irq_status, cmdq->pdata->thread_nr) {
                struct cmdq_thread *thread = &cmdq->thread[bit];

                spin_lock_irqsave(&thread->chan->lock, flags);
                cmdq_thread_irq_handler(cmdq, thread);
                spin_unlock_irqrestore(&thread->chan->lock, flags);
        }

        pm_runtime_mark_last_busy(cmdq->mbox.dev);

        return IRQ_HANDLED;
}

static int cmdq_runtime_resume(struct device *dev)
{
        struct cmdq *cmdq = dev_get_drvdata(dev);

        return clk_bulk_enable(cmdq->pdata->gce_num, cmdq->clocks);
}

static int cmdq_runtime_suspend(struct device *dev)
{
        struct cmdq *cmdq = dev_get_drvdata(dev);

        clk_bulk_disable(cmdq->pdata->gce_num, cmdq->clocks);
        return 0;
}

static int cmdq_suspend(struct device *dev)
{
        struct cmdq *cmdq = dev_get_drvdata(dev);
        struct cmdq_thread *thread;
        int i;
        bool task_running = false;

        cmdq->suspended = true;

        for (i = 0; i < cmdq->pdata->thread_nr; i++) {
                thread = &cmdq->thread[i];
                if (!list_empty(&thread->task_busy_list)) {
                        task_running = true;
                        break;
                }
        }

        if (task_running)
                dev_warn(dev, "exist running task(s) in suspend\n");

        if (cmdq->pdata->sw_ddr_en)
                cmdq_sw_ddr_enable(cmdq, false);

        return pm_runtime_force_suspend(dev);
}

static int cmdq_resume(struct device *dev)
{
        struct cmdq *cmdq = dev_get_drvdata(dev);

        WARN_ON(pm_runtime_force_resume(dev));
        cmdq->suspended = false;

        if (cmdq->pdata->sw_ddr_en)
                cmdq_sw_ddr_enable(cmdq, true);

        return 0;
}

static void cmdq_remove(struct platform_device *pdev)
{
        struct cmdq *cmdq = platform_get_drvdata(pdev);

        if (cmdq->pdata->sw_ddr_en)
                cmdq_sw_ddr_enable(cmdq, false);

        if (!IS_ENABLED(CONFIG_PM))
                cmdq_runtime_suspend(&pdev->dev);

        clk_bulk_unprepare(cmdq->pdata->gce_num, cmdq->clocks);
}

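/*
 * Queue a packet on a channel. If the thread is idle its registers are reset
 * and programmed from scratch; otherwise the thread is suspended, the new
 * task is either chained after the running one or set as the new PC, the end
 * address is extended, and the thread is resumed.
 */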
static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data)
{
        struct cmdq_pkt *pkt = (struct cmdq_pkt *)data;
        struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv;
        struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);
        struct cmdq_task *task;
        unsigned long curr_pa, end_pa;
        int ret;

        /* Client should not flush new tasks if suspended. */
        WARN_ON(cmdq->suspended);

        ret = pm_runtime_get_sync(cmdq->mbox.dev);
        if (ret < 0)
                return ret;

        task = kzalloc(sizeof(*task), GFP_ATOMIC);
        if (!task) {
                __pm_runtime_put_autosuspend(cmdq->mbox.dev);
                return -ENOMEM;
        }

        task->cmdq = cmdq;
        INIT_LIST_HEAD(&task->list_entry);
        task->pa_base = pkt->pa_base;
        task->thread = thread;
        task->pkt = pkt;

        if (list_empty(&thread->task_busy_list)) {
                /*
                 * The thread reset will clear thread-related registers to 0,
                 * including pc, end, priority, irq, suspend and enable. Thus
                 * writing CMDQ_THR_ENABLED to CMDQ_THR_ENABLE_TASK will
                 * enable the thread and make it run.
                 */
                WARN_ON(cmdq_thread_reset(cmdq, thread) < 0);

                writel(task->pa_base >> cmdq->pdata->shift,
                       thread->base + CMDQ_THR_CURR_ADDR);
                writel((task->pa_base + pkt->cmd_buf_size) >> cmdq->pdata->shift,
                       thread->base + CMDQ_THR_END_ADDR);

                writel(thread->priority, thread->base + CMDQ_THR_PRIORITY);
                writel(CMDQ_THR_IRQ_EN, thread->base + CMDQ_THR_IRQ_ENABLE);
                writel(CMDQ_THR_ENABLED, thread->base + CMDQ_THR_ENABLE_TASK);
        } else {
                WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
                curr_pa = readl(thread->base + CMDQ_THR_CURR_ADDR) <<
                        cmdq->pdata->shift;
                end_pa = readl(thread->base + CMDQ_THR_END_ADDR) <<
                        cmdq->pdata->shift;
                /* check boundary */
                if (curr_pa == end_pa - CMDQ_INST_SIZE ||
                    curr_pa == end_pa) {
                        /* set to this task directly */
                        writel(task->pa_base >> cmdq->pdata->shift,
                               thread->base + CMDQ_THR_CURR_ADDR);
                } else {
                        cmdq_task_insert_into_thread(task);
                        smp_mb(); /* modify jump before enable thread */
                }
                writel((task->pa_base + pkt->cmd_buf_size) >> cmdq->pdata->shift,
                       thread->base + CMDQ_THR_END_ADDR);
                cmdq_thread_resume(thread);
        }
        list_move_tail(&task->list_entry, &thread->task_busy_list);

        pm_runtime_mark_last_busy(cmdq->mbox.dev);
        __pm_runtime_put_autosuspend(cmdq->mbox.dev);

        return 0;
}

static int cmdq_mbox_startup(struct mbox_chan *chan)
{
        return 0;
}

static void cmdq_mbox_shutdown(struct mbox_chan *chan)
{
        struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv;
        struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);
        struct cmdq_task *task, *tmp;
        unsigned long flags;

        WARN_ON(pm_runtime_get_sync(cmdq->mbox.dev) < 0);

        spin_lock_irqsave(&thread->chan->lock, flags);
        if (list_empty(&thread->task_busy_list))
                goto done;

        WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);

        /* make sure executed tasks have success callback */
        cmdq_thread_irq_handler(cmdq, thread);
        if (list_empty(&thread->task_busy_list))
                goto done;

        list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
                                 list_entry) {
                cmdq_task_exec_done(task, -ECONNABORTED);
                kfree(task);
        }

        cmdq_thread_disable(cmdq, thread);

done:
        /*
         * An empty thread->task_busy_list means the thread is already
         * disabled. cmdq_mbox_send_data() always resets the thread, which
         * clears the disable and suspend status when the first packet is
         * sent to the channel, so there is no need to do anything else here;
         * just unlock and leave.
         */
        spin_unlock_irqrestore(&thread->chan->lock, flags);

        pm_runtime_mark_last_busy(cmdq->mbox.dev);
        __pm_runtime_put_autosuspend(cmdq->mbox.dev);
}

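/*
 * Abort everything queued on a channel. Tasks are completed with
 * -ECONNABORTED if the thread is parked in a wait-for-event; otherwise the
 * thread is resumed and we poll until the hardware disables it or the
 * caller's timeout expires.
 */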
static int cmdq_mbox_flush(struct mbox_chan *chan, unsigned long timeout)
{
        struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv;
        struct cmdq_cb_data data;
        struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);
        struct cmdq_task *task, *tmp;
        unsigned long flags;
        u32 enable;
        int ret;

        ret = pm_runtime_get_sync(cmdq->mbox.dev);
        if (ret < 0)
                return ret;

        spin_lock_irqsave(&thread->chan->lock, flags);
        if (list_empty(&thread->task_busy_list))
                goto out;

        WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
        if (!cmdq_thread_is_in_wfe(thread))
                goto wait;

        list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
                                 list_entry) {
                data.sta = -ECONNABORTED;
                data.pkt = task->pkt;
                mbox_chan_received_data(task->thread->chan, &data);
                list_del(&task->list_entry);
                kfree(task);
        }

        cmdq_thread_resume(thread);
        cmdq_thread_disable(cmdq, thread);

out:
        spin_unlock_irqrestore(&thread->chan->lock, flags);
        pm_runtime_mark_last_busy(cmdq->mbox.dev);
        __pm_runtime_put_autosuspend(cmdq->mbox.dev);

        return 0;

wait:
        cmdq_thread_resume(thread);
        spin_unlock_irqrestore(&thread->chan->lock, flags);
        if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_ENABLE_TASK,
                                      enable, enable == 0, 1, timeout)) {
                dev_err(cmdq->mbox.dev, "Fail to wait GCE thread 0x%x done\n",
                        (u32)(thread->base - cmdq->base));

                return -EFAULT;
        }
        pm_runtime_mark_last_busy(cmdq->mbox.dev);
        __pm_runtime_put_autosuspend(cmdq->mbox.dev);
        return 0;
}

static const struct mbox_chan_ops cmdq_mbox_chan_ops = {
        .send_data = cmdq_mbox_send_data,
        .startup = cmdq_mbox_startup,
        .shutdown = cmdq_mbox_shutdown,
        .flush = cmdq_mbox_flush,
};

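/*
 * #mbox-cells translation: args[0] selects the GCE thread (channel index),
 * args[1] carries the thread priority programmed on first use.
 */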
static struct mbox_chan *cmdq_xlate(struct mbox_controller *mbox,
                                    const struct of_phandle_args *sp)
{
        int ind = sp->args[0];
        struct cmdq_thread *thread;

        if (ind >= mbox->num_chans)
                return ERR_PTR(-EINVAL);

        thread = (struct cmdq_thread *)mbox->chans[ind].con_priv;
        thread->priority = sp->args[1];
        thread->chan = &mbox->chans[ind];

        return &mbox->chans[ind];
}

static int cmdq_get_clocks(struct device *dev, struct cmdq *cmdq)
{
        static const char * const gce_name = "gce";
        struct device_node *node, *parent = dev->of_node->parent;
        struct clk_bulk_data *clks;

        cmdq->clocks = devm_kcalloc(dev, cmdq->pdata->gce_num,
                                    sizeof(*cmdq->clocks), GFP_KERNEL);
        if (!cmdq->clocks)
                return -ENOMEM;

        if (cmdq->pdata->gce_num == 1) {
                clks = &cmdq->clocks[0];

                clks->id = gce_name;
                clks->clk = devm_clk_get(dev, NULL);
                if (IS_ERR(clks->clk))
                        return dev_err_probe(dev, PTR_ERR(clks->clk),
                                             "failed to get gce clock\n");

                return 0;
        }

        /*
         * If there is more than one GCE, get the clocks for the others too,
         * as the clock of the main GCE must be enabled for additional IPs
         * to be reachable.
         */
        for_each_child_of_node(parent, node) {
                int alias_id = of_alias_get_id(node, gce_name);

                if (alias_id < 0 || alias_id >= cmdq->pdata->gce_num)
                        continue;

                clks = &cmdq->clocks[alias_id];

                clks->id = devm_kasprintf(dev, GFP_KERNEL, "gce%d", alias_id);
                if (!clks->id) {
                        of_node_put(node);
                        return -ENOMEM;
                }

                clks->clk = of_clk_get(node, 0);
                if (IS_ERR(clks->clk)) {
                        of_node_put(node);
                        return dev_err_probe(dev, PTR_ERR(clks->clk),
                                             "failed to get gce%d clock\n", alias_id);
                }
        }

        return 0;
}

static int cmdq_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct cmdq *cmdq;
        int err, i;

        cmdq = devm_kzalloc(dev, sizeof(*cmdq), GFP_KERNEL);
        if (!cmdq)
                return -ENOMEM;

        cmdq->base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(cmdq->base))
                return PTR_ERR(cmdq->base);

        cmdq->irq = platform_get_irq(pdev, 0);
        if (cmdq->irq < 0)
                return cmdq->irq;

        cmdq->pdata = device_get_match_data(dev);
        if (!cmdq->pdata) {
                dev_err(dev, "failed to get match data\n");
                return -EINVAL;
        }

        cmdq->irq_mask = GENMASK(cmdq->pdata->thread_nr - 1, 0);

        dev_dbg(dev, "cmdq device: addr:0x%p, va:0x%p, irq:%d\n",
                dev, cmdq->base, cmdq->irq);

        err = cmdq_get_clocks(dev, cmdq);
        if (err)
                return err;

        cmdq->mbox.dev = dev;
        cmdq->mbox.chans = devm_kcalloc(dev, cmdq->pdata->thread_nr,
                                        sizeof(*cmdq->mbox.chans), GFP_KERNEL);
        if (!cmdq->mbox.chans)
                return -ENOMEM;

        cmdq->mbox.num_chans = cmdq->pdata->thread_nr;
        cmdq->mbox.ops = &cmdq_mbox_chan_ops;
        cmdq->mbox.of_xlate = cmdq_xlate;

        /* make use of TXDONE_BY_ACK */
        cmdq->mbox.txdone_irq = false;
        cmdq->mbox.txdone_poll = false;

        cmdq->thread = devm_kcalloc(dev, cmdq->pdata->thread_nr,
                                    sizeof(*cmdq->thread), GFP_KERNEL);
        if (!cmdq->thread)
                return -ENOMEM;

        for (i = 0; i < cmdq->pdata->thread_nr; i++) {
                cmdq->thread[i].base = cmdq->base + CMDQ_THR_BASE +
                                CMDQ_THR_SIZE * i;
                INIT_LIST_HEAD(&cmdq->thread[i].task_busy_list);
                cmdq->mbox.chans[i].con_priv = (void *)&cmdq->thread[i];
        }

        platform_set_drvdata(pdev, cmdq);

        WARN_ON(clk_bulk_prepare(cmdq->pdata->gce_num, cmdq->clocks));

        cmdq_init(cmdq);

        err = devm_request_irq(dev, cmdq->irq, cmdq_irq_handler, IRQF_SHARED,
                               "mtk_cmdq", cmdq);
        if (err < 0) {
                dev_err(dev, "failed to register ISR (%d)\n", err);
                return err;
        }

        /* If Runtime PM is not available enable the clocks now. */
        if (!IS_ENABLED(CONFIG_PM)) {
                err = cmdq_runtime_resume(dev);
                if (err)
                        return err;
        }

        err = devm_pm_runtime_enable(dev);
        if (err)
                return err;

        pm_runtime_set_autosuspend_delay(dev, CMDQ_MBOX_AUTOSUSPEND_DELAY_MS);
        pm_runtime_use_autosuspend(dev);

        err = devm_mbox_controller_register(dev, &cmdq->mbox);
        if (err < 0) {
                dev_err(dev, "failed to register mailbox: %d\n", err);
                return err;
        }

        return 0;
}

static const struct dev_pm_ops cmdq_pm_ops = {
        .suspend = cmdq_suspend,
        .resume = cmdq_resume,
        SET_RUNTIME_PM_OPS(cmdq_runtime_suspend,
                           cmdq_runtime_resume, NULL)
};

static const struct gce_plat gce_plat_mt6779 = {
        .control_by_sw = false,
};

static const struct gce_plat gce_plat_mt8173 = {
        .control_by_sw = false,
};

static const struct gce_plat gce_plat_mt8183 = {
        .control_by_sw = false,
};

static const struct gce_plat gce_plat_mt8186 = {
        .control_by_sw = true,
};

static const struct gce_plat gce_plat_mt8188 = {
        .control_by_sw = true,
};

static const struct gce_plat gce_plat_mt8192 = {
        .control_by_sw = true,
};

static const struct gce_plat gce_plat_mt8195 = {
        .control_by_sw = true,
};

static const struct of_device_id cmdq_of_ids[] = {
        {.compatible = "mediatek,mt6779-gce", .data = (void *)&gce_plat_mt6779},
        {.compatible = "mediatek,mt8173-gce", .data = (void *)&gce_plat_mt8173},
        {.compatible = "mediatek,mt8183-gce", .data = (void *)&gce_plat_mt8183},
        {.compatible = "mediatek,mt8186-gce", .data = (void *)&gce_plat_mt8186},
        {.compatible = "mediatek,mt8188-gce", .data = (void *)&gce_plat_mt8188},
        {.compatible = "mediatek,mt8192-gce", .data = (void *)&gce_plat_mt8192},
        {.compatible = "mediatek,mt8195-gce", .data = (void *)&gce_plat_mt8195},
        {}
};
MODULE_DEVICE_TABLE(of, cmdq_of_ids);

static struct platform_driver cmdq_drv = {
        .probe = cmdq_probe,
        .remove = cmdq_remove,
        .driver = {
                .name = "mtk_cmdq",
                .pm = &cmdq_pm_ops,
                .of_match_table = cmdq_of_ids,
        },
};

static int __init cmdq_drv_init(void)
{
        return platform_driver_register(&cmdq_drv);
}

static void __exit cmdq_drv_exit(void)
{
        platform_driver_unregister(&cmdq_drv);
}

subsys_initcall(cmdq_drv_init);
module_exit(cmdq_drv_exit);

MODULE_DESCRIPTION("Mediatek Command Queue(CMDQ) Mailbox driver");
MODULE_LICENSE("GPL v2");