// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Alibaba Group Holding Limited.
 */

#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mailbox_controller.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#define TH_1520_MBOX_STA 0x0
#define TH_1520_MBOX_CLR 0x4
#define TH_1520_MBOX_MASK 0xc

/* Transmit/receive data register:
 * INFO0 ~ INFO6
 */
#define TH_1520_MBOX_INFO_NUM 8
#define TH_1520_MBOX_DATA_INFO_NUM 7
#define TH_1520_MBOX_INFO0 0x14
/* Transmit ack register: INFO7 */
#define TH_1520_MBOX_INFO7 0x30

/* Generate remote icu IRQ Register */
#define TH_1520_MBOX_GEN 0x10
#define TH_1520_MBOX_GEN_RX_DATA BIT(6)
#define TH_1520_MBOX_GEN_TX_ACK BIT(7)

#define TH_1520_MBOX_CHAN_RES_SIZE 0x1000
#define TH_1520_MBOX_CHANS 4
#define TH_1520_MBOX_CHAN_NAME_SIZE 20

#define TH_1520_MBOX_ACK_MAGIC 0xdeadbeaf
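
/*
 * Handshake summary (as implemented below, not taken from the hardware
 * manual): the sender writes a 28-byte payload into the remote INFO0~INFO6
 * registers and raises GEN_RX_DATA to interrupt the remote core.  The
 * receiver's ISR reads its local INFO0~INFO6, clears INFO0, writes
 * TH_1520_MBOX_ACK_MAGIC into the sender's INFO7 and raises GEN_TX_ACK
 * (unless the remote core is CPU1/CPU2, which poll INFO7 instead).  When the
 * sender's ISR sees its local INFO7 == ACK_MAGIC, it calls mbox_chan_txdone()
 * to complete the transfer.
 */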

#ifdef CONFIG_PM_SLEEP
/* store MBOX context across system-wide suspend/resume transitions */
struct th1520_mbox_context {
        u32 intr_mask[TH_1520_MBOX_CHANS];
};
#endif

enum th1520_mbox_icu_cpu_id {
        TH_1520_MBOX_ICU_KERNEL_CPU0, /* 910T */
        TH_1520_MBOX_ICU_CPU1, /* 902 */
        TH_1520_MBOX_ICU_CPU2, /* 906 */
        TH_1520_MBOX_ICU_CPU3, /* 910R */
};

struct th1520_mbox_con_priv {
        enum th1520_mbox_icu_cpu_id idx;
        void __iomem *comm_local_base;
        void __iomem *comm_remote_base;
        char irq_desc[TH_1520_MBOX_CHAN_NAME_SIZE];
        struct mbox_chan *chan;
};

struct th1520_mbox_priv {
        struct device *dev;
        void __iomem *local_icu[TH_1520_MBOX_CHANS];
        void __iomem *remote_icu[TH_1520_MBOX_CHANS - 1];
        void __iomem *cur_cpu_ch_base;
        spinlock_t mbox_lock; /* control register lock */

        struct mbox_controller mbox;
        struct mbox_chan mbox_chans[TH_1520_MBOX_CHANS];
        struct clk_bulk_data clocks[TH_1520_MBOX_CHANS];
        struct th1520_mbox_con_priv con_priv[TH_1520_MBOX_CHANS];
        int irq;
#ifdef CONFIG_PM_SLEEP
        struct th1520_mbox_context *ctx;
#endif
};

static struct th1520_mbox_priv *
to_th1520_mbox_priv(struct mbox_controller *mbox)
{
        return container_of(mbox, struct th1520_mbox_priv, mbox);
}

static void th1520_mbox_write(struct th1520_mbox_priv *priv, u32 val, u32 offs)
{
        iowrite32(val, priv->cur_cpu_ch_base + offs);
}

static u32 th1520_mbox_read(struct th1520_mbox_priv *priv, u32 offs)
{
        return ioread32(priv->cur_cpu_ch_base + offs);
}

static u32 th1520_mbox_rmw(struct th1520_mbox_priv *priv, u32 off, u32 set,
                           u32 clr)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&priv->mbox_lock, flags);
        val = th1520_mbox_read(priv, off);
        val &= ~clr;
        val |= set;
        th1520_mbox_write(priv, val, off);
        spin_unlock_irqrestore(&priv->mbox_lock, flags);

        return val;
}

static void th1520_mbox_chan_write(struct th1520_mbox_con_priv *cp, u32 val,
                                   u32 offs, bool is_remote)
{
        if (is_remote)
                iowrite32(val, cp->comm_remote_base + offs);
        else
                iowrite32(val, cp->comm_local_base + offs);
}

static u32 th1520_mbox_chan_read(struct th1520_mbox_con_priv *cp, u32 offs,
                                 bool is_remote)
{
        if (is_remote)
                return ioread32(cp->comm_remote_base + offs);
        else
                return ioread32(cp->comm_local_base + offs);
}

static void th1520_mbox_chan_rmw(struct th1520_mbox_con_priv *cp, u32 off,
                                 u32 set, u32 clr, bool is_remote)
{
        struct th1520_mbox_priv *priv = to_th1520_mbox_priv(cp->chan->mbox);
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&priv->mbox_lock, flags);
        val = th1520_mbox_chan_read(cp, off, is_remote);
        val &= ~clr;
        val |= set;
        th1520_mbox_chan_write(cp, val, off, is_remote);
        spin_unlock_irqrestore(&priv->mbox_lock, flags);
}

static void th1520_mbox_chan_rd_data(struct th1520_mbox_con_priv *cp,
                                     void *data, bool is_remote)
{
        u32 off = TH_1520_MBOX_INFO0;
        u32 *arg = data;
        u32 i;

        /* read info0 ~ info6, 28 bytes in total;
         * the caller must provide a buffer of at least 28 bytes
         */
        for (i = 0; i < TH_1520_MBOX_DATA_INFO_NUM; i++) {
                *arg = th1520_mbox_chan_read(cp, off, is_remote);
                off += 4;
                arg++;
        }
}

static void th1520_mbox_chan_wr_data(struct th1520_mbox_con_priv *cp,
                                     void *data, bool is_remote)
{
        u32 off = TH_1520_MBOX_INFO0;
        u32 *arg = data;
        u32 i;

        /* write info0 ~ info6, 28 bytes in total;
         * the caller must provide 28 bytes of valid data
         */
        for (i = 0; i < TH_1520_MBOX_DATA_INFO_NUM; i++) {
                th1520_mbox_chan_write(cp, *arg, off, is_remote);
                off += 4;
                arg++;
        }
}

static void th1520_mbox_chan_wr_ack(struct th1520_mbox_con_priv *cp, void *data,
                                    bool is_remote)
{
        u32 off = TH_1520_MBOX_INFO7;
        u32 *arg = data;

        th1520_mbox_chan_write(cp, *arg, off, is_remote);
}

static int th1520_mbox_chan_id_to_mapbit(struct th1520_mbox_con_priv *cp)
{
        int mapbit = 0;
        int i;

        for (i = 0; i < TH_1520_MBOX_CHANS; i++) {
                if (i == cp->idx)
                        return mapbit;

                if (i != TH_1520_MBOX_ICU_KERNEL_CPU0)
                        mapbit++;
        }

        if (i == TH_1520_MBOX_CHANS)
                dev_err(cp->chan->mbox->dev, "convert to mapbit failed\n");

        return 0;
}

static irqreturn_t th1520_mbox_isr(int irq, void *p)
{
        struct mbox_chan *chan = p;
        struct th1520_mbox_priv *priv = to_th1520_mbox_priv(chan->mbox);
        struct th1520_mbox_con_priv *cp = chan->con_priv;
        int mapbit = th1520_mbox_chan_id_to_mapbit(cp);
        u32 sta, dat[TH_1520_MBOX_DATA_INFO_NUM];
        u32 ack_magic = TH_1520_MBOX_ACK_MAGIC;
        u32 info0_data, info7_data;

        sta = th1520_mbox_read(priv, TH_1520_MBOX_STA);
        if (!(sta & BIT(mapbit)))
                return IRQ_NONE;

        /* clear chan irq bit in STA register */
        th1520_mbox_rmw(priv, TH_1520_MBOX_CLR, BIT(mapbit), 0);

        /* info0 is the protocol word, should not be zero! */
        info0_data = th1520_mbox_chan_read(cp, TH_1520_MBOX_INFO0, false);
        if (info0_data) {
                /* read info0~info6 data */
                th1520_mbox_chan_rd_data(cp, dat, false);

                /* clear local info0 */
                th1520_mbox_chan_write(cp, 0x0, TH_1520_MBOX_INFO0, false);

                /* notify remote cpu */
                th1520_mbox_chan_wr_ack(cp, &ack_magic, true);
                /* CPU1 902/906 use polling mode to monitor info7 */
                if (cp->idx != TH_1520_MBOX_ICU_CPU1 &&
                    cp->idx != TH_1520_MBOX_ICU_CPU2)
                        th1520_mbox_chan_rmw(cp, TH_1520_MBOX_GEN,
                                             TH_1520_MBOX_GEN_TX_ACK, 0, true);

                /* transfer the data to the client */
                mbox_chan_received_data(chan, (void *)dat);
        }

        /* the info7 magic value means a real ack signal, not generate bit7 */
        info7_data = th1520_mbox_chan_read(cp, TH_1520_MBOX_INFO7, false);
        if (info7_data == TH_1520_MBOX_ACK_MAGIC) {
                /* clear local info7 */
                th1520_mbox_chan_write(cp, 0x0, TH_1520_MBOX_INFO7, false);

                /* notify the framework that the last TX has completed */
                mbox_chan_txdone(chan, 0);
        }

        if (!info0_data && !info7_data)
                return IRQ_NONE;

        return IRQ_HANDLED;
}

static int th1520_mbox_send_data(struct mbox_chan *chan, void *data)
{
        struct th1520_mbox_con_priv *cp = chan->con_priv;

        th1520_mbox_chan_wr_data(cp, data, true);
        th1520_mbox_chan_rmw(cp, TH_1520_MBOX_GEN, TH_1520_MBOX_GEN_RX_DATA, 0,
                             true);
        return 0;
}

static int th1520_mbox_startup(struct mbox_chan *chan)
{
        struct th1520_mbox_priv *priv = to_th1520_mbox_priv(chan->mbox);
        struct th1520_mbox_con_priv *cp = chan->con_priv;
        u32 data[TH_1520_MBOX_INFO_NUM] = {};
        int mask_bit;
        int ret;

        /* clear local and remote generate and info0~info7 */
        th1520_mbox_chan_rmw(cp, TH_1520_MBOX_GEN, 0x0, 0xff, true);
        th1520_mbox_chan_rmw(cp, TH_1520_MBOX_GEN, 0x0, 0xff, false);
        th1520_mbox_chan_wr_ack(cp, &data[7], true);
        th1520_mbox_chan_wr_ack(cp, &data[7], false);
        th1520_mbox_chan_wr_data(cp, &data[0], true);
        th1520_mbox_chan_wr_data(cp, &data[0], false);

        /* enable the chan mask */
        mask_bit = th1520_mbox_chan_id_to_mapbit(cp);
        th1520_mbox_rmw(priv, TH_1520_MBOX_MASK, BIT(mask_bit), 0);

        /*
         * Mixing devm_ managed resources with manual IRQ handling is generally
         * discouraged due to potential complexities with resource management,
         * especially when dealing with shared interrupts. However, in this
         * case the approach is safe and effective because:
         *
         * 1. Each mailbox channel requests its IRQ within the .startup()
         *    callback and frees it within the .shutdown() callback.
         * 2. During device unbinding, the devm_ managed mailbox controller
         *    first iterates through all channels, ensuring that their IRQs
         *    are freed before any other devm_ resources are released.
         *
         * This ordering guarantees that no interrupts can be triggered from
         * the device while it is being unbound, preventing race conditions
         * and ensuring system stability.
         */
        ret = request_irq(priv->irq, th1520_mbox_isr,
                          IRQF_SHARED | IRQF_NO_SUSPEND, cp->irq_desc, chan);
        if (ret) {
                dev_err(priv->dev, "Unable to acquire IRQ %d\n", priv->irq);
                return ret;
        }

        return 0;
}

static void th1520_mbox_shutdown(struct mbox_chan *chan)
{
        struct th1520_mbox_priv *priv = to_th1520_mbox_priv(chan->mbox);
        struct th1520_mbox_con_priv *cp = chan->con_priv;
        int mask_bit;

        free_irq(priv->irq, chan);

        /* clear the chan mask */
        mask_bit = th1520_mbox_chan_id_to_mapbit(cp);
        th1520_mbox_rmw(priv, TH_1520_MBOX_MASK, 0, BIT(mask_bit));
}

static const struct mbox_chan_ops th1520_mbox_ops = {
        .send_data = th1520_mbox_send_data,
        .startup = th1520_mbox_startup,
        .shutdown = th1520_mbox_shutdown,
};
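
/*
 * Client-side usage sketch (illustrative only; the client structure and the
 * payload value are assumptions, not part of this driver).  INFO0 carries the
 * protocol word and must be non-zero, and TX completion is reported through
 * txdone_irq via mbox_chan_txdone() in the ISR above:
 *
 *      u32 msg[TH_1520_MBOX_DATA_INFO_NUM] = { 0x1 };
 *      struct mbox_chan *chan;
 *
 *      chan = mbox_request_channel(&some_mbox_client, 0);
 *      if (!IS_ERR(chan))
 *              mbox_send_message(chan, msg);
 */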

static int th1520_mbox_init_generic(struct th1520_mbox_priv *priv)
{
#ifdef CONFIG_PM_SLEEP
        priv->ctx = devm_kzalloc(priv->dev, sizeof(*priv->ctx), GFP_KERNEL);
        if (!priv->ctx)
                return -ENOMEM;
#endif
        /* Set default configuration */
        th1520_mbox_write(priv, 0xff, TH_1520_MBOX_CLR);
        th1520_mbox_write(priv, 0x0, TH_1520_MBOX_MASK);

        return 0;
}

static struct mbox_chan *th1520_mbox_xlate(struct mbox_controller *mbox,
                                           const struct of_phandle_args *sp)
{
        u32 chan;

        if (sp->args_count != 1) {
                dev_err(mbox->dev, "Invalid argument count %d\n",
                        sp->args_count);
                return ERR_PTR(-EINVAL);
        }

        chan = sp->args[0]; /* comm remote channel */

        if (chan >= mbox->num_chans) {
                dev_err(mbox->dev, "Not supported channel number: %d\n", chan);
                return ERR_PTR(-EINVAL);
        }

        if (chan == TH_1520_MBOX_ICU_KERNEL_CPU0) {
                dev_err(mbox->dev, "Cannot communicate with yourself\n");
                return ERR_PTR(-EINVAL);
        }

        return &mbox->chans[chan];
}

static void __iomem *th1520_map_mmio(struct platform_device *pdev,
                                     char *res_name, size_t offset)
{
        void __iomem *mapped;
        struct resource *res;

        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, res_name);
        if (!res) {
                dev_err(&pdev->dev, "Failed to get resource: %s\n", res_name);
                return ERR_PTR(-EINVAL);
        }

        mapped = devm_ioremap(&pdev->dev, res->start + offset,
                              resource_size(res) - offset);
        if (!mapped) {
                dev_err(&pdev->dev, "Failed to map resource: %s\n", res_name);
                return ERR_PTR(-ENOMEM);
        }

        return mapped;
}

static void th1520_disable_clk(void *data)
{
        struct th1520_mbox_priv *priv = data;

        clk_bulk_disable_unprepare(ARRAY_SIZE(priv->clocks), priv->clocks);
}

static int th1520_mbox_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct th1520_mbox_priv *priv;
        unsigned int remote_idx = 0;
        unsigned int i;
        int ret;

        priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        priv->dev = dev;

        priv->clocks[0].id = "clk-local";
        priv->clocks[1].id = "clk-remote-icu0";
        priv->clocks[2].id = "clk-remote-icu1";
        priv->clocks[3].id = "clk-remote-icu2";

        ret = devm_clk_bulk_get(dev, ARRAY_SIZE(priv->clocks),
                                priv->clocks);
        if (ret) {
                dev_err(dev, "Failed to get clocks\n");
                return ret;
        }

        ret = clk_bulk_prepare_enable(ARRAY_SIZE(priv->clocks), priv->clocks);
        if (ret) {
                dev_err(dev, "Failed to enable clocks\n");
                return ret;
        }

        ret = devm_add_action_or_reset(dev, th1520_disable_clk, priv);
        if (ret) {
                clk_bulk_disable_unprepare(ARRAY_SIZE(priv->clocks),
                                           priv->clocks);
                return ret;
        }

        /*
         * The address mappings in the device tree align precisely with those
         * outlined in the manual. However, register offsets within these
         * mapped regions are irregular, particularly for remote-icu0.
         * Consequently, th1520_map_mmio() takes an additional offset
         * parameter to accommodate this irregularity.
         */
        priv->local_icu[TH_1520_MBOX_ICU_KERNEL_CPU0] =
                th1520_map_mmio(pdev, "local", 0x0);
        if (IS_ERR(priv->local_icu[TH_1520_MBOX_ICU_KERNEL_CPU0]))
                return PTR_ERR(priv->local_icu[TH_1520_MBOX_ICU_KERNEL_CPU0]);

        priv->remote_icu[0] = th1520_map_mmio(pdev, "remote-icu0", 0x4000);
        if (IS_ERR(priv->remote_icu[0]))
                return PTR_ERR(priv->remote_icu[0]);

        priv->remote_icu[1] = th1520_map_mmio(pdev, "remote-icu1", 0x0);
        if (IS_ERR(priv->remote_icu[1]))
                return PTR_ERR(priv->remote_icu[1]);

        priv->remote_icu[2] = th1520_map_mmio(pdev, "remote-icu2", 0x0);
        if (IS_ERR(priv->remote_icu[2]))
                return PTR_ERR(priv->remote_icu[2]);

        priv->local_icu[TH_1520_MBOX_ICU_CPU1] =
                priv->local_icu[TH_1520_MBOX_ICU_KERNEL_CPU0] +
                TH_1520_MBOX_CHAN_RES_SIZE;
        priv->local_icu[TH_1520_MBOX_ICU_CPU2] =
                priv->local_icu[TH_1520_MBOX_ICU_CPU1] +
                TH_1520_MBOX_CHAN_RES_SIZE;
        priv->local_icu[TH_1520_MBOX_ICU_CPU3] =
                priv->local_icu[TH_1520_MBOX_ICU_CPU2] +
                TH_1520_MBOX_CHAN_RES_SIZE;

        priv->cur_cpu_ch_base = priv->local_icu[TH_1520_MBOX_ICU_KERNEL_CPU0];

        priv->irq = platform_get_irq(pdev, 0);
        if (priv->irq < 0)
                return priv->irq;

        /* init the channels */
        for (i = 0; i < TH_1520_MBOX_CHANS; i++) {
                struct th1520_mbox_con_priv *cp = &priv->con_priv[i];

                cp->idx = i;
                cp->chan = &priv->mbox_chans[i];
                priv->mbox_chans[i].con_priv = cp;
                snprintf(cp->irq_desc, sizeof(cp->irq_desc),
                         "th1520_mbox_chan[%i]", cp->idx);

                cp->comm_local_base = priv->local_icu[i];
                if (i != TH_1520_MBOX_ICU_KERNEL_CPU0) {
                        cp->comm_remote_base = priv->remote_icu[remote_idx];
                        remote_idx++;
                }
        }

        spin_lock_init(&priv->mbox_lock);

        priv->mbox.dev = dev;
        priv->mbox.ops = &th1520_mbox_ops;
        priv->mbox.chans = priv->mbox_chans;
        priv->mbox.num_chans = TH_1520_MBOX_CHANS;
        priv->mbox.of_xlate = th1520_mbox_xlate;
        priv->mbox.txdone_irq = true;

        platform_set_drvdata(pdev, priv);

        ret = th1520_mbox_init_generic(priv);
        if (ret) {
                dev_err(dev, "Failed to init mailbox context\n");
                return ret;
        }

        return devm_mbox_controller_register(dev, &priv->mbox);
}

static const struct of_device_id th1520_mbox_dt_ids[] = {
        { .compatible = "thead,th1520-mbox" },
        {}
};
MODULE_DEVICE_TABLE(of, th1520_mbox_dt_ids);
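
/*
 * Devicetree sketch (illustrative, not taken from a real board file): the
 * of_xlate callback above expects #mbox-cells = <1>, where the single cell
 * selects the remote CPU channel (1..3; channel 0 is the local kernel CPU and
 * is rejected).  Node and property names other than the compatible string,
 * and the omitted reg/interrupts/clocks properties, are assumptions.
 *
 *      mbox: mailbox {
 *              compatible = "thead,th1520-mbox";
 *              #mbox-cells = <1>;
 *      };
 *
 *      client {
 *              mboxes = <&mbox 1>;
 *      };
 */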

#ifdef CONFIG_PM_SLEEP
static int __maybe_unused th1520_mbox_suspend_noirq(struct device *dev)
{
        struct th1520_mbox_priv *priv = dev_get_drvdata(dev);
        struct th1520_mbox_context *ctx = priv->ctx;
        u32 i;

        /*
         * ONLY the interrupt mask bits are stored and restored;
         * all INFO data is assumed to be lost across suspend.
         */
        for (i = 0; i < TH_1520_MBOX_CHANS; i++) {
                ctx->intr_mask[i] =
                        ioread32(priv->local_icu[i] + TH_1520_MBOX_MASK);
        }

        return 0;
}

static int __maybe_unused th1520_mbox_resume_noirq(struct device *dev)
{
        struct th1520_mbox_priv *priv = dev_get_drvdata(dev);
        struct th1520_mbox_context *ctx = priv->ctx;
        u32 i;

        for (i = 0; i < TH_1520_MBOX_CHANS; i++) {
                iowrite32(ctx->intr_mask[i],
                          priv->local_icu[i] + TH_1520_MBOX_MASK);
        }

        return 0;
}
#endif

static int __maybe_unused th1520_mbox_runtime_suspend(struct device *dev)
{
        struct th1520_mbox_priv *priv = dev_get_drvdata(dev);

        clk_bulk_disable_unprepare(ARRAY_SIZE(priv->clocks), priv->clocks);

        return 0;
}

static int __maybe_unused th1520_mbox_runtime_resume(struct device *dev)
{
        struct th1520_mbox_priv *priv = dev_get_drvdata(dev);
        int ret;

        ret = clk_bulk_prepare_enable(ARRAY_SIZE(priv->clocks), priv->clocks);
        if (ret)
                dev_err(dev, "Failed to enable clocks in runtime resume\n");

        return ret;
}

static const struct dev_pm_ops th1520_mbox_pm_ops = {
#ifdef CONFIG_PM_SLEEP
        SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(th1520_mbox_suspend_noirq,
                                      th1520_mbox_resume_noirq)
#endif
        SET_RUNTIME_PM_OPS(th1520_mbox_runtime_suspend,
                           th1520_mbox_runtime_resume, NULL)
};

static struct platform_driver th1520_mbox_driver = {
        .probe = th1520_mbox_probe,
        .driver = {
                .name = "th1520-mbox",
                .of_match_table = th1520_mbox_dt_ids,
                .pm = &th1520_mbox_pm_ops,
        },
};
module_platform_driver(th1520_mbox_driver);

MODULE_DESCRIPTION("Thead TH-1520 mailbox IPC driver");
MODULE_LICENSE("GPL");