// SPDX-License-Identifier: GPL-2.0-only
/*
 * TI K3 Cortex-M4 Remote Processor(s) driver
 *
 * Copyright (C) 2021-2024 Texas Instruments Incorporated - https://www.ti.com/
 *	Hari Nagalla <hnagalla@ti.com>
 */

#include <linux/io.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/remoteproc.h>
#include <linux/reset.h>
#include <linux/slab.h>

#include "omap_remoteproc.h"
#include "remoteproc_internal.h"
#include "ti_sci_proc.h"
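
/* Device (M4 core-view) base addresses of the IRAM and DRAM regions */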
#define K3_M4_IRAM_DEV_ADDR	0x00000
#define K3_M4_DRAM_DEV_ADDR	0x30000

/**
 * struct k3_m4_rproc_mem - internal memory structure
 * @cpu_addr: MPU virtual address of the memory region
 * @bus_addr: Bus address used to access the memory region
 * @dev_addr: Device address of the memory region from remote processor view
 * @size: Size of the memory region
 */
struct k3_m4_rproc_mem {
	void __iomem *cpu_addr;
	phys_addr_t bus_addr;
	u32 dev_addr;
	size_t size;
};

/**
 * struct k3_m4_rproc_mem_data - memory definitions for a remote processor
 * @name: name for this memory entry
 * @dev_addr: device address for the memory entry
 */
struct k3_m4_rproc_mem_data {
	const char *name;
	const u32 dev_addr;
};

/**
 * struct k3_m4_rproc - k3 remote processor driver structure
 * @dev: cached device pointer
 * @mem: internal memory regions data
 * @num_mems: number of internal memory regions
 * @rmem: reserved memory regions data
 * @num_rmems: number of reserved memory regions
 * @reset: reset control handle
 * @tsp: TI-SCI processor control handle
 * @ti_sci: TI-SCI handle
 * @ti_sci_id: TI-SCI device identifier
 * @mbox: mailbox channel handle
 * @client: mailbox client to request the mailbox channel
 */
struct k3_m4_rproc {
	struct device *dev;
	struct k3_m4_rproc_mem *mem;
	int num_mems;
	struct k3_m4_rproc_mem *rmem;
	int num_rmems;
	struct reset_control *reset;
	struct ti_sci_proc *tsp;
	const struct ti_sci_handle *ti_sci;
	u32 ti_sci_id;
	struct mbox_chan *mbox;
	struct mbox_client client;
};

/**
 * k3_m4_rproc_mbox_callback() - inbound mailbox message handler
 * @client: mailbox client pointer used for requesting the mailbox channel
 * @data: mailbox payload
 *
 * This handler is invoked by the K3 mailbox driver whenever a mailbox
 * message is received. Usually, the mailbox payload simply contains
 * the index of the virtqueue that is kicked by the remote processor,
 * and we let remoteproc core handle it.
 *
 * In addition to virtqueue indices, we also have some out-of-band values
 * that indicate different events. Those values are deliberately very
 * large so they don't coincide with virtqueue indices.
 */
static void k3_m4_rproc_mbox_callback(struct mbox_client *client, void *data)
{
	struct device *dev = client->dev;
	struct rproc *rproc = dev_get_drvdata(dev);
	u32 msg = (u32)(uintptr_t)(data);

	dev_dbg(dev, "mbox msg: 0x%x\n", msg);

	switch (msg) {
	case RP_MBOX_CRASH:
		/*
		 * remoteproc detected an exception, but error recovery is not
		 * supported. So, just log this for now
		 */
		dev_err(dev, "K3 rproc %s crashed\n", rproc->name);
		break;
	case RP_MBOX_ECHO_REPLY:
		dev_info(dev, "received echo reply from %s\n", rproc->name);
		break;
	default:
		/* silently handle all other valid messages */
		if (msg >= RP_MBOX_READY && msg < RP_MBOX_END_MSG)
			return;
		if (msg > rproc->max_notifyid) {
			dev_dbg(dev, "dropping unknown message 0x%x", msg);
			return;
		}
		/* msg contains the index of the triggered vring */
		if (rproc_vq_interrupt(rproc, msg) == IRQ_NONE)
			dev_dbg(dev, "no message was found in vqid %d\n", msg);
	}
}

/*
 * Kick the remote processor to notify it of pending unprocessed messages.
 * The vqid argument is not used, as the kick is performed through a simulated
 * GPIO (a bit in an IPC interrupt-triggering register); the remote processor
 * is expected to process both its Tx and Rx virtqueues.
 */
static void k3_m4_rproc_kick(struct rproc *rproc, int vqid)
{
	struct k3_m4_rproc *kproc = rproc->priv;
	struct device *dev = kproc->dev;
	u32 msg = vqid;
	int ret;

	/*
	 * Send the index of the triggered virtqueue in the mailbox payload.
	 * NOTE: msg is cast to uintptr_t to prevent compiler warnings when
	 * void* is 64bit. It is safely cast back to u32 in the mailbox driver.
	 */
	ret = mbox_send_message(kproc->mbox, (void *)(uintptr_t)msg);
	if (ret < 0)
		dev_err(dev, "failed to send mailbox message, status = %d\n",
			ret);
}

static int k3_m4_rproc_ping_mbox(struct k3_m4_rproc *kproc)
{
	struct device *dev = kproc->dev;
	int ret;

	/*
	 * Ping the remote processor; this is only a sanity check for now and
	 * has no functional effect whatsoever.
	 *
	 * Note that the reply will _not_ arrive immediately: this message
	 * will wait in the mailbox fifo until the remote processor is booted.
	 */
	ret = mbox_send_message(kproc->mbox, (void *)RP_MBOX_ECHO_REQUEST);
	if (ret < 0) {
		dev_err(dev, "mbox_send_message failed: %d\n", ret);
		return ret;
	}

	return 0;
}

/*
 * The M4 cores have a local reset that affects only the CPU, and a
 * generic module reset that powers on the device and allows the internal
 * memories to be accessed while the local reset is asserted. This function is
 * used to release the global reset on remote cores to allow loading into the
 * internal RAMs. The .prepare() ops is invoked by the remoteproc core before
 * any firmware loading, and is followed by the .start() ops after loading to
 * actually let the remote cores run.
 */
static int k3_m4_rproc_prepare(struct rproc *rproc)
{
	struct k3_m4_rproc *kproc = rproc->priv;
	struct device *dev = kproc->dev;
	int ret;

	/* If the core is running already no need to deassert the module reset */
	if (rproc->state == RPROC_DETACHED)
		return 0;

	/*
	 * Ensure the local reset is asserted so the core doesn't
	 * execute bogus code when the module reset is released.
	 */
	ret = reset_control_assert(kproc->reset);
	if (ret) {
		dev_err(dev, "could not assert local reset\n");
		return ret;
	}

	ret = reset_control_status(kproc->reset);
	if (ret <= 0) {
		dev_err(dev, "local reset still not asserted\n");
		return ret ? ret : -EINVAL;
	}

	ret = kproc->ti_sci->ops.dev_ops.get_device(kproc->ti_sci,
						    kproc->ti_sci_id);
	if (ret) {
		dev_err(dev, "could not deassert module-reset for internal RAM loading\n");
		return ret;
	}

	return 0;
}

/*
 * This function implements the .unprepare() ops and performs the complementary
 * operations to those of the .prepare() ops. The function is used to assert the
 * global reset on applicable cores. This completes the second portion of
 * powering down the remote core. The cores themselves are only halted in the
 * .stop() callback through the local reset, and the .unprepare() ops is invoked
 * by the remoteproc core after the remoteproc is stopped to balance the global
 * reset.
 */
static int k3_m4_rproc_unprepare(struct rproc *rproc)
{
	struct k3_m4_rproc *kproc = rproc->priv;
	struct device *dev = kproc->dev;
	int ret;

	/* If the core is going to be detached do not assert the module reset */
	if (rproc->state == RPROC_ATTACHED)
		return 0;

	ret = kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
						    kproc->ti_sci_id);
	if (ret) {
		dev_err(dev, "module-reset assert failed\n");
		return ret;
	}

	return 0;
}

/*
 * This function implements the .get_loaded_rsc_table() callback and is used
 * to provide the resource table for a booted remote processor in IPC-only
 * mode. The remote processor firmwares follow a design-by-contract approach
 * and are expected to have the resource table at the base of the DDR region
 * reserved for firmware usage. This provides flexibility for the remote
 * processor to be booted by different bootloaders that may or may not have the
 * ability to publish the resource table address and size through a DT
 * property.
 */
static struct resource_table *k3_m4_get_loaded_rsc_table(struct rproc *rproc,
							  size_t *rsc_table_sz)
{
	struct k3_m4_rproc *kproc = rproc->priv;
	struct device *dev = kproc->dev;

	if (!kproc->rmem[0].cpu_addr) {
		dev_err(dev, "memory-region #1 does not exist, loaded rsc table can't be found");
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * NOTE: The resource table size is currently hard-coded to a maximum
	 * of 256 bytes. The most common resource table usage for K3 firmwares
	 * is to only have the vdev resource entry and an optional trace entry.
	 * The exact size could be computed based on resource table address, but
	 * the hard-coded value suffices to support the IPC-only mode.
	 */
	*rsc_table_sz = 256;
	return (__force struct resource_table *)kproc->rmem[0].cpu_addr;
}

/*
 * Custom function to translate a remote processor device address (internal
 * RAMs only) to a kernel virtual address. The remote processors can access
 * their RAMs at either an internal address visible only from a remote
 * processor, or at the SoC-level bus address. Both these address ranges need
 * to be checked for a translation. The translated addresses can be used either
 * by the remoteproc core for loading (when using kernel remoteproc loader), or
 * by any rpmsg bus drivers.
 */
static void *k3_m4_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
{
	struct k3_m4_rproc *kproc = rproc->priv;
	void __iomem *va = NULL;
	phys_addr_t bus_addr;
	u32 dev_addr, offset;
	size_t size;
	int i;

	if (!len)
		return NULL;

	for (i = 0; i < kproc->num_mems; i++) {
		bus_addr = kproc->mem[i].bus_addr;
		dev_addr = kproc->mem[i].dev_addr;
		size = kproc->mem[i].size;

		/* handle M4-view addresses */
		if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
			offset = da - dev_addr;
			va = kproc->mem[i].cpu_addr + offset;
			return (__force void *)va;
		}

		/* handle SoC-view addresses */
		if (da >= bus_addr && ((da + len) <= (bus_addr + size))) {
			offset = da - bus_addr;
			va = kproc->mem[i].cpu_addr + offset;
			return (__force void *)va;
		}
	}

	/* handle static DDR reserved memory regions */
	for (i = 0; i < kproc->num_rmems; i++) {
		dev_addr = kproc->rmem[i].dev_addr;
		size = kproc->rmem[i].size;

		if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
			offset = da - dev_addr;
			va = kproc->rmem[i].cpu_addr + offset;
			return (__force void *)va;
		}
	}

	return NULL;
}
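
/*
 * Illustrative example (addresses are hypothetical): if the "dram" region is
 * mapped at bus address 0x05040000 with a size of 0x10000, a device address
 * of 0x30100 matches the M4-view check (K3_M4_DRAM_DEV_ADDR + 0x100) while a
 * device address of 0x05040100 matches the SoC-view check; both translate to
 * the same kernel virtual address, cpu_addr + 0x100.
 */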

static int k3_m4_rproc_of_get_memories(struct platform_device *pdev,
				       struct k3_m4_rproc *kproc)
{
	static const char * const mem_names[] = { "iram", "dram" };
	static const u32 mem_addrs[] = { K3_M4_IRAM_DEV_ADDR, K3_M4_DRAM_DEV_ADDR };
	struct device *dev = &pdev->dev;
	struct resource *res;
	int num_mems;
	int i;

	num_mems = ARRAY_SIZE(mem_names);
	kproc->mem = devm_kcalloc(kproc->dev, num_mems,
				  sizeof(*kproc->mem), GFP_KERNEL);
	if (!kproc->mem)
		return -ENOMEM;

	for (i = 0; i < num_mems; i++) {
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						   mem_names[i]);
		if (!res) {
			dev_err(dev, "found no memory resource for %s\n",
				mem_names[i]);
			return -EINVAL;
		}
		if (!devm_request_mem_region(dev, res->start,
					     resource_size(res),
					     dev_name(dev))) {
			dev_err(dev, "could not request %s region for resource\n",
				mem_names[i]);
			return -EBUSY;
		}

		kproc->mem[i].cpu_addr = devm_ioremap_wc(dev, res->start,
							 resource_size(res));
		if (!kproc->mem[i].cpu_addr) {
			dev_err(dev, "failed to map %s memory\n",
				mem_names[i]);
			return -ENOMEM;
		}
		kproc->mem[i].bus_addr = res->start;
		kproc->mem[i].dev_addr = mem_addrs[i];
		kproc->mem[i].size = resource_size(res);

		dev_dbg(dev, "memory %8s: bus addr %pa size 0x%zx va %pK da 0x%x\n",
			mem_names[i], &kproc->mem[i].bus_addr,
			kproc->mem[i].size, kproc->mem[i].cpu_addr,
			kproc->mem[i].dev_addr);
	}
	kproc->num_mems = num_mems;

	return 0;
}

static void k3_m4_rproc_dev_mem_release(void *data)
{
	struct device *dev = data;

	of_reserved_mem_device_release(dev);
}

static int k3_m4_reserved_mem_init(struct k3_m4_rproc *kproc)
{
	struct device *dev = kproc->dev;
	struct device_node *np = dev->of_node;
	struct device_node *rmem_np;
	struct reserved_mem *rmem;
	int num_rmems;
	int ret, i;

	num_rmems = of_property_count_elems_of_size(np, "memory-region",
						    sizeof(phandle));
	if (num_rmems < 0) {
		dev_err(dev, "device does not have reserved memory regions (%d)\n",
			num_rmems);
		return -EINVAL;
	}
	if (num_rmems < 2) {
		dev_err(dev, "device needs at least two memory regions to be defined, num = %d\n",
			num_rmems);
		return -EINVAL;
	}

	/* use reserved memory region 0 for vring DMA allocations */
	ret = of_reserved_mem_device_init_by_idx(dev, np, 0);
	if (ret) {
		dev_err(dev, "device cannot initialize DMA pool (%d)\n", ret);
		return ret;
	}
	ret = devm_add_action_or_reset(dev, k3_m4_rproc_dev_mem_release, dev);
	if (ret)
		return ret;

	num_rmems--;
	kproc->rmem = devm_kcalloc(dev, num_rmems, sizeof(*kproc->rmem), GFP_KERNEL);
	if (!kproc->rmem)
		return -ENOMEM;

	/* use remaining reserved memory regions for static carveouts */
	for (i = 0; i < num_rmems; i++) {
		rmem_np = of_parse_phandle(np, "memory-region", i + 1);
		if (!rmem_np)
			return -EINVAL;

		rmem = of_reserved_mem_lookup(rmem_np);
		of_node_put(rmem_np);
		if (!rmem)
			return -EINVAL;

		kproc->rmem[i].bus_addr = rmem->base;
		/* 64-bit address regions currently not supported */
		kproc->rmem[i].dev_addr = (u32)rmem->base;
		kproc->rmem[i].size = rmem->size;
		kproc->rmem[i].cpu_addr = devm_ioremap_wc(dev, rmem->base, rmem->size);
		if (!kproc->rmem[i].cpu_addr) {
			dev_err(dev, "failed to map reserved memory#%d at %pa of size %pa\n",
				i + 1, &rmem->base, &rmem->size);
			return -ENOMEM;
		}

		dev_dbg(dev, "reserved memory%d: bus addr %pa size 0x%zx va %pK da 0x%x\n",
			i + 1, &kproc->rmem[i].bus_addr,
			kproc->rmem[i].size, kproc->rmem[i].cpu_addr,
			kproc->rmem[i].dev_addr);
	}
	kproc->num_rmems = num_rmems;

	return 0;
}

static void k3_m4_release_tsp(void *data)
{
	struct ti_sci_proc *tsp = data;

	ti_sci_proc_release(tsp);
}

/*
 * Power up the M4 remote processor.
 *
 * This function will be invoked only after the firmware for this rproc
 * was loaded, parsed successfully, and all of its resource requirements
 * were met. This callback is invoked only in remoteproc mode.
 */
static int k3_m4_rproc_start(struct rproc *rproc)
{
	struct k3_m4_rproc *kproc = rproc->priv;
	struct device *dev = kproc->dev;
	int ret;

	ret = k3_m4_rproc_ping_mbox(kproc);
	if (ret)
		return ret;

	ret = reset_control_deassert(kproc->reset);
	if (ret) {
		dev_err(dev, "local-reset deassert failed, ret = %d\n", ret);
		return ret;
	}

	return 0;
}

/*
 * Stop the M4 remote processor.
 *
 * This function puts the M4 processor into reset, and finishes processing
 * of any pending messages. This callback is invoked only in remoteproc mode.
 */
static int k3_m4_rproc_stop(struct rproc *rproc)
{
	struct k3_m4_rproc *kproc = rproc->priv;
	struct device *dev = kproc->dev;
	int ret;

	ret = reset_control_assert(kproc->reset);
	if (ret) {
		dev_err(dev, "local-reset assert failed, ret = %d\n", ret);
		return ret;
	}

	return 0;
}

/*
 * Attach to a running M4 remote processor (IPC-only mode)
 *
 * The remote processor is already booted, so there is no need to issue any
 * TI-SCI commands to boot the M4 core. This callback is used only in IPC-only
 * mode.
 */
static int k3_m4_rproc_attach(struct rproc *rproc)
{
	struct k3_m4_rproc *kproc = rproc->priv;
	int ret;

	ret = k3_m4_rproc_ping_mbox(kproc);
	if (ret)
		return ret;

	return 0;
}

/*
 * Detach from a running M4 remote processor (IPC-only mode)
 *
 * This rproc detach callback performs the opposite operation to the attach
 * callback; the M4 core is not stopped and will be left to continue running
 * its booted firmware. This callback is invoked only in IPC-only mode.
 */
static int k3_m4_rproc_detach(struct rproc *rproc)
{
	return 0;
}

static const struct rproc_ops k3_m4_rproc_ops = {
	.prepare = k3_m4_rproc_prepare,
	.unprepare = k3_m4_rproc_unprepare,
	.start = k3_m4_rproc_start,
	.stop = k3_m4_rproc_stop,
	.attach = k3_m4_rproc_attach,
	.detach = k3_m4_rproc_detach,
	.kick = k3_m4_rproc_kick,
	.da_to_va = k3_m4_rproc_da_to_va,
	.get_loaded_rsc_table = k3_m4_get_loaded_rsc_table,
};

static int k3_m4_rproc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct k3_m4_rproc *kproc;
	struct rproc *rproc;
	const char *fw_name;
	bool r_state = false;
	bool p_state = false;
	int ret;

	ret = rproc_of_parse_firmware(dev, 0, &fw_name);
	if (ret)
		return dev_err_probe(dev, ret, "failed to parse firmware-name property\n");

	rproc = devm_rproc_alloc(dev, dev_name(dev), &k3_m4_rproc_ops, fw_name,
				 sizeof(*kproc));
	if (!rproc)
		return -ENOMEM;

	rproc->has_iommu = false;
	rproc->recovery_disabled = true;
	kproc = rproc->priv;
	kproc->dev = dev;
	platform_set_drvdata(pdev, rproc);

	kproc->ti_sci = devm_ti_sci_get_by_phandle(dev, "ti,sci");
	if (IS_ERR(kproc->ti_sci))
		return dev_err_probe(dev, PTR_ERR(kproc->ti_sci),
				     "failed to get ti-sci handle\n");

	ret = of_property_read_u32(dev->of_node, "ti,sci-dev-id", &kproc->ti_sci_id);
	if (ret)
		return dev_err_probe(dev, ret, "missing 'ti,sci-dev-id' property\n");

	kproc->reset = devm_reset_control_get_exclusive(dev, NULL);
	if (IS_ERR(kproc->reset))
		return dev_err_probe(dev, PTR_ERR(kproc->reset), "failed to get reset\n");

	kproc->tsp = ti_sci_proc_of_get_tsp(dev, kproc->ti_sci);
	if (IS_ERR(kproc->tsp))
		return dev_err_probe(dev, PTR_ERR(kproc->tsp),
				     "failed to construct ti-sci proc control\n");

	ret = ti_sci_proc_request(kproc->tsp);
	if (ret < 0)
		return dev_err_probe(dev, ret, "ti_sci_proc_request failed\n");
	ret = devm_add_action_or_reset(dev, k3_m4_release_tsp, kproc->tsp);
	if (ret)
		return ret;

	ret = k3_m4_rproc_of_get_memories(pdev, kproc);
	if (ret)
		return ret;

	ret = k3_m4_reserved_mem_init(kproc);
	if (ret)
		return dev_err_probe(dev, ret, "reserved memory init failed\n");

	ret = kproc->ti_sci->ops.dev_ops.is_on(kproc->ti_sci, kproc->ti_sci_id,
					       &r_state, &p_state);
	if (ret)
		return dev_err_probe(dev, ret,
				     "failed to get initial state, mode cannot be determined\n");

	/* configure devices for either remoteproc or IPC-only mode */
	if (p_state) {
		rproc->state = RPROC_DETACHED;
		dev_info(dev, "configured M4F for IPC-only mode\n");
	} else {
		dev_info(dev, "configured M4F for remoteproc mode\n");
	}

	kproc->client.dev = dev;
	kproc->client.tx_done = NULL;
	kproc->client.rx_callback = k3_m4_rproc_mbox_callback;
	kproc->client.tx_block = false;
	kproc->client.knows_txdone = false;
	kproc->mbox = mbox_request_channel(&kproc->client, 0);
	if (IS_ERR(kproc->mbox))
		return dev_err_probe(dev, PTR_ERR(kproc->mbox),
				     "mbox_request_channel failed\n");

	ret = devm_rproc_add(dev, rproc);
	if (ret)
		return dev_err_probe(dev, ret,
				     "failed to register device with remoteproc core\n");

	return 0;
}

static const struct of_device_id k3_m4_of_match[] = {
	{ .compatible = "ti,am64-m4fss", },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, k3_m4_of_match);

static struct platform_driver k3_m4_rproc_driver = {
	.probe = k3_m4_rproc_probe,
	.driver = {
		.name = "k3-m4-rproc",
		.of_match_table = k3_m4_of_match,
	},
};

module_platform_driver(k3_m4_rproc_driver);

MODULE_AUTHOR("Hari Nagalla <hnagalla@ti.com>");
MODULE_DESCRIPTION("TI K3 M4 Remoteproc driver");
MODULE_LICENSE("GPL");