// SPDX-License-Identifier: GPL-2.0
/*
 * ZynqMP R5 Remote Processor driver
 */
7 #include <dt-bindings/power/xlnx-zynqmp-power.h>
8 #include <linux/dma-mapping.h>
9 #include <linux/firmware/xlnx-zynqmp.h>
10 #include <linux/kernel.h>
11 #include <linux/mailbox_client.h>
12 #include <linux/mailbox/zynqmp-ipi-message.h>
13 #include <linux/module.h>
14 #include <linux/of_address.h>
15 #include <linux/of_platform.h>
16 #include <linux/of_reserved_mem.h>
17 #include <linux/platform_device.h>
18 #include <linux/remoteproc.h>
20 #include "remoteproc_internal.h"
/* IPI buffer MAX length */
#define IPI_BUF_LEN_MAX	32U

/* RX mailbox client buffer max length */
#define MBOX_CLIENT_BUF_MAX	(IPI_BUF_LEN_MAX + \
				 sizeof(struct zynqmp_ipi_message))

/*
 * Magic number ("xamp") written by remote firmware ahead of its resource
 * table pointer; used to validate the rsc_tbl_data structure before attach.
 */
#define RSC_TBL_XLNX_MAGIC ((uint32_t)'x' << 24 | (uint32_t)'a' << 16 | \
			    (uint32_t)'m' << 8 | (uint32_t)'p')
/*
 * settings for RPU cluster mode which
 * reflects possible values of xlnx,cluster-mode dt-property
 */
enum zynqmp_r5_cluster_mode {
	SPLIT_MODE = 0, /* When cores run as separate processor */
	LOCKSTEP_MODE = 1, /* cores execute same code in lockstep,clk-for-clk */
	SINGLE_CPU_MODE = 2, /* core0 is held in reset and only core1 runs */
};
43 * struct mem_bank_data - Memory Bank description
45 * @addr: Start address of memory bank
47 * @size: Size of Memory bank
48 * @pm_domain_id: Power-domains id of memory bank for firmware to turn on/off
49 * @bank_name: name of the bank for remoteproc framework
51 struct mem_bank_data
{
60 * struct zynqmp_sram_bank - sram bank description
62 * @sram_res: sram address region information
63 * @da: device address of sram
65 struct zynqmp_sram_bank
{
66 struct resource sram_res
;
73 * @rx_mc_buf: to copy data from mailbox rx channel
74 * @tx_mc_buf: to copy data to mailbox tx channel
75 * @r5_core: this mailbox's corresponding r5_core pointer
76 * @mbox_work: schedule work after receiving data from mailbox
77 * @mbox_cl: mailbox client
78 * @tx_chan: mailbox tx channel
79 * @rx_chan: mailbox rx channel
82 unsigned char rx_mc_buf
[MBOX_CLIENT_BUF_MAX
];
83 unsigned char tx_mc_buf
[MBOX_CLIENT_BUF_MAX
];
84 struct zynqmp_r5_core
*r5_core
;
85 struct work_struct mbox_work
;
86 struct mbox_client mbox_cl
;
87 struct mbox_chan
*tx_chan
;
88 struct mbox_chan
*rx_chan
;
94 * Platform specific data structure used to sync resource table address.
95 * It's important to maintain order and size of each field on remote side.
97 * @version: version of data structure
98 * @magic_num: 32-bit magic number.
99 * @comp_magic_num: complement of above magic number
100 * @rsc_tbl_size: resource table size
101 * @rsc_tbl: resource table address
103 struct rsc_tbl_data
{
106 const u32 comp_magic_num
;
107 const u32 rsc_tbl_size
;
108 const uintptr_t rsc_tbl
;
112 * Hardcoded TCM bank values. This will stay in driver to maintain backward
113 * compatibility with device-tree that does not have TCM information.
115 static const struct mem_bank_data zynqmp_tcm_banks_split
[] = {
116 {0xffe00000UL
, 0x0, 0x10000UL
, PD_R5_0_ATCM
, "atcm0"}, /* TCM 64KB each */
117 {0xffe20000UL
, 0x20000, 0x10000UL
, PD_R5_0_BTCM
, "btcm0"},
118 {0xffe90000UL
, 0x0, 0x10000UL
, PD_R5_1_ATCM
, "atcm1"},
119 {0xffeb0000UL
, 0x20000, 0x10000UL
, PD_R5_1_BTCM
, "btcm1"},
122 /* In lockstep mode cluster uses each 64KB TCM from second core as well */
123 static const struct mem_bank_data zynqmp_tcm_banks_lockstep
[] = {
124 {0xffe00000UL
, 0x0, 0x10000UL
, PD_R5_0_ATCM
, "atcm0"}, /* TCM 64KB each */
125 {0xffe20000UL
, 0x20000, 0x10000UL
, PD_R5_0_BTCM
, "btcm0"},
126 {0xffe10000UL
, 0x10000, 0x10000UL
, PD_R5_1_ATCM
, "atcm1"},
127 {0xffe30000UL
, 0x30000, 0x10000UL
, PD_R5_1_BTCM
, "btcm1"},
131 * struct zynqmp_r5_core
133 * @rsc_tbl_va: resource table virtual address
134 * @sram: Array of sram memories assigned to this core
135 * @num_sram: number of sram for this core
136 * @dev: device of RPU instance
137 * @np: device node of RPU instance
138 * @tcm_bank_count: number TCM banks accessible to this RPU
139 * @tcm_banks: array of each TCM bank data
140 * @rproc: rproc handle
141 * @rsc_tbl_size: resource table size retrieved from remote
142 * @pm_domain_id: RPU CPU power domain id
143 * @ipi: pointer to mailbox information
145 struct zynqmp_r5_core
{
146 void __iomem
*rsc_tbl_va
;
147 struct zynqmp_sram_bank
*sram
;
150 struct device_node
*np
;
152 struct mem_bank_data
**tcm_banks
;
156 struct mbox_info
*ipi
;
160 * struct zynqmp_r5_cluster
162 * @dev: r5f subsystem cluster device node
163 * @mode: cluster mode of type zynqmp_r5_cluster_mode
164 * @core_count: number of r5 cores used for this cluster mode
165 * @r5_cores: Array of pointers pointing to r5 core
167 struct zynqmp_r5_cluster
{
169 enum zynqmp_r5_cluster_mode mode
;
171 struct zynqmp_r5_core
**r5_cores
;
175 * event_notified_idr_cb() - callback for vq_interrupt per notifyid
176 * @id: rproc->notify id
177 * @ptr: pointer to idr private data
178 * @data: data passed to idr_for_each callback
180 * Pass notification to remoteproc virtio
182 * Return: 0. having return is to satisfy the idr_for_each() function
183 * pointer input argument requirement.
185 static int event_notified_idr_cb(int id
, void *ptr
, void *data
)
187 struct rproc
*rproc
= data
;
189 if (rproc_vq_interrupt(rproc
, id
) == IRQ_NONE
)
190 dev_dbg(&rproc
->dev
, "data not found for vqid=%d\n", id
);
196 * handle_event_notified() - remoteproc notification work function
197 * @work: pointer to the work structure
199 * It checks each registered remoteproc notify IDs.
201 static void handle_event_notified(struct work_struct
*work
)
203 struct mbox_info
*ipi
;
206 ipi
= container_of(work
, struct mbox_info
, mbox_work
);
207 rproc
= ipi
->r5_core
->rproc
;
210 * We only use IPI for interrupt. The RPU firmware side may or may
211 * not write the notifyid when it trigger IPI.
212 * And thus, we scan through all the registered notifyids and
213 * find which one is valid to get the message.
214 * Even if message from firmware is NULL, we attempt to get vqid
216 idr_for_each(&rproc
->notifyids
, event_notified_idr_cb
, rproc
);
220 * zynqmp_r5_mb_rx_cb() - receive channel mailbox callback
221 * @cl: mailbox client
222 * @msg: message pointer
224 * Receive data from ipi buffer, ack interrupt and then
225 * it will schedule the R5 notification work.
227 static void zynqmp_r5_mb_rx_cb(struct mbox_client
*cl
, void *msg
)
229 struct zynqmp_ipi_message
*ipi_msg
, *buf_msg
;
230 struct mbox_info
*ipi
;
233 ipi
= container_of(cl
, struct mbox_info
, mbox_cl
);
235 /* copy data from ipi buffer to r5_core */
236 ipi_msg
= (struct zynqmp_ipi_message
*)msg
;
237 buf_msg
= (struct zynqmp_ipi_message
*)ipi
->rx_mc_buf
;
239 if (len
> IPI_BUF_LEN_MAX
) {
240 dev_warn(cl
->dev
, "msg size exceeded than %d\n",
242 len
= IPI_BUF_LEN_MAX
;
245 memcpy(buf_msg
->data
, ipi_msg
->data
, len
);
247 /* received and processed interrupt ack */
248 if (mbox_send_message(ipi
->rx_chan
, NULL
) < 0)
249 dev_err(cl
->dev
, "ack failed to mbox rx_chan\n");
251 schedule_work(&ipi
->mbox_work
);
255 * zynqmp_r5_setup_mbox() - Setup mailboxes related properties
256 * this is used for each individual R5 core
258 * @cdev: child node device
260 * Function to setup mailboxes related properties
261 * return : NULL if failed else pointer to mbox_info
263 static struct mbox_info
*zynqmp_r5_setup_mbox(struct device
*cdev
)
265 struct mbox_client
*mbox_cl
;
266 struct mbox_info
*ipi
;
268 ipi
= kzalloc(sizeof(*ipi
), GFP_KERNEL
);
272 mbox_cl
= &ipi
->mbox_cl
;
273 mbox_cl
->rx_callback
= zynqmp_r5_mb_rx_cb
;
274 mbox_cl
->tx_block
= false;
275 mbox_cl
->knows_txdone
= false;
276 mbox_cl
->tx_done
= NULL
;
279 /* Request TX and RX channels */
280 ipi
->tx_chan
= mbox_request_channel_byname(mbox_cl
, "tx");
281 if (IS_ERR(ipi
->tx_chan
)) {
284 dev_warn(cdev
, "mbox tx channel request failed\n");
288 ipi
->rx_chan
= mbox_request_channel_byname(mbox_cl
, "rx");
289 if (IS_ERR(ipi
->rx_chan
)) {
290 mbox_free_channel(ipi
->tx_chan
);
294 dev_warn(cdev
, "mbox rx channel request failed\n");
298 INIT_WORK(&ipi
->mbox_work
, handle_event_notified
);
303 static void zynqmp_r5_free_mbox(struct mbox_info
*ipi
)
309 mbox_free_channel(ipi
->tx_chan
);
314 mbox_free_channel(ipi
->rx_chan
);
322 * zynqmp_r5_core_kick() - kick a firmware if mbox is provided
323 * @rproc: r5 core's corresponding rproc structure
324 * @vqid: virtqueue ID
326 static void zynqmp_r5_rproc_kick(struct rproc
*rproc
, int vqid
)
328 struct zynqmp_r5_core
*r5_core
= rproc
->priv
;
329 struct device
*dev
= r5_core
->dev
;
330 struct zynqmp_ipi_message
*mb_msg
;
331 struct mbox_info
*ipi
;
338 mb_msg
= (struct zynqmp_ipi_message
*)ipi
->tx_mc_buf
;
339 memcpy(mb_msg
->data
, &vqid
, sizeof(vqid
));
340 mb_msg
->len
= sizeof(vqid
);
341 ret
= mbox_send_message(ipi
->tx_chan
, mb_msg
);
343 dev_warn(dev
, "failed to send message\n");
347 * zynqmp_r5_rproc_start()
348 * @rproc: single R5 core's corresponding rproc instance
350 * Start R5 Core from designated boot address.
352 * return 0 on success, otherwise non-zero value on failure
354 static int zynqmp_r5_rproc_start(struct rproc
*rproc
)
356 struct zynqmp_r5_core
*r5_core
= rproc
->priv
;
357 enum rpu_boot_mem bootmem
;
361 * The exception vector pointers (EVP) refer to the base-address of
362 * exception vectors (for reset, IRQ, FIQ, etc). The reset-vector
363 * starts at the base-address and subsequent vectors are on 4-byte
366 * Exception vectors can start either from 0x0000_0000 (LOVEC) or
367 * from 0xFFFF_0000 (HIVEC) which is mapped in the OCM (On-Chip Memory)
369 * Usually firmware will put Exception vectors at LOVEC.
371 * It is not recommend that you change the exception vector.
372 * Changing the EVP to HIVEC will result in increased interrupt latency
373 * and jitter. Also, if the OCM is secured and the Cortex-R5F processor
374 * is non-secured, then the Cortex-R5F processor cannot access the
375 * HIVEC exception vectors in the OCM.
377 bootmem
= (rproc
->bootaddr
>= 0xFFFC0000) ?
378 PM_RPU_BOOTMEM_HIVEC
: PM_RPU_BOOTMEM_LOVEC
;
380 dev_dbg(r5_core
->dev
, "RPU boot addr 0x%llx from %s.", rproc
->bootaddr
,
381 bootmem
== PM_RPU_BOOTMEM_HIVEC
? "OCM" : "TCM");
383 ret
= zynqmp_pm_request_wake(r5_core
->pm_domain_id
, 1,
384 bootmem
, ZYNQMP_PM_REQUEST_ACK_NO
);
386 dev_err(r5_core
->dev
,
387 "failed to start RPU = 0x%x\n", r5_core
->pm_domain_id
);
392 * zynqmp_r5_rproc_stop()
393 * @rproc: single R5 core's corresponding rproc instance
395 * Power down R5 Core.
397 * return 0 on success, otherwise non-zero value on failure
399 static int zynqmp_r5_rproc_stop(struct rproc
*rproc
)
401 struct zynqmp_r5_core
*r5_core
= rproc
->priv
;
404 ret
= zynqmp_pm_force_pwrdwn(r5_core
->pm_domain_id
,
405 ZYNQMP_PM_REQUEST_ACK_BLOCKING
);
407 dev_err(r5_core
->dev
, "failed to stop remoteproc RPU %d\n", ret
);
413 * zynqmp_r5_mem_region_map()
414 * @rproc: single R5 core's corresponding rproc instance
415 * @mem: mem descriptor to map reserved memory-regions
417 * Callback to map va for memory-region's carveout.
419 * return 0 on success, otherwise non-zero value on failure
421 static int zynqmp_r5_mem_region_map(struct rproc
*rproc
,
422 struct rproc_mem_entry
*mem
)
426 va
= ioremap_wc(mem
->dma
, mem
->len
);
427 if (IS_ERR_OR_NULL(va
))
430 mem
->va
= (void *)va
;
436 * zynqmp_r5_rproc_mem_unmap
437 * @rproc: single R5 core's corresponding rproc instance
438 * @mem: mem entry to unmap
440 * Unmap memory-region carveout
442 * return: always returns 0
444 static int zynqmp_r5_mem_region_unmap(struct rproc
*rproc
,
445 struct rproc_mem_entry
*mem
)
447 iounmap((void __iomem
*)mem
->va
);
452 * add_mem_regions_carveout()
453 * @rproc: single R5 core's corresponding rproc instance
455 * Construct rproc mem carveouts from memory-region property nodes
457 * return 0 on success, otherwise non-zero value on failure
459 static int add_mem_regions_carveout(struct rproc
*rproc
)
461 struct rproc_mem_entry
*rproc_mem
;
462 struct zynqmp_r5_core
*r5_core
;
463 struct of_phandle_iterator it
;
464 struct reserved_mem
*rmem
;
467 r5_core
= rproc
->priv
;
469 /* Register associated reserved memory regions */
470 of_phandle_iterator_init(&it
, r5_core
->np
, "memory-region", NULL
, 0);
472 while (of_phandle_iterator_next(&it
) == 0) {
473 rmem
= of_reserved_mem_lookup(it
.node
);
475 of_node_put(it
.node
);
476 dev_err(&rproc
->dev
, "unable to acquire memory-region\n");
480 if (!strcmp(it
.node
->name
, "vdev0buffer")) {
481 /* Init reserved memory for vdev buffer */
482 rproc_mem
= rproc_of_resm_mem_entry_init(&rproc
->dev
, i
,
487 /* Register associated reserved memory regions */
488 rproc_mem
= rproc_mem_entry_init(&rproc
->dev
, NULL
,
489 (dma_addr_t
)rmem
->base
,
490 rmem
->size
, rmem
->base
,
491 zynqmp_r5_mem_region_map
,
492 zynqmp_r5_mem_region_unmap
,
497 of_node_put(it
.node
);
501 rproc_add_carveout(rproc
, rproc_mem
);
502 rproc_coredump_add_segment(rproc
, rmem
->base
, rmem
->size
);
504 dev_dbg(&rproc
->dev
, "reserved mem carveout %s addr=%llx, size=0x%llx",
505 it
.node
->name
, rmem
->base
, rmem
->size
);
512 static int add_sram_carveouts(struct rproc
*rproc
)
514 struct zynqmp_r5_core
*r5_core
= rproc
->priv
;
515 struct rproc_mem_entry
*rproc_mem
;
516 struct zynqmp_sram_bank
*sram
;
521 for (i
= 0; i
< r5_core
->num_sram
; i
++) {
522 sram
= &r5_core
->sram
[i
];
524 dma_addr
= (dma_addr_t
)sram
->sram_res
.start
;
526 len
= resource_size(&sram
->sram_res
);
529 rproc_mem
= rproc_mem_entry_init(&rproc
->dev
, NULL
,
532 zynqmp_r5_mem_region_map
,
533 zynqmp_r5_mem_region_unmap
,
534 sram
->sram_res
.name
);
536 dev_err(&rproc
->dev
, "failed to add sram %s da=0x%x, size=0x%lx",
537 sram
->sram_res
.name
, da
, len
);
541 rproc_add_carveout(rproc
, rproc_mem
);
542 rproc_coredump_add_segment(rproc
, da
, len
);
544 dev_dbg(&rproc
->dev
, "sram carveout %s addr=%llx, da=0x%x, size=0x%lx",
545 sram
->sram_res
.name
, dma_addr
, da
, len
);
553 * @rproc: single R5 core's corresponding rproc instance
554 * @mem: tcm mem entry to unmap
556 * Unmap TCM banks when powering down R5 core.
560 static int tcm_mem_unmap(struct rproc
*rproc
, struct rproc_mem_entry
*mem
)
562 iounmap((void __iomem
*)mem
->va
);
569 * @rproc: single R5 core's corresponding rproc instance
570 * @mem: tcm memory entry descriptor
572 * Given TCM bank entry, this func setup virtual address for TCM bank
573 * remoteproc carveout. It also takes care of va to da address translation
575 * return 0 on success, otherwise non-zero value on failure
577 static int tcm_mem_map(struct rproc
*rproc
,
578 struct rproc_mem_entry
*mem
)
582 va
= ioremap_wc(mem
->dma
, mem
->len
);
583 if (IS_ERR_OR_NULL(va
))
586 /* Update memory entry va */
587 mem
->va
= (void *)va
;
590 memset_io(va
, 0, mem
->len
);
597 * @rproc: single R5 core's corresponding rproc instance
599 * allocate and add remoteproc carveout for TCM memory
601 * return 0 on success, otherwise non-zero value on failure
603 static int add_tcm_banks(struct rproc
*rproc
)
605 struct rproc_mem_entry
*rproc_mem
;
606 struct zynqmp_r5_core
*r5_core
;
607 int i
, num_banks
, ret
;
608 phys_addr_t bank_addr
;
615 r5_core
= rproc
->priv
;
617 num_banks
= r5_core
->tcm_bank_count
;
620 * Power-on Each 64KB TCM,
621 * register its address space, map and unmap functions
622 * and add carveouts accordingly
624 for (i
= 0; i
< num_banks
; i
++) {
625 bank_addr
= r5_core
->tcm_banks
[i
]->addr
;
626 da
= r5_core
->tcm_banks
[i
]->da
;
627 bank_name
= r5_core
->tcm_banks
[i
]->bank_name
;
628 bank_size
= r5_core
->tcm_banks
[i
]->size
;
629 pm_domain_id
= r5_core
->tcm_banks
[i
]->pm_domain_id
;
631 ret
= zynqmp_pm_request_node(pm_domain_id
,
632 ZYNQMP_PM_CAPABILITY_ACCESS
, 0,
633 ZYNQMP_PM_REQUEST_ACK_BLOCKING
);
635 dev_err(dev
, "failed to turn on TCM 0x%x", pm_domain_id
);
639 dev_dbg(dev
, "TCM carveout %s addr=%llx, da=0x%x, size=0x%lx",
640 bank_name
, bank_addr
, da
, bank_size
);
643 * In DETACHED state firmware is already running so no need to
644 * request add TCM registers. However, request TCM PD node to let
645 * platform management firmware know that TCM is in use.
647 if (rproc
->state
== RPROC_DETACHED
)
650 rproc_mem
= rproc_mem_entry_init(dev
, NULL
, bank_addr
,
652 tcm_mem_map
, tcm_mem_unmap
,
656 zynqmp_pm_release_node(pm_domain_id
);
660 rproc_add_carveout(rproc
, rproc_mem
);
661 rproc_coredump_add_segment(rproc
, da
, bank_size
);
667 /* If failed, Turn off all TCM banks turned on before */
668 for (i
--; i
>= 0; i
--) {
669 pm_domain_id
= r5_core
->tcm_banks
[i
]->pm_domain_id
;
670 zynqmp_pm_release_node(pm_domain_id
);
676 * zynqmp_r5_parse_fw()
677 * @rproc: single R5 core's corresponding rproc instance
678 * @fw: ptr to firmware to be loaded onto r5 core
680 * get resource table if available
682 * return 0 on success, otherwise non-zero value on failure
684 static int zynqmp_r5_parse_fw(struct rproc
*rproc
, const struct firmware
*fw
)
688 ret
= rproc_elf_load_rsc_table(rproc
, fw
);
689 if (ret
== -EINVAL
) {
691 * resource table only required for IPC.
692 * if not present, this is not necessarily an error;
693 * for example, loading r5 hello world application
694 * so simply inform user and keep going.
696 dev_info(&rproc
->dev
, "no resource table found.\n");
703 * zynqmp_r5_rproc_prepare()
704 * adds carveouts for TCM bank and reserved memory regions
706 * @rproc: Device node of each rproc
708 * Return: 0 for success else < 0 error code
710 static int zynqmp_r5_rproc_prepare(struct rproc
*rproc
)
714 ret
= add_tcm_banks(rproc
);
716 dev_err(&rproc
->dev
, "failed to get TCM banks, err %d\n", ret
);
720 ret
= add_mem_regions_carveout(rproc
);
722 dev_err(&rproc
->dev
, "failed to get reserve mem regions %d\n", ret
);
726 ret
= add_sram_carveouts(rproc
);
728 dev_err(&rproc
->dev
, "failed to get sram carveout %d\n", ret
);
736 * zynqmp_r5_rproc_unprepare()
737 * Turns off TCM banks using power-domain id
739 * @rproc: Device node of each rproc
743 static int zynqmp_r5_rproc_unprepare(struct rproc
*rproc
)
745 struct zynqmp_r5_core
*r5_core
;
749 r5_core
= rproc
->priv
;
751 for (i
= 0; i
< r5_core
->tcm_bank_count
; i
++) {
752 pm_domain_id
= r5_core
->tcm_banks
[i
]->pm_domain_id
;
753 if (zynqmp_pm_release_node(pm_domain_id
))
754 dev_warn(r5_core
->dev
,
755 "can't turn off TCM bank 0x%x", pm_domain_id
);
761 static struct resource_table
*zynqmp_r5_get_loaded_rsc_table(struct rproc
*rproc
,
764 struct zynqmp_r5_core
*r5_core
;
766 r5_core
= rproc
->priv
;
768 *size
= r5_core
->rsc_tbl_size
;
770 return (struct resource_table
*)r5_core
->rsc_tbl_va
;
773 static int zynqmp_r5_get_rsc_table_va(struct zynqmp_r5_core
*r5_core
)
775 struct resource_table
*rsc_tbl_addr
;
776 struct device
*dev
= r5_core
->dev
;
777 struct rsc_tbl_data
*rsc_data_va
;
778 struct resource res_mem
;
779 struct device_node
*np
;
783 * It is expected from remote processor firmware to provide resource
784 * table address via struct rsc_tbl_data data structure.
785 * Start address of first entry under "memory-region" property list
786 * contains that data structure which holds resource table address, size
787 * and some magic number to validate correct resource table entry.
789 np
= of_parse_phandle(r5_core
->np
, "memory-region", 0);
791 dev_err(dev
, "failed to get memory region dev node\n");
795 ret
= of_address_to_resource(np
, 0, &res_mem
);
798 dev_err(dev
, "failed to get memory-region resource addr\n");
802 rsc_data_va
= (struct rsc_tbl_data
*)ioremap_wc(res_mem
.start
,
803 sizeof(struct rsc_tbl_data
));
805 dev_err(dev
, "failed to map resource table data address\n");
810 * If RSC_TBL_XLNX_MAGIC number and its complement isn't found then
811 * do not consider resource table address valid and don't attach
813 if (rsc_data_va
->magic_num
!= RSC_TBL_XLNX_MAGIC
||
814 rsc_data_va
->comp_magic_num
!= ~RSC_TBL_XLNX_MAGIC
) {
815 dev_dbg(dev
, "invalid magic number, won't attach\n");
819 r5_core
->rsc_tbl_va
= ioremap_wc(rsc_data_va
->rsc_tbl
,
820 rsc_data_va
->rsc_tbl_size
);
821 if (!r5_core
->rsc_tbl_va
) {
822 dev_err(dev
, "failed to get resource table va\n");
826 rsc_tbl_addr
= (struct resource_table
*)r5_core
->rsc_tbl_va
;
829 * As of now resource table version 1 is expected. Don't fail to attach
830 * but warn users about it.
832 if (rsc_tbl_addr
->ver
!= 1)
833 dev_warn(dev
, "unexpected resource table version %d\n",
836 r5_core
->rsc_tbl_size
= rsc_data_va
->rsc_tbl_size
;
838 iounmap((void __iomem
*)rsc_data_va
);
843 static int zynqmp_r5_attach(struct rproc
*rproc
)
845 dev_dbg(&rproc
->dev
, "rproc %d attached\n", rproc
->index
);
static int zynqmp_r5_detach(struct rproc *rproc)
{
	/*
	 * Generate last notification to remote after clearing virtio flag.
	 * Remote can avoid polling on virtio reset flag if kick is generated
	 * during detach by host and check virtio reset flag on kick interrupt.
	 */
	zynqmp_r5_rproc_kick(rproc, 0);

	return 0;
}
862 static const struct rproc_ops zynqmp_r5_rproc_ops
= {
863 .prepare
= zynqmp_r5_rproc_prepare
,
864 .unprepare
= zynqmp_r5_rproc_unprepare
,
865 .start
= zynqmp_r5_rproc_start
,
866 .stop
= zynqmp_r5_rproc_stop
,
867 .load
= rproc_elf_load_segments
,
868 .parse_fw
= zynqmp_r5_parse_fw
,
869 .find_loaded_rsc_table
= rproc_elf_find_loaded_rsc_table
,
870 .sanity_check
= rproc_elf_sanity_check
,
871 .get_boot_addr
= rproc_elf_get_boot_addr
,
872 .kick
= zynqmp_r5_rproc_kick
,
873 .get_loaded_rsc_table
= zynqmp_r5_get_loaded_rsc_table
,
874 .attach
= zynqmp_r5_attach
,
875 .detach
= zynqmp_r5_detach
,
879 * zynqmp_r5_add_rproc_core()
880 * Allocate and add struct rproc object for each r5f core
881 * This is called for each individual r5f core
883 * @cdev: Device node of each r5 core
885 * Return: zynqmp_r5_core object for success else error code pointer
887 static struct zynqmp_r5_core
*zynqmp_r5_add_rproc_core(struct device
*cdev
)
889 struct zynqmp_r5_core
*r5_core
;
890 struct rproc
*r5_rproc
;
893 /* Set up DMA mask */
894 ret
= dma_set_coherent_mask(cdev
, DMA_BIT_MASK(32));
898 /* Allocate remoteproc instance */
899 r5_rproc
= rproc_alloc(cdev
, dev_name(cdev
),
900 &zynqmp_r5_rproc_ops
,
901 NULL
, sizeof(struct zynqmp_r5_core
));
903 dev_err(cdev
, "failed to allocate memory for rproc instance\n");
904 return ERR_PTR(-ENOMEM
);
907 rproc_coredump_set_elf_info(r5_rproc
, ELFCLASS32
, EM_ARM
);
909 r5_rproc
->auto_boot
= false;
910 r5_core
= r5_rproc
->priv
;
912 r5_core
->np
= dev_of_node(cdev
);
914 dev_err(cdev
, "can't get device node for r5 core\n");
919 /* Add R5 remoteproc core */
920 ret
= rproc_add(r5_rproc
);
922 dev_err(cdev
, "failed to add r5 remoteproc\n");
927 * If firmware is already available in the memory then move rproc state
928 * to DETACHED. Firmware can be preloaded via debugger or by any other
929 * agent (processors) in the system.
930 * If firmware isn't available in the memory and resource table isn't
931 * found, then rproc state remains OFFLINE.
933 if (!zynqmp_r5_get_rsc_table_va(r5_core
))
934 r5_rproc
->state
= RPROC_DETACHED
;
936 r5_core
->rproc
= r5_rproc
;
940 rproc_free(r5_rproc
);
944 static int zynqmp_r5_get_sram_banks(struct zynqmp_r5_core
*r5_core
)
946 struct device_node
*np
= r5_core
->np
;
947 struct device
*dev
= r5_core
->dev
;
948 struct zynqmp_sram_bank
*sram
;
949 struct device_node
*sram_np
;
950 int num_sram
, i
, ret
;
953 /* "sram" is optional property. Do not fail, if unavailable. */
954 if (!of_property_present(r5_core
->np
, "sram"))
957 num_sram
= of_property_count_elems_of_size(np
, "sram", sizeof(phandle
));
959 dev_err(dev
, "Invalid sram property, ret = %d\n",
964 sram
= devm_kcalloc(dev
, num_sram
,
965 sizeof(struct zynqmp_sram_bank
), GFP_KERNEL
);
969 for (i
= 0; i
< num_sram
; i
++) {
970 sram_np
= of_parse_phandle(np
, "sram", i
);
972 dev_err(dev
, "failed to get sram %d phandle\n", i
);
976 if (!of_device_is_available(sram_np
)) {
977 dev_err(dev
, "sram device not available\n");
982 ret
= of_address_to_resource(sram_np
, 0, &sram
[i
].sram_res
);
984 dev_err(dev
, "addr to res failed\n");
988 /* Get SRAM device address */
989 ret
= of_property_read_reg(sram_np
, i
, &abs_addr
, &size
);
991 dev_err(dev
, "failed to get reg property\n");
995 sram
[i
].da
= (u32
)abs_addr
;
997 of_node_put(sram_np
);
999 dev_dbg(dev
, "sram %d: name=%s, addr=0x%llx, da=0x%x, size=0x%llx\n",
1000 i
, sram
[i
].sram_res
.name
, sram
[i
].sram_res
.start
,
1001 sram
[i
].da
, resource_size(&sram
[i
].sram_res
));
1004 r5_core
->sram
= sram
;
1005 r5_core
->num_sram
= num_sram
;
1010 of_node_put(sram_np
);
1015 static int zynqmp_r5_get_tcm_node_from_dt(struct zynqmp_r5_cluster
*cluster
)
1017 int i
, j
, tcm_bank_count
, ret
, tcm_pd_idx
, pd_count
;
1018 struct of_phandle_args out_args
;
1019 struct zynqmp_r5_core
*r5_core
;
1020 struct platform_device
*cpdev
;
1021 struct mem_bank_data
*tcm
;
1022 struct device_node
*np
;
1023 struct resource
*res
;
1027 for (i
= 0; i
< cluster
->core_count
; i
++) {
1028 r5_core
= cluster
->r5_cores
[i
];
1032 pd_count
= of_count_phandle_with_args(np
, "power-domains",
1033 "#power-domain-cells");
1035 if (pd_count
<= 0) {
1036 dev_err(dev
, "invalid power-domains property, %d\n", pd_count
);
1040 /* First entry in power-domains list is for r5 core, rest for TCM. */
1041 tcm_bank_count
= pd_count
- 1;
1043 if (tcm_bank_count
<= 0) {
1044 dev_err(dev
, "invalid TCM count %d\n", tcm_bank_count
);
1048 r5_core
->tcm_banks
= devm_kcalloc(dev
, tcm_bank_count
,
1049 sizeof(struct mem_bank_data
*),
1051 if (!r5_core
->tcm_banks
)
1054 r5_core
->tcm_bank_count
= tcm_bank_count
;
1055 for (j
= 0, tcm_pd_idx
= 1; j
< tcm_bank_count
; j
++, tcm_pd_idx
++) {
1056 tcm
= devm_kzalloc(dev
, sizeof(struct mem_bank_data
),
1061 r5_core
->tcm_banks
[j
] = tcm
;
1063 /* Get power-domains id of TCM. */
1064 ret
= of_parse_phandle_with_args(np
, "power-domains",
1065 "#power-domain-cells",
1066 tcm_pd_idx
, &out_args
);
1068 dev_err(r5_core
->dev
,
1069 "failed to get tcm %d pm domain, ret %d\n",
1073 tcm
->pm_domain_id
= out_args
.args
[0];
1074 of_node_put(out_args
.np
);
1076 /* Get TCM address without translation. */
1077 ret
= of_property_read_reg(np
, j
, &abs_addr
, &size
);
1079 dev_err(dev
, "failed to get reg property\n");
1084 * Remote processor can address only 32 bits
1085 * so convert 64-bits into 32-bits. This will discard
1086 * any unwanted upper 32-bits.
1088 tcm
->da
= (u32
)abs_addr
;
1089 tcm
->size
= (u32
)size
;
1091 cpdev
= to_platform_device(dev
);
1092 res
= platform_get_resource(cpdev
, IORESOURCE_MEM
, j
);
1094 dev_err(dev
, "failed to get tcm resource\n");
1098 tcm
->addr
= (u32
)res
->start
;
1099 tcm
->bank_name
= (char *)res
->name
;
1100 res
= devm_request_mem_region(dev
, tcm
->addr
, tcm
->size
,
1103 dev_err(dev
, "failed to request tcm resource\n");
1113 * zynqmp_r5_get_tcm_node()
1114 * Ideally this function should parse tcm node and store information
1115 * in r5_core instance. For now, Hardcoded TCM information is used.
1116 * This approach is used as TCM bindings for system-dt is being developed
1118 * @cluster: pointer to zynqmp_r5_cluster type object
1120 * Return: 0 for success and < 0 error code for failure.
1122 static int zynqmp_r5_get_tcm_node(struct zynqmp_r5_cluster
*cluster
)
1124 const struct mem_bank_data
*zynqmp_tcm_banks
;
1125 struct device
*dev
= cluster
->dev
;
1126 struct zynqmp_r5_core
*r5_core
;
1127 int tcm_bank_count
, tcm_node
;
1130 if (cluster
->mode
== SPLIT_MODE
) {
1131 zynqmp_tcm_banks
= zynqmp_tcm_banks_split
;
1132 tcm_bank_count
= ARRAY_SIZE(zynqmp_tcm_banks_split
);
1134 zynqmp_tcm_banks
= zynqmp_tcm_banks_lockstep
;
1135 tcm_bank_count
= ARRAY_SIZE(zynqmp_tcm_banks_lockstep
);
1138 /* count per core tcm banks */
1139 tcm_bank_count
= tcm_bank_count
/ cluster
->core_count
;
1142 * r5 core 0 will use all of TCM banks in lockstep mode.
1143 * In split mode, r5 core0 will use 128k and r5 core1 will use another
1144 * 128k. Assign TCM banks to each core accordingly
1147 for (i
= 0; i
< cluster
->core_count
; i
++) {
1148 r5_core
= cluster
->r5_cores
[i
];
1149 r5_core
->tcm_banks
= devm_kcalloc(dev
, tcm_bank_count
,
1150 sizeof(struct mem_bank_data
*),
1152 if (!r5_core
->tcm_banks
)
1155 for (j
= 0; j
< tcm_bank_count
; j
++) {
1157 * Use pre-defined TCM reg values.
1158 * Eventually this should be replaced by values
1161 r5_core
->tcm_banks
[j
] =
1162 (struct mem_bank_data
*)&zynqmp_tcm_banks
[tcm_node
];
1166 r5_core
->tcm_bank_count
= tcm_bank_count
;
1173 * zynqmp_r5_core_init()
1174 * Create and initialize zynqmp_r5_core type object
1176 * @cluster: pointer to zynqmp_r5_cluster type object
1177 * @fw_reg_val: value expected by firmware to configure RPU cluster mode
1178 * @tcm_mode: value expected by fw to configure TCM mode (lockstep or split)
1180 * Return: 0 for success and error code for failure.
1182 static int zynqmp_r5_core_init(struct zynqmp_r5_cluster
*cluster
,
1183 enum rpu_oper_mode fw_reg_val
,
1184 enum rpu_tcm_comb tcm_mode
)
1186 struct device
*dev
= cluster
->dev
;
1187 struct zynqmp_r5_core
*r5_core
;
1188 int ret
= -EINVAL
, i
;
1190 r5_core
= cluster
->r5_cores
[0];
1192 /* Maintain backward compatibility for zynqmp by using hardcode TCM address. */
1193 if (of_property_present(r5_core
->np
, "reg"))
1194 ret
= zynqmp_r5_get_tcm_node_from_dt(cluster
);
1195 else if (device_is_compatible(dev
, "xlnx,zynqmp-r5fss"))
1196 ret
= zynqmp_r5_get_tcm_node(cluster
);
1199 dev_err(dev
, "can't get tcm, err %d\n", ret
);
1203 for (i
= 0; i
< cluster
->core_count
; i
++) {
1204 r5_core
= cluster
->r5_cores
[i
];
1206 /* Initialize r5 cores with power-domains parsed from dts */
1207 ret
= of_property_read_u32_index(r5_core
->np
, "power-domains",
1208 1, &r5_core
->pm_domain_id
);
1210 dev_err(dev
, "failed to get power-domains property\n");
1214 ret
= zynqmp_pm_set_rpu_mode(r5_core
->pm_domain_id
, fw_reg_val
);
1216 dev_err(r5_core
->dev
, "failed to set RPU mode\n");
1220 if (of_property_present(dev_of_node(dev
), "xlnx,tcm-mode") ||
1221 device_is_compatible(dev
, "xlnx,zynqmp-r5fss")) {
1222 ret
= zynqmp_pm_set_tcm_config(r5_core
->pm_domain_id
,
1225 dev_err(r5_core
->dev
, "failed to configure TCM\n");
1230 ret
= zynqmp_r5_get_sram_banks(r5_core
);
1239 * zynqmp_r5_cluster_init()
1240 * Create and initialize zynqmp_r5_cluster type object
1242 * @cluster: pointer to zynqmp_r5_cluster type object
1244 * Return: 0 for success and error code for failure.
1246 static int zynqmp_r5_cluster_init(struct zynqmp_r5_cluster
*cluster
)
1248 enum zynqmp_r5_cluster_mode cluster_mode
= LOCKSTEP_MODE
;
1249 struct device
*dev
= cluster
->dev
;
1250 struct device_node
*dev_node
= dev_of_node(dev
);
1251 struct platform_device
*child_pdev
;
1252 struct zynqmp_r5_core
**r5_cores
;
1253 enum rpu_oper_mode fw_reg_val
;
1254 struct device
**child_devs
;
1255 struct device_node
*child
;
1256 enum rpu_tcm_comb tcm_mode
;
1257 int core_count
, ret
, i
;
1258 struct mbox_info
*ipi
;
1260 ret
= of_property_read_u32(dev_node
, "xlnx,cluster-mode", &cluster_mode
);
1263 * on success returns 0, if not defined then returns -EINVAL,
1264 * In that case, default is LOCKSTEP mode. Other than that
1265 * returns relative error code < 0.
1267 if (ret
!= -EINVAL
&& ret
!= 0) {
1268 dev_err(dev
, "Invalid xlnx,cluster-mode property\n");
1273 * For now driver only supports split mode and lockstep mode.
1274 * fail driver probe if either of that is not set in dts.
1276 if (cluster_mode
== LOCKSTEP_MODE
) {
1277 fw_reg_val
= PM_RPU_MODE_LOCKSTEP
;
1278 } else if (cluster_mode
== SPLIT_MODE
) {
1279 fw_reg_val
= PM_RPU_MODE_SPLIT
;
1281 dev_err(dev
, "driver does not support cluster mode %d\n", cluster_mode
);
1285 if (of_property_present(dev_node
, "xlnx,tcm-mode")) {
1286 ret
= of_property_read_u32(dev_node
, "xlnx,tcm-mode", (u32
*)&tcm_mode
);
1289 } else if (device_is_compatible(dev
, "xlnx,zynqmp-r5fss")) {
1290 if (cluster_mode
== LOCKSTEP_MODE
)
1291 tcm_mode
= PM_RPU_TCM_COMB
;
1293 tcm_mode
= PM_RPU_TCM_SPLIT
;
1295 tcm_mode
= PM_RPU_TCM_COMB
;
1299 * Number of cores is decided by number of child nodes of
1300 * r5f subsystem node in dts. If Split mode is used in dts
1301 * 2 child nodes are expected.
1302 * In lockstep mode if two child nodes are available,
1303 * only use first child node and consider it as core0
1304 * and ignore core1 dt node.
1306 core_count
= of_get_available_child_count(dev_node
);
1307 if (core_count
== 0) {
1308 dev_err(dev
, "Invalid number of r5 cores %d", core_count
);
1310 } else if (cluster_mode
== SPLIT_MODE
&& core_count
!= 2) {
1311 dev_err(dev
, "Invalid number of r5 cores for split mode\n");
1313 } else if (cluster_mode
== LOCKSTEP_MODE
&& core_count
== 2) {
1314 dev_warn(dev
, "Only r5 core0 will be used\n");
1318 child_devs
= kcalloc(core_count
, sizeof(struct device
*), GFP_KERNEL
);
1322 r5_cores
= kcalloc(core_count
,
1323 sizeof(struct zynqmp_r5_core
*), GFP_KERNEL
);
1330 for_each_available_child_of_node(dev_node
, child
) {
1331 child_pdev
= of_find_device_by_node(child
);
1335 goto release_r5_cores
;
1338 child_devs
[i
] = &child_pdev
->dev
;
1340 /* create and add remoteproc instance of type struct rproc */
1341 r5_cores
[i
] = zynqmp_r5_add_rproc_core(&child_pdev
->dev
);
1342 if (IS_ERR(r5_cores
[i
])) {
1344 ret
= PTR_ERR(r5_cores
[i
]);
1346 goto release_r5_cores
;
1350 * If mailbox nodes are disabled using "status" property then
1351 * setting up mailbox channels will fail.
1353 ipi
= zynqmp_r5_setup_mbox(&child_pdev
->dev
);
1355 r5_cores
[i
]->ipi
= ipi
;
1356 ipi
->r5_core
= r5_cores
[i
];
1360 * If two child nodes are available in dts in lockstep mode,
1361 * then ignore second child node.
1363 if (cluster_mode
== LOCKSTEP_MODE
) {
1371 cluster
->mode
= cluster_mode
;
1372 cluster
->core_count
= core_count
;
1373 cluster
->r5_cores
= r5_cores
;
1375 ret
= zynqmp_r5_core_init(cluster
, fw_reg_val
, tcm_mode
);
1377 dev_err(dev
, "failed to init r5 core err %d\n", ret
);
1378 cluster
->core_count
= 0;
1379 cluster
->r5_cores
= NULL
;
1382 * at this point rproc resources for each core are allocated.
1383 * adjust index to free resources in reverse order
1386 goto release_r5_cores
;
1394 put_device(child_devs
[i
]);
1396 zynqmp_r5_free_mbox(r5_cores
[i
]->ipi
);
1397 of_reserved_mem_device_release(r5_cores
[i
]->dev
);
1398 rproc_del(r5_cores
[i
]->rproc
);
1399 rproc_free(r5_cores
[i
]->rproc
);
1408 static void zynqmp_r5_cluster_exit(void *data
)
1410 struct platform_device
*pdev
= data
;
1411 struct zynqmp_r5_cluster
*cluster
;
1412 struct zynqmp_r5_core
*r5_core
;
1415 cluster
= platform_get_drvdata(pdev
);
1419 for (i
= 0; i
< cluster
->core_count
; i
++) {
1420 r5_core
= cluster
->r5_cores
[i
];
1421 zynqmp_r5_free_mbox(r5_core
->ipi
);
1422 iounmap(r5_core
->rsc_tbl_va
);
1423 of_reserved_mem_device_release(r5_core
->dev
);
1424 put_device(r5_core
->dev
);
1425 rproc_del(r5_core
->rproc
);
1426 rproc_free(r5_core
->rproc
);
1429 kfree(cluster
->r5_cores
);
1431 platform_set_drvdata(pdev
, NULL
);
1435 * zynqmp_r5_remoteproc_probe()
1436 * parse device-tree, initialize hardware and allocate required resources
1437 * and remoteproc ops
1439 * @pdev: domain platform device for R5 cluster
1441 * Return: 0 for success and < 0 for failure.
1443 static int zynqmp_r5_remoteproc_probe(struct platform_device
*pdev
)
1445 struct zynqmp_r5_cluster
*cluster
;
1446 struct device
*dev
= &pdev
->dev
;
1449 cluster
= kzalloc(sizeof(*cluster
), GFP_KERNEL
);
1455 ret
= devm_of_platform_populate(dev
);
1457 dev_err_probe(dev
, ret
, "failed to populate platform dev\n");
1462 /* wire in so each core can be cleaned up at driver remove */
1463 platform_set_drvdata(pdev
, cluster
);
1465 ret
= zynqmp_r5_cluster_init(cluster
);
1468 platform_set_drvdata(pdev
, NULL
);
1469 dev_err_probe(dev
, ret
, "Invalid r5f subsystem device tree\n");
1473 ret
= devm_add_action_or_reset(dev
, zynqmp_r5_cluster_exit
, pdev
);
1480 /* Match table for OF platform binding */
1481 static const struct of_device_id zynqmp_r5_remoteproc_match
[] = {
1482 { .compatible
= "xlnx,versal-net-r52fss", },
1483 { .compatible
= "xlnx,versal-r5fss", },
1484 { .compatible
= "xlnx,zynqmp-r5fss", },
1485 { /* end of list */ },
1487 MODULE_DEVICE_TABLE(of
, zynqmp_r5_remoteproc_match
);
1489 static struct platform_driver zynqmp_r5_remoteproc_driver
= {
1490 .probe
= zynqmp_r5_remoteproc_probe
,
1492 .name
= "zynqmp_r5_remoteproc",
1493 .of_match_table
= zynqmp_r5_remoteproc_match
,
1496 module_platform_driver(zynqmp_r5_remoteproc_driver
);
1498 MODULE_DESCRIPTION("Xilinx R5F remote processor driver");
1499 MODULE_AUTHOR("Xilinx Inc.");
1500 MODULE_LICENSE("GPL");