// SPDX-License-Identifier: GPL-2.0
/*
 * For transport using shared mem structure.
 *
 * Copyright (C) 2019-2024 ARM Ltd.
 */
#include <linux/ktime.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/processor.h>
#include <linux/types.h>

#include <linux/bug.h>

#include "common.h"
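/*
 * Non-payload overhead, in bytes, of the shared memory message layout;
 * used in shmem_setup_iomap() to sanity-check that the mapped region is
 * large enough for the channel's max_msg_size.
 */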
#define SCMI_SHMEM_LAYOUT_OVERHEAD	24
/*
 * SCMI specification requires all parameters, message headers, return
 * arguments or any protocol data to be expressed in little endian
 * format only.
 */
struct scmi_shared_mem {
	__le32 reserved;
	__le32 channel_status;
#define SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR	BIT(1)
#define SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE	BIT(0)
	__le32 reserved1[2];
	__le32 flags;
#define SCMI_SHMEM_FLAG_INTR_ENABLED	BIT(0)
	__le32 length;
	__le32 msg_header;
	u8 msg_payload[];
};
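/*
 * I/O helpers enforcing aligned, 32-bit-wide accesses; selected in
 * shmem_setup_iomap() when the shmem node specifies reg-io-width = <4>.
 */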
static inline void shmem_memcpy_fromio32(void *to,
					 const void __iomem *from,
					 size_t count)
{
	WARN_ON(!IS_ALIGNED((unsigned long)from, 4) ||
		!IS_ALIGNED((unsigned long)to, 4) ||
		count % 4);

	__ioread32_copy(to, from, count / 4);
}
static inline void shmem_memcpy_toio32(void __iomem *to,
				       const void *from,
				       size_t count)
{
	WARN_ON(!IS_ALIGNED((unsigned long)to, 4) ||
		!IS_ALIGNED((unsigned long)from, 4) ||
		count % 4);

	__iowrite32_copy(to, from, count / 4);
}
static struct scmi_shmem_io_ops shmem_io_ops32 = {
	.fromio = shmem_memcpy_fromio32,
	.toio = shmem_memcpy_toio32,
};
/* Wrappers are needed for proper memcpy_{from,to}_io expansion by the
 * macro.
 */
static inline void shmem_memcpy_fromio(void *to,
				       const void __iomem *from,
				       size_t count)
{
	memcpy_fromio(to, from, count);
}
static inline void shmem_memcpy_toio(void __iomem *to,
				     const void *from,
				     size_t count)
{
	memcpy_toio(to, from, count);
}
static struct scmi_shmem_io_ops shmem_io_ops_default = {
	.fromio = shmem_memcpy_fromio,
	.toio = shmem_memcpy_toio,
};
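/*
 * Lay out an outgoing message in the shared memory area: wait for the
 * channel to be reported free, mark it busy, then write flags, length,
 * packed header and payload.
 */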
static void shmem_tx_prepare(struct scmi_shared_mem __iomem *shmem,
			     struct scmi_xfer *xfer,
			     struct scmi_chan_info *cinfo,
			     shmem_copy_toio_t copy_toio)
{
	ktime_t stop;

	/*
	 * Ideally the channel must be free by now, unless the OS timed out
	 * the last request and the platform continued to process it; in
	 * that case wait until it releases the shared memory, otherwise we
	 * may end up overwriting its response with a new message payload or
	 * vice-versa. Give up anyway after twice the expected channel
	 * timeout so as not to bail out on intermittent issues where the
	 * platform is occasionally a bit slower to answer.
	 *
	 * Note that after a timeout is detected we bail out and carry on,
	 * but the transport functionality is probably permanently
	 * compromised: this is just to ease debugging and avoid complete
	 * hangs on boot due to a misbehaving SCMI firmware.
	 */
	stop = ktime_add_ms(ktime_get(), 2 * cinfo->rx_timeout_ms);
	spin_until_cond((ioread32(&shmem->channel_status) &
			 SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE) ||
			 ktime_after(ktime_get(), stop));
	if (!(ioread32(&shmem->channel_status) &
	      SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE)) {
		WARN_ON_ONCE(1);
		dev_err(cinfo->dev,
			"Timeout waiting for a free TX channel!\n");
		return;
	}

	/* Mark channel busy + clear error */
	iowrite32(0x0, &shmem->channel_status);
	iowrite32(xfer->hdr.poll_completion ? 0 : SCMI_SHMEM_FLAG_INTR_ENABLED,
		  &shmem->flags);
	iowrite32(sizeof(shmem->msg_header) + xfer->tx.len, &shmem->length);
	iowrite32(pack_scmi_header(&xfer->hdr), &shmem->msg_header);
	if (xfer->tx.buf)
		copy_toio(shmem->msg_payload, xfer->tx.buf, xfer->tx.len);
}
static u32 shmem_read_header(struct scmi_shared_mem __iomem *shmem)
{
	return ioread32(&shmem->msg_header);
}
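/*
 * The first 32-bit word of the payload carries the command status/return
 * value; the remainder is the actual response copied into the rx buffer.
 */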
static void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem,
				 struct scmi_xfer *xfer,
				 shmem_copy_fromio_t copy_fromio)
{
	size_t len = ioread32(&shmem->length);

	xfer->hdr.status = ioread32(shmem->msg_payload);
	/* Skip the length of header and status in shmem area, i.e. 8 bytes */
	xfer->rx.len = min_t(size_t, xfer->rx.len, len > 8 ? len - 8 : 0);

	/* Take a copy into the rx buffer. */
	copy_fromio(xfer->rx.buf, shmem->msg_payload + 4, xfer->rx.len);
}
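/*
 * Notifications carry no status word, so only the 4-byte message header
 * is skipped before copying the payload out.
 */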
static void shmem_fetch_notification(struct scmi_shared_mem __iomem *shmem,
				     size_t max_len, struct scmi_xfer *xfer,
				     shmem_copy_fromio_t copy_fromio)
{
	size_t len = ioread32(&shmem->length);

	/* Skip only the length of header in shmem area, i.e. 4 bytes */
	xfer->rx.len = min_t(size_t, max_len, len > 4 ? len - 4 : 0);

	/* Take a copy into the rx buffer. */
	copy_fromio(xfer->rx.buf, shmem->msg_payload, xfer->rx.len);
}
static void shmem_clear_channel(struct scmi_shared_mem __iomem *shmem)
{
	iowrite32(SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE, &shmem->channel_status);
}
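/*
 * Used when polling for completion: guard against stale shared memory
 * content by matching the header token against this xfer's sequence
 * number before checking the channel status bits.
 */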
static bool shmem_poll_done(struct scmi_shared_mem __iomem *shmem,
			    struct scmi_xfer *xfer)
{
	u16 xfer_id;

	xfer_id = MSG_XTRACT_TOKEN(ioread32(&shmem->msg_header));

	if (xfer->hdr.seq != xfer_id)
		return false;

	return ioread32(&shmem->channel_status) &
		(SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR |
		 SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
}
static bool shmem_channel_free(struct scmi_shared_mem __iomem *shmem)
{
	return (ioread32(&shmem->channel_status) &
		SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
}
static bool shmem_channel_intr_enabled(struct scmi_shared_mem __iomem *shmem)
{
	return ioread32(&shmem->flags) & SCMI_SHMEM_FLAG_INTR_ENABLED;
}
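/*
 * Resolve the "shmem" phandle for this channel, validate and ioremap the
 * region, and pick the I/O accessors according to the optional
 * "reg-io-width" property.
 */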
static void __iomem *shmem_setup_iomap(struct scmi_chan_info *cinfo,
				       struct device *dev, bool tx,
				       struct resource *res,
				       struct scmi_shmem_io_ops **ops)
{
	struct device_node *shmem __free(device_node);
	const char *desc = tx ? "Tx" : "Rx";
	int ret, idx = tx ? 0 : 1;
	struct device *cdev = cinfo->dev;
	struct resource lres = {};
	resource_size_t size;
	void __iomem *addr;
	u32 reg_io_width = 0;

	shmem = of_parse_phandle(cdev->of_node, "shmem", idx);
	if (!shmem)
		return IOMEM_ERR_PTR(-ENODEV);

	if (!of_device_is_compatible(shmem, "arm,scmi-shmem"))
		return IOMEM_ERR_PTR(-ENXIO);

	/* Use a local on-stack as a working area when not provided */
	if (!res)
		res = &lres;

	ret = of_address_to_resource(shmem, 0, res);
	if (ret) {
		dev_err(cdev, "failed to get SCMI %s shared memory\n", desc);
		return IOMEM_ERR_PTR(ret);
	}

	size = resource_size(res);
	if (cinfo->max_msg_size + SCMI_SHMEM_LAYOUT_OVERHEAD > size) {
		dev_err(dev, "misconfigured SCMI shared memory\n");
		return IOMEM_ERR_PTR(-ENOSPC);
	}

	addr = devm_ioremap(dev, res->start, size);
	if (!addr) {
		dev_err(dev, "failed to ioremap SCMI %s shared memory\n", desc);
		return IOMEM_ERR_PTR(-EADDRNOTAVAIL);
	}

	of_property_read_u32(shmem, "reg-io-width", &reg_io_width);
	switch (reg_io_width) {
	case 4:
		*ops = &shmem_io_ops32;
		break;
	default:
		*ops = &shmem_io_ops_default;
		break;
	}

	return addr;
}
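/* Shared memory helpers exposed to the shmem-based SCMI transports */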
static const struct scmi_shared_mem_operations scmi_shmem_ops = {
	.tx_prepare = shmem_tx_prepare,
	.read_header = shmem_read_header,
	.fetch_response = shmem_fetch_response,
	.fetch_notification = shmem_fetch_notification,
	.clear_channel = shmem_clear_channel,
	.poll_done = shmem_poll_done,
	.channel_free = shmem_channel_free,
	.channel_intr_enabled = shmem_channel_intr_enabled,
	.setup_iomap = shmem_setup_iomap,
};
const struct scmi_shared_mem_operations *scmi_shared_mem_operations_get(void)
{
	return &scmi_shmem_ops;
}
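/*
 * Illustrative (hypothetical) usage sketch for a shmem-based transport,
 * assuming a channel already mapped via .setup_iomap, an io_ops pointer
 * returned by it, and a transport-specific doorbell mechanism:
 *
 *	const struct scmi_shared_mem_operations *ops =
 *					scmi_shared_mem_operations_get();
 *
 *	ops->tx_prepare(shmem, xfer, cinfo, io_ops->toio);
 *	// ring the doorbell and wait for the platform to answer
 *	ops->fetch_response(shmem, xfer, io_ops->fromio);
 *	ops->clear_channel(shmem);
 */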