// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Management Interface (SCMI) Message Protocol driver
 *
 * SCMI Message Protocol is used between the System Control Processor(SCP)
 * and the Application Processors(AP). The Message Handling Unit(MHU)
 * provides a mechanism for inter-processor communication between SCP's
 * Cortex M3 and AP.
 *
 * SCP offers control and management of the core/cluster power states,
 * various power domain DVFS including the core/cluster, certain system
 * clocks configuration, thermal sensors and many others.
 *
 * Copyright (C) 2018 ARM Ltd.
 */

#include <linux/bitmap.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/processor.h>
#include <linux/semaphore.h>
#include <linux/slab.h>

#include "common.h"

#define MSG_ID_MASK		GENMASK(7, 0)
#define MSG_TYPE_MASK		GENMASK(9, 8)
#define MSG_PROTOCOL_ID_MASK	GENMASK(17, 10)
#define MSG_TOKEN_ID_MASK	GENMASK(27, 18)
#define MSG_XTRACT_TOKEN(hdr)	FIELD_GET(MSG_TOKEN_ID_MASK, (hdr))
#define MSG_TOKEN_MAX		(MSG_XTRACT_TOKEN(MSG_TOKEN_ID_MASK) + 1)

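/*
 * For reference, the 32-bit message header assembled from the masks above
 * is laid out as follows (bits [31:28] are left unused):
 *
 *	[27:18] token (sequence number)
 *	[17:10] protocol id
 *	[9:8]	message type
 *	[7:0]	message id
 */
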
enum scmi_error_codes {
	SCMI_SUCCESS = 0,	/* Success */
	SCMI_ERR_SUPPORT = -1,	/* Not supported */
	SCMI_ERR_PARAMS = -2,	/* Invalid Parameters */
	SCMI_ERR_ACCESS = -3,	/* Invalid access/permission denied */
	SCMI_ERR_ENTRY = -4,	/* Not found */
	SCMI_ERR_RANGE = -5,	/* Value out of range */
	SCMI_ERR_BUSY = -6,	/* Device busy */
	SCMI_ERR_COMMS = -7,	/* Communication Error */
	SCMI_ERR_GENERIC = -8,	/* Generic Error */
	SCMI_ERR_HARDWARE = -9,	/* Hardware Error */
	SCMI_ERR_PROTOCOL = -10,/* Protocol Error */
	SCMI_ERR_MAX
};

/* List of all SCMI devices active in system */
static LIST_HEAD(scmi_list);
/* Protection for the entire list */
static DEFINE_MUTEX(scmi_list_mutex);

/**
 * struct scmi_xfers_info - Structure to manage transfer information
 *
 * @xfer_block: Preallocated Message array
 * @xfer_alloc_table: Bitmap table for allocated messages.
 *	Index of this bitmap table is also used for message
 *	sequence identifier.
 * @xfer_lock: Protection for message allocation
 */
struct scmi_xfers_info {
	struct scmi_xfer *xfer_block;
	unsigned long *xfer_alloc_table;
	spinlock_t xfer_lock;
};

/**
 * struct scmi_desc - Description of SoC integration
 *
 * @max_rx_timeout_ms: Timeout for communication with SoC (in Milliseconds)
 * @max_msg: Maximum number of messages that can be pending
 *	simultaneously in the system
 * @max_msg_size: Maximum size of data per message that can be handled.
 */
struct scmi_desc {
	int max_rx_timeout_ms;
	int max_msg;
	int max_msg_size;
};

/**
 * struct scmi_chan_info - Structure representing a SCMI channel information
 *
 * @cl: Mailbox Client
 * @chan: Transmit/Receive mailbox channel
 * @payload: Transmit/Receive mailbox channel payload area
 * @dev: Reference to device in the SCMI hierarchy corresponding to this
 *	 channel
 * @handle: Pointer to SCMI entity handle
 */
struct scmi_chan_info {
	struct mbox_client cl;
	struct mbox_chan *chan;
	void __iomem *payload;
	struct device *dev;
	struct scmi_handle *handle;
};

/**
 * struct scmi_info - Structure representing a SCMI instance
 *
 * @dev: Device pointer
 * @desc: SoC description for this instance
 * @handle: Instance of SCMI handle to send to clients
 * @version: SCMI revision information containing protocol version,
 *	implementation version and (sub-)vendor identification.
 * @minfo: Message info
 * @tx_idr: IDR object to map protocol id to channel info pointer
 * @protocols_imp: List of protocols implemented, currently maximum of
 *	MAX_PROTOCOLS_IMP elements allocated by the base protocol
 * @node: List head
 * @users: Number of users of this instance
 */
struct scmi_info {
	struct device *dev;
	const struct scmi_desc *desc;
	struct scmi_revision_info version;
	struct scmi_handle handle;
	struct scmi_xfers_info minfo;
	struct idr tx_idr;
	u8 *protocols_imp;
	struct list_head node;
	int users;
};

#define client_to_scmi_chan_info(c) container_of(c, struct scmi_chan_info, cl)
#define handle_to_scmi_info(h)	container_of(h, struct scmi_info, handle)

/*
 * SCMI specification requires all parameters, message headers, return
 * arguments or any protocol data to be expressed in little endian
 * format only.
 */
struct scmi_shared_mem {
	__le32 reserved;
	__le32 channel_status;
#define SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR	BIT(1)
#define SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE	BIT(0)
	__le32 reserved1[2];
	__le32 flags;
#define SCMI_SHMEM_FLAG_INTR_ENABLED	BIT(0)
	__le32 length;
	__le32 msg_header;
	u8 msg_payload[0];
};

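/*
 * Byte offsets implied by the layout above: reserved @0, channel_status @4,
 * reserved1 @8, flags @16, length @20, msg_header @24 and msg_payload
 * starting @28. The payload itself begins with a 4-byte status word, which
 * is why scmi_fetch_response() below reads the status from msg_payload,
 * copies the remaining data from msg_payload + 4 and trims the reported
 * length by 8 bytes (message header + status).
 */
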
static const int scmi_linux_errmap[] = {
	/* better than switch case as long as return value is continuous */
	0,			/* SCMI_SUCCESS */
	-EOPNOTSUPP,		/* SCMI_ERR_SUPPORT */
	-EINVAL,		/* SCMI_ERR_PARAMS */
	-EACCES,		/* SCMI_ERR_ACCESS */
	-ENOENT,		/* SCMI_ERR_ENTRY */
	-ERANGE,		/* SCMI_ERR_RANGE */
	-EBUSY,			/* SCMI_ERR_BUSY */
	-ECOMM,			/* SCMI_ERR_COMMS */
	-EIO,			/* SCMI_ERR_GENERIC */
	-EREMOTEIO,		/* SCMI_ERR_HARDWARE */
	-EPROTO,		/* SCMI_ERR_PROTOCOL */
};

static inline int scmi_to_linux_errno(int errno)
{
	if (errno < SCMI_SUCCESS && errno > SCMI_ERR_MAX)
		return scmi_linux_errmap[-errno];
	return -EIO;
}

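/*
 * Example: a platform status of SCMI_ERR_ENTRY (-4) indexes
 * scmi_linux_errmap[4] and is therefore reported to callers as -ENOENT.
 */
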
/**
 * scmi_dump_header_dbg() - Helper to dump a message header.
 *
 * @dev: Device pointer corresponding to the SCMI entity
 * @hdr: pointer to header.
 */
static inline void scmi_dump_header_dbg(struct device *dev,
					struct scmi_msg_hdr *hdr)
{
	dev_dbg(dev, "Command ID: %x Sequence ID: %x Protocol: %x\n",
		hdr->id, hdr->seq, hdr->protocol_id);
}

static void scmi_fetch_response(struct scmi_xfer *xfer,
				struct scmi_shared_mem __iomem *mem)
{
	xfer->hdr.status = ioread32(mem->msg_payload);
	/* Skip the length of header and status in payload area i.e 8 bytes */
	xfer->rx.len = min_t(size_t, xfer->rx.len, ioread32(&mem->length) - 8);

	/* Take a copy to the rx buffer.. */
	memcpy_fromio(xfer->rx.buf, mem->msg_payload + 4, xfer->rx.len);
}

/**
 * scmi_rx_callback() - mailbox client callback for receive messages
 *
 * @cl: client pointer
 * @m: mailbox message
 *
 * Processes one received message to appropriate transfer information and
 * signals completion of the transfer.
 *
 * NOTE: This function will be invoked in IRQ context, hence should be
 * as optimal as possible.
 */
static void scmi_rx_callback(struct mbox_client *cl, void *m)
{
	u16 xfer_id;
	struct scmi_xfer *xfer;
	struct scmi_chan_info *cinfo = client_to_scmi_chan_info(cl);
	struct device *dev = cinfo->dev;
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
	struct scmi_xfers_info *minfo = &info->minfo;
	struct scmi_shared_mem __iomem *mem = cinfo->payload;

	xfer_id = MSG_XTRACT_TOKEN(ioread32(&mem->msg_header));

	/* Are we even expecting this? */
	if (!test_bit(xfer_id, minfo->xfer_alloc_table)) {
		dev_err(dev, "message for %d is not expected!\n", xfer_id);
		return;
	}

	xfer = &minfo->xfer_block[xfer_id];

	scmi_dump_header_dbg(dev, &xfer->hdr);
	/* Is the message of valid length? */
	if (xfer->rx.len > info->desc->max_msg_size) {
		dev_err(dev, "unable to handle %zu xfer(max %d)\n",
			xfer->rx.len, info->desc->max_msg_size);
		return;
	}

	scmi_fetch_response(xfer, mem);
	complete(&xfer->done);
}

/**
 * pack_scmi_header() - packs and returns 32-bit header
 *
 * @hdr: pointer to header containing all the information on message id,
 *	protocol id and sequence id.
 *
 * Return: 32-bit packed command header to be sent to the platform.
 */
static inline u32 pack_scmi_header(struct scmi_msg_hdr *hdr)
{
	return FIELD_PREP(MSG_ID_MASK, hdr->id) |
		FIELD_PREP(MSG_TOKEN_ID_MASK, hdr->seq) |
		FIELD_PREP(MSG_PROTOCOL_ID_MASK, hdr->protocol_id);
}

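/*
 * Example (assuming SCMI_PROTOCOL_BASE is 0x10, as in the SCMI spec): a
 * PROTOCOL_VERSION command (message id 0x0) to the base protocol using
 * token 5 packs to (5 << 18) | (0x10 << 10) | 0x0 = 0x00144000.
 */
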
/**
 * scmi_tx_prepare() - mailbox client callback to prepare for the transfer
 *
 * @cl: client pointer
 * @m: mailbox message
 *
 * This function prepares the shared memory which contains the header and the
 * payload.
 */
static void scmi_tx_prepare(struct mbox_client *cl, void *m)
{
	struct scmi_xfer *t = m;
	struct scmi_chan_info *cinfo = client_to_scmi_chan_info(cl);
	struct scmi_shared_mem __iomem *mem = cinfo->payload;

	/* Mark channel busy + clear error */
	iowrite32(0x0, &mem->channel_status);
	iowrite32(t->hdr.poll_completion ? 0 : SCMI_SHMEM_FLAG_INTR_ENABLED,
		  &mem->flags);
	iowrite32(sizeof(mem->msg_header) + t->tx.len, &mem->length);
	iowrite32(pack_scmi_header(&t->hdr), &mem->msg_header);
	if (t->tx.buf)
		memcpy_toio(mem->msg_payload, t->tx.buf, t->tx.len);
}

/**
 * scmi_xfer_get() - Allocate one message
 *
 * @handle: Pointer to SCMI entity handle
 *
 * Helper function which is used by various command functions that are
 * exposed to clients of this driver for allocating a message traffic event.
 *
 * This function can sleep depending on pending requests already in the system
 * for the SCMI entity. Further, this also holds a spinlock to maintain
 * integrity of internal data structures.
 *
 * Return: pointer to the allocated message on success, else error pointer.
 */
static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle)
{
	u16 xfer_id;
	struct scmi_xfer *xfer;
	unsigned long flags, bit_pos;
	struct scmi_info *info = handle_to_scmi_info(handle);
	struct scmi_xfers_info *minfo = &info->minfo;

	/* Keep the locked section as small as possible */
	spin_lock_irqsave(&minfo->xfer_lock, flags);
	bit_pos = find_first_zero_bit(minfo->xfer_alloc_table,
				      info->desc->max_msg);
	if (bit_pos == info->desc->max_msg) {
		spin_unlock_irqrestore(&minfo->xfer_lock, flags);
		return ERR_PTR(-ENOMEM);
	}
	set_bit(bit_pos, minfo->xfer_alloc_table);
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);

	xfer_id = bit_pos;

	xfer = &minfo->xfer_block[xfer_id];
	xfer->hdr.seq = xfer_id;
	reinit_completion(&xfer->done);

	return xfer;
}

/**
 * scmi_xfer_put() - Release a message
 *
 * @handle: Pointer to SCMI entity handle
 * @xfer: message that was reserved by scmi_xfer_get
 *
 * This holds a spinlock to maintain integrity of internal data structures.
 */
void scmi_xfer_put(const struct scmi_handle *handle, struct scmi_xfer *xfer)
{
	unsigned long flags;
	struct scmi_info *info = handle_to_scmi_info(handle);
	struct scmi_xfers_info *minfo = &info->minfo;

	/*
	 * Keep the locked section as small as possible
	 * NOTE: we might escape with smp_mb and no lock here..
	 * but just be conservative and symmetric.
	 */
	spin_lock_irqsave(&minfo->xfer_lock, flags);
	clear_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);
}

static bool
scmi_xfer_poll_done(const struct scmi_chan_info *cinfo, struct scmi_xfer *xfer)
{
	struct scmi_shared_mem __iomem *mem = cinfo->payload;
	u16 xfer_id = MSG_XTRACT_TOKEN(ioread32(&mem->msg_header));

	if (xfer->hdr.seq != xfer_id)
		return false;

	return ioread32(&mem->channel_status) &
		(SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR |
		SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
}

#define SCMI_MAX_POLL_TO_NS	(100 * NSEC_PER_USEC)

static bool scmi_xfer_done_no_timeout(const struct scmi_chan_info *cinfo,
				      struct scmi_xfer *xfer, ktime_t stop)
{
	ktime_t __cur = ktime_get();

	return scmi_xfer_poll_done(cinfo, xfer) || ktime_after(__cur, stop);
}

/**
 * scmi_do_xfer() - Do one transfer
 *
 * @handle: Pointer to SCMI entity handle
 * @xfer: Transfer to initiate and wait for response
 *
 * Return: -ETIMEDOUT in case of no response, if transmit error,
 *	return corresponding error, else if all goes well,
 *	return 0.
 */
int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer)
{
	int ret;
	int timeout;
	struct scmi_info *info = handle_to_scmi_info(handle);
	struct device *dev = info->dev;
	struct scmi_chan_info *cinfo;

	cinfo = idr_find(&info->tx_idr, xfer->hdr.protocol_id);
	if (unlikely(!cinfo))
		return -EINVAL;

	ret = mbox_send_message(cinfo->chan, xfer);
	if (ret < 0) {
		dev_dbg(dev, "mbox send fail %d\n", ret);
		return ret;
	}

	/* mbox_send_message returns non-negative value on success, so reset */
	ret = 0;

	if (xfer->hdr.poll_completion) {
		ktime_t stop = ktime_add_ns(ktime_get(), SCMI_MAX_POLL_TO_NS);

		spin_until_cond(scmi_xfer_done_no_timeout(cinfo, xfer, stop));

		if (ktime_before(ktime_get(), stop))
			scmi_fetch_response(xfer, cinfo->payload);
		else
			ret = -ETIMEDOUT;
	} else {
		/* And we wait for the response. */
		timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
		if (!wait_for_completion_timeout(&xfer->done, timeout)) {
			dev_err(dev, "mbox timed out in resp(caller: %pS)\n",
				(void *)_RET_IP_);
			ret = -ETIMEDOUT;
		}
	}

	if (!ret && xfer->hdr.status)
		ret = scmi_to_linux_errno(xfer->hdr.status);

	/*
	 * NOTE: we might prefer not to need the mailbox ticker to manage the
	 * transfer queueing since the protocol layer queues things by itself.
	 * Unfortunately, we have to kick the mailbox framework after we have
	 * received our message.
	 */
	mbox_client_txdone(cinfo->chan, ret);

	return ret;
}

/**
 * scmi_xfer_get_init() - Allocate and initialise one message
 *
 * @handle: Pointer to SCMI entity handle
 * @msg_id: Message identifier
 * @prot_id: Protocol identifier for the message
 * @tx_size: transmit message size
 * @rx_size: receive message size
 * @p: pointer to the allocated and initialised message
 *
 * This function allocates the message using @scmi_xfer_get and
 * initialises the header.
 *
 * Return: 0 if all went fine with @p pointing to message, else
 *	corresponding error.
 */
int scmi_xfer_get_init(const struct scmi_handle *handle, u8 msg_id, u8 prot_id,
		       size_t tx_size, size_t rx_size, struct scmi_xfer **p)
{
	int ret;
	struct scmi_xfer *xfer;
	struct scmi_info *info = handle_to_scmi_info(handle);
	struct device *dev = info->dev;

	/* Ensure we have sane transfer sizes */
	if (rx_size > info->desc->max_msg_size ||
	    tx_size > info->desc->max_msg_size)
		return -ERANGE;

	xfer = scmi_xfer_get(handle);
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "failed to get free message slot(%d)\n", ret);
		return ret;
	}

	xfer->tx.len = tx_size;
	xfer->rx.len = rx_size ? : info->desc->max_msg_size;
	xfer->hdr.id = msg_id;
	xfer->hdr.protocol_id = prot_id;
	xfer->hdr.poll_completion = false;

	*p = xfer;

	return 0;
}

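/*
 * A typical protocol command builds on the helpers above: allocate with
 * scmi_xfer_get_init(), fill xfer->tx.buf if the command carries a payload,
 * transmit with scmi_do_xfer(), read the reply from xfer->rx.buf and
 * finally release the slot with scmi_xfer_put(). scmi_version_get() below
 * is a minimal example of this pattern.
 */
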
/**
 * scmi_version_get() - command to get the revision of the SCMI entity
 *
 * @handle: Pointer to SCMI entity handle
 * @protocol: Protocol identifier for the message
 * @version: Holds returned version of protocol.
 *
 * Updates the SCMI information in the internal data structure.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
int scmi_version_get(const struct scmi_handle *handle, u8 protocol,
		     u32 *version)
{
	int ret;
	__le32 *rev_info;
	struct scmi_xfer *t;

	ret = scmi_xfer_get_init(handle, PROTOCOL_VERSION, protocol, 0,
				 sizeof(*version), &t);
	if (ret)
		return ret;

	ret = scmi_do_xfer(handle, t);
	if (!ret) {
		rev_info = t->rx.buf;
		*version = le32_to_cpu(*rev_info);
	}

	scmi_xfer_put(handle, t);
	return ret;
}

void scmi_setup_protocol_implemented(const struct scmi_handle *handle,
				     u8 *prot_imp)
{
	struct scmi_info *info = handle_to_scmi_info(handle);

	info->protocols_imp = prot_imp;
}

static bool
scmi_is_protocol_implemented(const struct scmi_handle *handle, u8 prot_id)
{
	int i;
	struct scmi_info *info = handle_to_scmi_info(handle);

	if (!info->protocols_imp)
		return false;

	for (i = 0; i < MAX_PROTOCOLS_IMP; i++)
		if (info->protocols_imp[i] == prot_id)
			return true;
	return false;
}

/**
 * scmi_handle_get() - Get the SCMI handle for a device
 *
 * @dev: pointer to device for which we want SCMI handle
 *
 * NOTE: The function does not track individual clients of the framework
 * and is expected to be maintained by caller of SCMI protocol library.
 * scmi_handle_put must be balanced with successful scmi_handle_get
 *
 * Return: pointer to handle if successful, NULL on error
 */
struct scmi_handle *scmi_handle_get(struct device *dev)
{
	struct list_head *p;
	struct scmi_info *info;
	struct scmi_handle *handle = NULL;

	mutex_lock(&scmi_list_mutex);
	list_for_each(p, &scmi_list) {
		info = list_entry(p, struct scmi_info, node);
		if (dev->parent == info->dev) {
			handle = &info->handle;
			info->users++;
			break;
		}
	}
	mutex_unlock(&scmi_list_mutex);

	return handle;
}

/**
 * scmi_handle_put() - Release the handle acquired by scmi_handle_get
 *
 * @handle: handle acquired by scmi_handle_get
 *
 * NOTE: The function does not track individual clients of the framework
 * and is expected to be maintained by caller of SCMI protocol library.
 * scmi_handle_put must be balanced with successful scmi_handle_get
 *
 * Return: 0 if successfully released;
 *	if NULL was passed, it returns -EINVAL.
 */
int scmi_handle_put(const struct scmi_handle *handle)
{
	struct scmi_info *info;

	if (!handle)
		return -EINVAL;

	info = handle_to_scmi_info(handle);
	mutex_lock(&scmi_list_mutex);
	if (!WARN_ON(!info->users))
		info->users--;
	mutex_unlock(&scmi_list_mutex);

	return 0;
}

static const struct scmi_desc scmi_generic_desc = {
	.max_rx_timeout_ms = 30,	/* We may increase this if required */
	.max_msg = 20,		/* Limited by MBOX_TX_QUEUE_LEN */
	.max_msg_size = 128,
};

/* Each compatible listed below must have descriptor associated with it */
static const struct of_device_id scmi_of_match[] = {
	{ .compatible = "arm,scmi", .data = &scmi_generic_desc },
	{ /* Sentinel */ },
};

MODULE_DEVICE_TABLE(of, scmi_of_match);

static int scmi_xfer_info_init(struct scmi_info *sinfo)
{
	int i;
	struct scmi_xfer *xfer;
	struct device *dev = sinfo->dev;
	const struct scmi_desc *desc = sinfo->desc;
	struct scmi_xfers_info *info = &sinfo->minfo;

	/* Pre-allocated messages, no more than what hdr.seq can support */
	if (WARN_ON(desc->max_msg >= MSG_TOKEN_MAX)) {
		dev_err(dev, "Maximum message of %d exceeds supported %ld\n",
			desc->max_msg, MSG_TOKEN_MAX);
		return -EINVAL;
	}

	info->xfer_block = devm_kcalloc(dev, desc->max_msg,
					sizeof(*info->xfer_block), GFP_KERNEL);
	if (!info->xfer_block)
		return -ENOMEM;

	info->xfer_alloc_table = devm_kcalloc(dev, BITS_TO_LONGS(desc->max_msg),
					      sizeof(long), GFP_KERNEL);
	if (!info->xfer_alloc_table)
		return -ENOMEM;

	/* Pre-initialize the buffer pointer to pre-allocated buffers */
	for (i = 0, xfer = info->xfer_block; i < desc->max_msg; i++, xfer++) {
		xfer->rx.buf = devm_kcalloc(dev, sizeof(u8), desc->max_msg_size,
					    GFP_KERNEL);
		if (!xfer->rx.buf)
			return -ENOMEM;

		xfer->tx.buf = xfer->rx.buf;
		init_completion(&xfer->done);
	}

	spin_lock_init(&info->xfer_lock);

	return 0;
}

static int scmi_mailbox_check(struct device_node *np)
{
	struct of_phandle_args arg;

	return of_parse_phandle_with_args(np, "mboxes", "#mbox-cells", 0, &arg);
}

static int scmi_mbox_free_channel(int id, void *p, void *data)
{
	struct scmi_chan_info *cinfo = p;
	struct idr *idr = data;

	if (!IS_ERR_OR_NULL(cinfo->chan)) {
		mbox_free_channel(cinfo->chan);
		cinfo->chan = NULL;
		idr_remove(idr, id);
	}

	return 0;
}

static int scmi_remove(struct platform_device *pdev)
{
	int ret = 0;
	struct scmi_info *info = platform_get_drvdata(pdev);
	struct idr *idr = &info->tx_idr;

	mutex_lock(&scmi_list_mutex);
	if (info->users)
		ret = -EBUSY;
	else
		list_del(&info->node);
	mutex_unlock(&scmi_list_mutex);

	if (ret)
		return ret;

	/* Safe to free channels since no more users */
	ret = idr_for_each(idr, scmi_mbox_free_channel, idr);
	idr_destroy(&info->tx_idr);

	return ret;
}

static int
scmi_mbox_chan_setup(struct scmi_info *info, struct device *dev, int prot_id)
{
	int ret;
	struct resource res;
	resource_size_t size;
	struct device_node *shmem, *np = dev->of_node;
	struct scmi_chan_info *cinfo;
	struct mbox_client *cl;

	if (scmi_mailbox_check(np)) {
		cinfo = idr_find(&info->tx_idr, SCMI_PROTOCOL_BASE);
		goto idr_alloc;
	}

	cinfo = devm_kzalloc(info->dev, sizeof(*cinfo), GFP_KERNEL);
	if (!cinfo)
		return -ENOMEM;

	cinfo->dev = dev;

	cl = &cinfo->cl;
	cl->dev = dev;
	cl->rx_callback = scmi_rx_callback;
	cl->tx_prepare = scmi_tx_prepare;
	cl->tx_block = false;
	cl->knows_txdone = true;

	shmem = of_parse_phandle(np, "shmem", 0);
	ret = of_address_to_resource(shmem, 0, &res);
	of_node_put(shmem);
	if (ret) {
		dev_err(dev, "failed to get SCMI Tx payload mem resource\n");
		return ret;
	}

	size = resource_size(&res);
	cinfo->payload = devm_ioremap(info->dev, res.start, size);
	if (!cinfo->payload) {
		dev_err(dev, "failed to ioremap SCMI Tx payload\n");
		return -EADDRNOTAVAIL;
	}

	/* Transmit channel is first entry i.e. index 0 */
	cinfo->chan = mbox_request_channel(cl, 0);
	if (IS_ERR(cinfo->chan)) {
		ret = PTR_ERR(cinfo->chan);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "failed to request SCMI Tx mailbox\n");
		return ret;
	}

idr_alloc:
	ret = idr_alloc(&info->tx_idr, cinfo, prot_id, prot_id + 1, GFP_KERNEL);
	if (ret != prot_id) {
		dev_err(dev, "unable to allocate SCMI idr slot err %d\n", ret);
		return ret;
	}

	cinfo->handle = &info->handle;
	return 0;
}

static void
scmi_create_protocol_device(struct device_node *np, struct scmi_info *info,
			    int prot_id)
{
	struct scmi_device *sdev;

	sdev = scmi_device_create(np, info->dev, prot_id);
	if (!sdev) {
		dev_err(info->dev, "failed to create %d protocol device\n",
			prot_id);
		return;
	}

	if (scmi_mbox_chan_setup(info, &sdev->dev, prot_id)) {
		dev_err(&sdev->dev, "failed to setup transport\n");
		scmi_device_destroy(sdev);
		return;
	}

	/* setup handle now as the transport is ready */
	scmi_set_handle(sdev);
}

static int scmi_probe(struct platform_device *pdev)
{
	int ret;
	struct scmi_handle *handle;
	const struct scmi_desc *desc;
	struct scmi_info *info;
	struct device *dev = &pdev->dev;
	struct device_node *child, *np = dev->of_node;

	/* Only mailbox method supported, check for the presence of one */
	if (scmi_mailbox_check(np)) {
		dev_err(dev, "no mailbox found in %pOF\n", np);
		return -EINVAL;
	}

	desc = of_match_device(scmi_of_match, dev)->data;

	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->dev = dev;
	info->desc = desc;
	INIT_LIST_HEAD(&info->node);

	ret = scmi_xfer_info_init(info);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, info);
	idr_init(&info->tx_idr);

	handle = &info->handle;
	handle->dev = info->dev;
	handle->version = &info->version;

	ret = scmi_mbox_chan_setup(info, dev, SCMI_PROTOCOL_BASE);
	if (ret)
		return ret;

	ret = scmi_base_protocol_init(handle);
	if (ret) {
		dev_err(dev, "unable to communicate with SCMI(%d)\n", ret);
		return ret;
	}

	mutex_lock(&scmi_list_mutex);
	list_add_tail(&info->node, &scmi_list);
	mutex_unlock(&scmi_list_mutex);

	for_each_available_child_of_node(np, child) {
		u32 prot_id;

		if (of_property_read_u32(child, "reg", &prot_id))
			continue;

		if (!FIELD_FIT(MSG_PROTOCOL_ID_MASK, prot_id))
			dev_err(dev, "Out of range protocol %d\n", prot_id);

		if (!scmi_is_protocol_implemented(handle, prot_id)) {
			dev_err(dev, "SCMI protocol %d not implemented\n",
				prot_id);
			continue;
		}

		scmi_create_protocol_device(child, info, prot_id);
	}

	return 0;
}

static struct platform_driver scmi_driver = {
	.driver = {
		   .name = "arm-scmi",
		   .of_match_table = scmi_of_match,
		   },
	.probe = scmi_probe,
	.remove = scmi_remove,
};

module_platform_driver(scmi_driver);

MODULE_ALIAS("platform:arm-scmi");
MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
MODULE_DESCRIPTION("ARM SCMI protocol driver");
MODULE_LICENSE("GPL v2");