// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Management Interface (SCMI) Message Protocol driver
 *
 * SCMI Message Protocol is used between the System Control Processor(SCP)
 * and the Application Processors(AP). The Message Handling Unit(MHU)
 * provides a mechanism for inter-processor communication between SCP's
 * Cortex M3 and AP.
 *
 * SCP offers control and management of the core/cluster power states,
 * various power domain DVFS including the core/cluster, certain system
 * clocks configuration, thermal sensors and many others.
 *
 * Copyright (C) 2018-2024 ARM Ltd.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/idr.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/hashtable.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/processor.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/xarray.h>

#include "common.h"
#include "notify.h"

#include "raw_mode.h"

#define CREATE_TRACE_POINTS
#include <trace/events/scmi.h>
static DEFINE_IDA(scmi_id);

static DEFINE_XARRAY(scmi_protocols);

/* List of all SCMI devices active in system */
static LIST_HEAD(scmi_list);
/* Protection for the entire list */
static DEFINE_MUTEX(scmi_list_mutex);
/* Track the unique id for the transfers for debug & profiling purpose */
static atomic_t transfer_last_id;

static struct dentry *scmi_top_dentry;
/**
 * struct scmi_xfers_info - Structure to manage transfer information
 *
 * @xfer_alloc_table: Bitmap table for allocated messages.
 *	Index of this bitmap table is also used for message
 *	sequence identifier.
 * @xfer_lock: Protection for message allocation
 * @max_msg: Maximum number of messages that can be pending
 * @free_xfers: A free list for available-to-use xfers. It is initialized with
 *	a number of xfers equal to the maximum allowed in-flight
 *	messages.
 * @pending_xfers: A hashtable, indexed by msg_hdr.seq, used to keep all the
 *	currently in-flight messages.
 */
struct scmi_xfers_info {
	unsigned long *xfer_alloc_table;
	spinlock_t xfer_lock;
	int max_msg;
	struct hlist_head free_xfers;
	DECLARE_HASHTABLE(pending_xfers, SCMI_PENDING_XFERS_HT_ORDER_SZ);
};
/**
 * struct scmi_protocol_instance - Describe an initialized protocol instance.
 * @handle: Reference to the SCMI handle associated to this protocol instance.
 * @proto: A reference to the protocol descriptor.
 * @gid: A reference for per-protocol devres management.
 * @users: A refcount to track effective users of this protocol.
 * @priv: Reference for optional protocol private data.
 * @version: Protocol version supported by the platform as detected at runtime.
 * @negotiated_version: When the platform supports a newer protocol version,
 *			the agent will try to negotiate with the platform the
 *			usage of the newest version known to it, since
 *			backward compatibility is NOT automatically assured.
 *			This field is NON-zero when a successful negotiation
 *			has completed.
 * @ph: An embedded protocol handle that will be passed down to protocol
 *	initialization code to identify this instance.
 *
 * Each protocol is initialized independently once for each SCMI platform in
 * which it is defined by DT and implemented by the SCMI server fw.
 */
struct scmi_protocol_instance {
	const struct scmi_handle	*handle;
	const struct scmi_protocol	*proto;
	void				*gid;
	refcount_t			users;
	void				*priv;
	unsigned int			version;
	unsigned int			negotiated_version;
	struct scmi_protocol_handle	ph;
};

#define ph_to_pi(h)	container_of(h, struct scmi_protocol_instance, ph)
/**
 * struct scmi_debug_info - Debug common info
 * @top_dentry: A reference to the top debugfs dentry
 * @name: Name of this SCMI instance
 * @type: Type of this SCMI instance
 * @is_atomic: Flag to state if the transport of this instance is atomic
 * @counters: An array of atomic_t's used for tracking statistics (if enabled)
 */
struct scmi_debug_info {
	struct dentry *top_dentry;
	const char *name;
	const char *type;
	bool is_atomic;
	atomic_t counters[SCMI_DEBUG_COUNTERS_LAST];
};
/**
 * struct scmi_info - Structure representing a SCMI instance
 *
 * @id: A sequence number starting from zero identifying this instance
 * @dev: Device pointer
 * @desc: SoC description for this instance
 * @version: SCMI revision information containing protocol version,
 *	implementation version and (sub-)vendor identification.
 * @handle: Instance of SCMI handle to send to clients
 * @tx_minfo: Universal Transmit Message management info
 * @rx_minfo: Universal Receive Message management info
 * @tx_idr: IDR object to map protocol id to Tx channel info pointer
 * @rx_idr: IDR object to map protocol id to Rx channel info pointer
 * @protocols: IDR for protocols' instance descriptors initialized for
 *	this SCMI instance: populated on protocol's first attempted
 *	usage.
 * @protocols_mtx: A mutex to protect protocols instances initialization.
 * @protocols_imp: List of protocols implemented, currently maximum of
 *	scmi_revision_info.num_protocols elements allocated by the
 *	base protocol
 * @active_protocols: IDR storing device_nodes for protocols actually defined
 *	in the DT and confirmed as implemented by fw.
 * @notify_priv: Pointer to private data structure specific to notifications.
 * @node: List head
 * @users: Number of users of this instance
 * @bus_nb: A notifier to listen for device bind/unbind on the scmi bus
 * @dev_req_nb: A notifier to listen for device request/unrequest on the scmi
 *	bus
 * @devreq_mtx: A mutex to serialize device creation for this SCMI instance
 * @dbg: A pointer to debugfs related data (if any)
 * @raw: An opaque reference handle used by SCMI Raw mode.
 */
struct scmi_info {
	int id;
	struct device *dev;
	const struct scmi_desc *desc;
	struct scmi_revision_info version;
	struct scmi_handle handle;
	struct scmi_xfers_info tx_minfo;
	struct scmi_xfers_info rx_minfo;
	struct idr tx_idr;
	struct idr rx_idr;
	struct idr protocols;
	/* Ensure mutual exclusive access to protocols instance array */
	struct mutex protocols_mtx;
	u8 *protocols_imp;
	struct idr active_protocols;
	void *notify_priv;
	struct list_head node;
	int users;
	struct notifier_block bus_nb;
	struct notifier_block dev_req_nb;
	/* Serialize device creation process for this instance */
	struct mutex devreq_mtx;
	struct scmi_debug_info *dbg;
	void *raw;
};

#define handle_to_scmi_info(h)	container_of(h, struct scmi_info, handle)
#define bus_nb_to_scmi_info(nb)	container_of(nb, struct scmi_info, bus_nb)
#define req_nb_to_scmi_info(nb)	container_of(nb, struct scmi_info, dev_req_nb)
static void scmi_rx_callback(struct scmi_chan_info *cinfo,
			     u32 msg_hdr, void *priv);
static void scmi_bad_message_trace(struct scmi_chan_info *cinfo,
				   u32 msg_hdr, enum scmi_bad_msg err);
static struct scmi_transport_core_operations scmi_trans_core_ops = {
	.bad_message_trace = scmi_bad_message_trace,
	.rx_callback = scmi_rx_callback,
};
static unsigned long
scmi_vendor_protocol_signature(unsigned int protocol_id, char *vendor_id,
			       char *sub_vendor_id, u32 impl_ver)
{
	char *signature, *p;
	unsigned long hash = 0;

	/* vendor_id/sub_vendor_id guaranteed <= SCMI_SHORT_NAME_MAX_SIZE */
	signature = kasprintf(GFP_KERNEL, "%02X|%s|%s|0x%08X", protocol_id,
			      vendor_id ?: "", sub_vendor_id ?: "", impl_ver);
	if (!signature)
		return 0;

	p = signature;
	while (*p)
		hash = partial_name_hash(tolower(*p++), hash);
	hash = end_name_hash(hash);

	kfree(signature);

	return hash;
}
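/*
 * For illustration (values here are made up, not part of this driver): given
 * protocol_id 0x81, vendor_id "ACME", sub_vendor_id "WIDGETS" and impl_ver
 * 0x00010000, the hashed signature string built above would be:
 *
 *	"81|ACME|WIDGETS|0x00010000"
 *
 * so two vendor protocols sharing the same protocol id but different vendor
 * identification map to distinct lookup keys.
 */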
static unsigned long
scmi_protocol_key_calculate(int protocol_id, char *vendor_id,
			    char *sub_vendor_id, u32 impl_ver)
{
	if (protocol_id < SCMI_PROTOCOL_VENDOR_BASE)
		return protocol_id;
	else
		return scmi_vendor_protocol_signature(protocol_id, vendor_id,
						      sub_vendor_id, impl_ver);
}
static const struct scmi_protocol *
__scmi_vendor_protocol_lookup(int protocol_id, char *vendor_id,
			      char *sub_vendor_id, u32 impl_ver)
{
	unsigned long key;
	struct scmi_protocol *proto = NULL;

	key = scmi_protocol_key_calculate(protocol_id, vendor_id,
					  sub_vendor_id, impl_ver);
	if (key)
		proto = xa_load(&scmi_protocols, key);

	return proto;
}
static const struct scmi_protocol *
scmi_vendor_protocol_lookup(int protocol_id, char *vendor_id,
			    char *sub_vendor_id, u32 impl_ver)
{
	const struct scmi_protocol *proto = NULL;

	/* Searching for closest match ... */
	proto = __scmi_vendor_protocol_lookup(protocol_id, vendor_id,
					      sub_vendor_id, impl_ver);
	if (proto)
		return proto;

	/* Any match just on vendor/sub_vendor ? */
	if (impl_ver) {
		proto = __scmi_vendor_protocol_lookup(protocol_id, vendor_id,
						      sub_vendor_id, 0);
		if (proto)
			return proto;
	}

	/* Any match just on the vendor ? */
	if (sub_vendor_id)
		proto = __scmi_vendor_protocol_lookup(protocol_id, vendor_id,
						      NULL, 0);

	return proto;
}
static const struct scmi_protocol *
scmi_protocol_get(int protocol_id, struct scmi_revision_info *version)
{
	const struct scmi_protocol *proto = NULL;

	if (protocol_id < SCMI_PROTOCOL_VENDOR_BASE)
		proto = xa_load(&scmi_protocols, protocol_id);
	else
		proto = scmi_vendor_protocol_lookup(protocol_id,
						    version->vendor_id,
						    version->sub_vendor_id,
						    version->impl_ver);
	if (!proto || !try_module_get(proto->owner)) {
		pr_warn("SCMI Protocol 0x%x not found!\n", protocol_id);
		return NULL;
	}

	pr_debug("Found SCMI Protocol 0x%x\n", protocol_id);

	if (protocol_id >= SCMI_PROTOCOL_VENDOR_BASE)
		pr_info("Loaded SCMI Vendor Protocol 0x%x - %s %s %X\n",
			protocol_id, proto->vendor_id ?: "",
			proto->sub_vendor_id ?: "", proto->impl_ver);

	return proto;
}
static void scmi_protocol_put(const struct scmi_protocol *proto)
{
	if (proto)
		module_put(proto->owner);
}
static int scmi_vendor_protocol_check(const struct scmi_protocol *proto)
{
	if (!proto->vendor_id) {
		pr_err("missing vendor_id for protocol 0x%x\n", proto->id);
		return -EINVAL;
	}

	if (strlen(proto->vendor_id) >= SCMI_SHORT_NAME_MAX_SIZE) {
		pr_err("malformed vendor_id for protocol 0x%x\n", proto->id);
		return -EINVAL;
	}

	if (proto->sub_vendor_id &&
	    strlen(proto->sub_vendor_id) >= SCMI_SHORT_NAME_MAX_SIZE) {
		pr_err("malformed sub_vendor_id for protocol 0x%x\n",
		       proto->id);
		return -EINVAL;
	}

	return 0;
}
int scmi_protocol_register(const struct scmi_protocol *proto)
{
	int ret;
	unsigned long key;

	if (!proto) {
		pr_err("invalid protocol\n");
		return -EINVAL;
	}

	if (!proto->instance_init) {
		pr_err("missing init for protocol 0x%x\n", proto->id);
		return -EINVAL;
	}

	if (proto->id >= SCMI_PROTOCOL_VENDOR_BASE &&
	    scmi_vendor_protocol_check(proto))
		return -EINVAL;

	/*
	 * Calculate a protocol key to register this protocol with the core;
	 * key value 0 is considered invalid.
	 */
	key = scmi_protocol_key_calculate(proto->id, proto->vendor_id,
					  proto->sub_vendor_id,
					  proto->impl_ver);
	if (!key)
		return -EINVAL;

	ret = xa_insert(&scmi_protocols, key, (void *)proto, GFP_KERNEL);
	if (ret) {
		pr_err("unable to allocate SCMI protocol slot for 0x%x - err %d\n",
		       proto->id, ret);
		return ret;
	}

	pr_debug("Registered SCMI Protocol 0x%x\n", proto->id);

	return 0;
}
EXPORT_SYMBOL_GPL(scmi_protocol_register);
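/*
 * Usage sketch (hypothetical, for illustration only): a vendor protocol
 * module would describe itself and register with the core roughly like
 * this; every name and value below is made up:
 *
 *	static const struct scmi_protocol scmi_acme_proto = {
 *		.id = 0x81,
 *		.owner = THIS_MODULE,
 *		.instance_init = &scmi_acme_proto_init,
 *		.supported_version = 0x10000,
 *		.vendor_id = "ACME",
 *		.sub_vendor_id = "WIDGETS",
 *		.impl_ver = 0x00010000,
 *	};
 *
 *	return scmi_protocol_register(&scmi_acme_proto);
 *
 * Standard protocols omit the vendor fields and use their spec-defined id.
 */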
void scmi_protocol_unregister(const struct scmi_protocol *proto)
{
	unsigned long key;

	key = scmi_protocol_key_calculate(proto->id, proto->vendor_id,
					  proto->sub_vendor_id,
					  proto->impl_ver);
	if (!key)
		return;

	xa_erase(&scmi_protocols, key);

	pr_debug("Unregistered SCMI Protocol 0x%x\n", proto->id);
}
EXPORT_SYMBOL_GPL(scmi_protocol_unregister);
/**
 * scmi_create_protocol_devices - Create devices for all pending requests for
 * this SCMI instance.
 *
 * @np: The device node describing the protocol
 * @info: The SCMI instance descriptor
 * @prot_id: The protocol ID
 * @name: The optional name of the device to be created: if not provided this
 *	call will lead to the creation of all the devices currently requested
 *	for the specified protocol.
 */
static void scmi_create_protocol_devices(struct device_node *np,
					 struct scmi_info *info,
					 int prot_id, const char *name)
{
	struct scmi_device *sdev;

	mutex_lock(&info->devreq_mtx);
	sdev = scmi_device_create(np, info->dev, prot_id, name);
	if (name && !sdev)
		dev_err(info->dev,
			"failed to create device for protocol 0x%X (%s)\n",
			prot_id, name);
	mutex_unlock(&info->devreq_mtx);
}
static void scmi_destroy_protocol_devices(struct scmi_info *info,
					  int prot_id, const char *name)
{
	mutex_lock(&info->devreq_mtx);
	scmi_device_destroy(info->dev, prot_id, name);
	mutex_unlock(&info->devreq_mtx);
}
void scmi_notification_instance_data_set(const struct scmi_handle *handle,
					 void *priv)
{
	struct scmi_info *info = handle_to_scmi_info(handle);

	info->notify_priv = priv;
	/* Ensure updated protocol private data is visible */
	smp_wmb();
}
void *scmi_notification_instance_data_get(const struct scmi_handle *handle)
{
	struct scmi_info *info = handle_to_scmi_info(handle);

	/* Ensure protocols_private_data has been updated */
	smp_rmb();
	return info->notify_priv;
}
/**
 * scmi_xfer_token_set - Reserve and set new token for the xfer at hand
 *
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 * @xfer: The xfer to act upon
 *
 * Pick the next unused monotonically increasing token and set it into
 * xfer->hdr.seq: picking a monotonically increasing value avoids immediate
 * reuse of freshly completed or timed-out xfers, thus mitigating the risk
 * of incorrect association of a late and expired xfer with a live in-flight
 * transaction, both happening to re-use the same token identifier.
 *
 * Since platform is NOT required to answer our request in-order we should
 * account for a few rare but possible scenarios:
 *
 *  - exactly 'next_token' may be NOT available so pick xfer_id >= next_token
 *    using find_next_zero_bit() starting from candidate next_token bit
 *
 *  - all tokens ahead up to (MSG_TOKEN_ID_MASK - 1) are used in-flight but
 *    there are plenty of free tokens at the start, so try a second pass
 *    using find_next_zero_bit() and starting from 0.
 *
 *  X = used in-flight
 *
 * Normal
 * ------
 *
 *		|- xfer_id picked
 *   -----------+----------------------------------------------------------
 *   | | |X|X|X| | | | | | ... ... ... ... ... ... ... ... ... ... ...|X|X|
 *   ----------------------------------------------------------------------
 *		^
 *		|- next_token
 *
 * Out-of-order pending at start
 * -----------------------------
 *
 *	  |- xfer_id picked, last_token fixed
 *   -----+----------------------------------------------------------------
 *   |X|X| | | | |X|X| ... ... ... ... ... ... ... ... ... ... ... ...|X| |
 *   ----------------------------------------------------------------------
 *    ^
 *    |- next_token
 *
 * Out-of-order pending at end
 * ---------------------------
 *
 *	  |- xfer_id picked, last_token fixed
 *   -----+----------------------------------------------------------------
 *   |X|X| | | | |X|X| ... ... ... ... ... ... ... ... ... ... |X|X|X||X|X|
 *   ----------------------------------------------------------------------
 *								^
 *								|- next_token
 *
 * Context: Assumes to be called with @xfer_lock already acquired.
 *
 * Return: 0 on Success or error
 */
static int scmi_xfer_token_set(struct scmi_xfers_info *minfo,
			       struct scmi_xfer *xfer)
{
	unsigned long xfer_id, next_token;

	/*
	 * Pick a candidate monotonic token in range [0, MSG_TOKEN_MAX - 1]
	 * using the pre-allocated transfer_id as a base.
	 * Note that the global transfer_id is shared across all message types
	 * so there could be holes in the allocated set of monotonic sequence
	 * numbers, but that is going to limit the effectiveness of the
	 * mitigation only in very rare limit conditions.
	 */
	next_token = (xfer->transfer_id & (MSG_TOKEN_MAX - 1));

	/* Pick the next available xfer_id >= next_token */
	xfer_id = find_next_zero_bit(minfo->xfer_alloc_table,
				     MSG_TOKEN_MAX, next_token);
	if (xfer_id == MSG_TOKEN_MAX) {
		/*
		 * After heavily out-of-order responses, there are no free
		 * tokens ahead, but only at start of xfer_alloc_table so
		 * try again from the beginning.
		 */
		xfer_id = find_next_zero_bit(minfo->xfer_alloc_table,
					     MSG_TOKEN_MAX, 0);
		/*
		 * Something is wrong if we got here since there can be a
		 * maximum number of (MSG_TOKEN_MAX - 1) in-flight messages
		 * but we have not found any free token [0, MSG_TOKEN_MAX - 1].
		 */
		if (WARN_ON_ONCE(xfer_id == MSG_TOKEN_MAX))
			return -ENOMEM;
	}

	/* Update +/- last_token accordingly if we skipped some hole */
	if (xfer_id != next_token)
		atomic_add((int)(xfer_id - next_token), &transfer_last_id);

	xfer->hdr.seq = (u16)xfer_id;

	return 0;
}
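/*
 * Worked example (assuming MSG_TOKEN_MAX == 1024, i.e. 10 token bits): a
 * transfer_id of 1027 yields next_token == 3; if tokens 3 and 4 are still
 * in-flight, find_next_zero_bit() picks 5, xfer->hdr.seq becomes 5 and
 * transfer_last_id is advanced by (5 - 3), so later allocations keep
 * increasing monotonically past the skipped hole.
 */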
/**
 * scmi_xfer_token_clear - Release the token
 *
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 * @xfer: The xfer to act upon
 */
static inline void scmi_xfer_token_clear(struct scmi_xfers_info *minfo,
					 struct scmi_xfer *xfer)
{
	clear_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
}
/**
 * scmi_xfer_inflight_register_unlocked - Register the xfer as in-flight
 *
 * @xfer: The xfer to register
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 *
 * Note that this helper assumes that the xfer to be registered as in-flight
 * had been built using an xfer sequence number which still corresponds to a
 * free slot in the xfer_alloc_table.
 *
 * Context: Assumes to be called with @xfer_lock already acquired.
 */
static inline void
scmi_xfer_inflight_register_unlocked(struct scmi_xfer *xfer,
				     struct scmi_xfers_info *minfo)
{
	/* Set in-flight */
	set_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
	hash_add(minfo->pending_xfers, &xfer->node, xfer->hdr.seq);
	xfer->pending = true;
}
/**
 * scmi_xfer_inflight_register - Try to register an xfer as in-flight
 *
 * @xfer: The xfer to register
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 *
 * Note that this helper does NOT assume anything about the sequence number
 * that was baked into the provided xfer, so it checks at first if it can
 * be mapped to a free slot and fails with an error if another xfer with the
 * same sequence number is currently still registered as in-flight.
 *
 * Return: 0 on Success or -EBUSY if sequence number embedded in the xfer
 *	   could not be mapped to a free slot in the xfer_alloc_table.
 */
static int scmi_xfer_inflight_register(struct scmi_xfer *xfer,
				       struct scmi_xfers_info *minfo)
{
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&minfo->xfer_lock, flags);
	if (!test_bit(xfer->hdr.seq, minfo->xfer_alloc_table))
		scmi_xfer_inflight_register_unlocked(xfer, minfo);
	else
		ret = -EBUSY;
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);

	return ret;
}
/**
 * scmi_xfer_raw_inflight_register - A helper to register the given xfer as
 * in-flight on the TX channel, if possible.
 *
 * @handle: Pointer to SCMI entity handle
 * @xfer: The xfer to register
 *
 * Return: 0 on Success, error otherwise
 */
int scmi_xfer_raw_inflight_register(const struct scmi_handle *handle,
				    struct scmi_xfer *xfer)
{
	struct scmi_info *info = handle_to_scmi_info(handle);

	return scmi_xfer_inflight_register(xfer, &info->tx_minfo);
}
/**
 * scmi_xfer_pending_set - Pick a proper sequence number and mark the xfer
 * as pending in-flight
 *
 * @xfer: The xfer to act upon
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 *
 * Return: 0 on Success or error otherwise
 */
static inline int scmi_xfer_pending_set(struct scmi_xfer *xfer,
					struct scmi_xfers_info *minfo)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&minfo->xfer_lock, flags);
	/* Set a new monotonic token as the xfer sequence number */
	ret = scmi_xfer_token_set(minfo, xfer);
	if (!ret)
		scmi_xfer_inflight_register_unlocked(xfer, minfo);
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);

	return ret;
}
/**
 * scmi_xfer_get() - Allocate one message
 *
 * @handle: Pointer to SCMI entity handle
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 *
 * Helper function which is used by various message functions that are
 * exposed to clients of this driver for allocating a message traffic event.
 *
 * Picks an xfer from the free list @free_xfers (if any available) and performs
 * a basic initialization.
 *
 * Note that, at this point, still no sequence number is assigned to the
 * allocated xfer, nor is it registered as a pending transaction.
 *
 * The successfully initialized xfer is refcounted.
 *
 * Context: Holds @xfer_lock while manipulating @free_xfers.
 *
 * Return: An initialized xfer if all went fine, else pointer error.
 */
static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle,
				       struct scmi_xfers_info *minfo)
{
	unsigned long flags;
	struct scmi_xfer *xfer;

	spin_lock_irqsave(&minfo->xfer_lock, flags);
	if (hlist_empty(&minfo->free_xfers)) {
		spin_unlock_irqrestore(&minfo->xfer_lock, flags);
		return ERR_PTR(-ENOMEM);
	}

	/* grab an xfer from the free_list */
	xfer = hlist_entry(minfo->free_xfers.first, struct scmi_xfer, node);
	hlist_del_init(&xfer->node);

	/*
	 * Allocate transfer_id early so that can be used also as base for
	 * monotonic sequence number generation if needed.
	 */
	xfer->transfer_id = atomic_inc_return(&transfer_last_id);

	refcount_set(&xfer->users, 1);
	atomic_set(&xfer->busy, SCMI_XFER_FREE);
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);

	return xfer;
}
/**
 * scmi_xfer_raw_get - Helper to get a bare free xfer from the TX channel
 *
 * @handle: Pointer to SCMI entity handle
 *
 * Note that xfer is taken from the TX channel structures.
 *
 * Return: A valid xfer on Success, or an error-pointer otherwise
 */
struct scmi_xfer *scmi_xfer_raw_get(const struct scmi_handle *handle)
{
	struct scmi_xfer *xfer;
	struct scmi_info *info = handle_to_scmi_info(handle);

	xfer = scmi_xfer_get(handle, &info->tx_minfo);
	if (!IS_ERR(xfer))
		xfer->flags |= SCMI_XFER_FLAG_IS_RAW;

	return xfer;
}
/**
 * scmi_xfer_raw_channel_get - Helper to get a reference to the proper channel
 * to use for a specific protocol_id Raw transaction.
 *
 * @handle: Pointer to SCMI entity handle
 * @protocol_id: Identifier of the protocol
 *
 * Note that in a regular SCMI stack, usually, a protocol has to be defined in
 * the DT to have an associated channel and be usable; but in Raw mode any
 * protocol in range is allowed, re-using the Base channel, so as to enable
 * fuzzing on any protocol without the need of a fully compiled DT.
 *
 * Return: A reference to the channel to use, or an ERR_PTR
 */
struct scmi_chan_info *
scmi_xfer_raw_channel_get(const struct scmi_handle *handle, u8 protocol_id)
{
	struct scmi_chan_info *cinfo;
	struct scmi_info *info = handle_to_scmi_info(handle);

	cinfo = idr_find(&info->tx_idr, protocol_id);
	if (!cinfo) {
		if (protocol_id == SCMI_PROTOCOL_BASE)
			return ERR_PTR(-EINVAL);
		/* Use Base channel for protocols not defined for DT */
		cinfo = idr_find(&info->tx_idr, SCMI_PROTOCOL_BASE);
		if (!cinfo)
			return ERR_PTR(-EINVAL);
		dev_warn_once(handle->dev,
			      "Using Base channel for protocol 0x%X\n",
			      protocol_id);
	}

	return cinfo;
}
/**
 * __scmi_xfer_put() - Release a message
 *
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 * @xfer: message that was reserved by scmi_xfer_get
 *
 * After refcount check, possibly release an xfer, clearing the token slot,
 * removing xfer from @pending_xfers and putting it back into free_xfers.
 *
 * This holds a spinlock to maintain integrity of internal data structures.
 */
static void
__scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer)
{
	unsigned long flags;

	spin_lock_irqsave(&minfo->xfer_lock, flags);
	if (refcount_dec_and_test(&xfer->users)) {
		if (xfer->pending) {
			scmi_xfer_token_clear(minfo, xfer);
			hash_del(&xfer->node);
			xfer->pending = false;
		}
		hlist_add_head(&xfer->node, &minfo->free_xfers);
	}
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);
}
/**
 * scmi_xfer_raw_put - Release an xfer that was taken by @scmi_xfer_raw_get
 *
 * @handle: Pointer to SCMI entity handle
 * @xfer: A reference to the xfer to put
 *
 * Note that as with other xfer_put() handlers the xfer is really effectively
 * released only if there are no more users on the system.
 */
void scmi_xfer_raw_put(const struct scmi_handle *handle, struct scmi_xfer *xfer)
{
	struct scmi_info *info = handle_to_scmi_info(handle);

	xfer->flags &= ~SCMI_XFER_FLAG_IS_RAW;
	xfer->flags &= ~SCMI_XFER_FLAG_CHAN_SET;
	return __scmi_xfer_put(&info->tx_minfo, xfer);
}
/**
 * scmi_xfer_lookup_unlocked - Helper to lookup an xfer_id
 *
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 * @xfer_id: Token ID to lookup in @pending_xfers
 *
 * Refcounting is untouched.
 *
 * Context: Assumes to be called with @xfer_lock already acquired.
 *
 * Return: A valid xfer on Success or error otherwise
 */
static struct scmi_xfer *
scmi_xfer_lookup_unlocked(struct scmi_xfers_info *minfo, u16 xfer_id)
{
	struct scmi_xfer *xfer = NULL;

	if (test_bit(xfer_id, minfo->xfer_alloc_table))
		xfer = XFER_FIND(minfo->pending_xfers, xfer_id);

	return xfer ?: ERR_PTR(-EINVAL);
}
/**
 * scmi_bad_message_trace - A helper to trace weird messages
 *
 * @cinfo: A reference to the channel descriptor on which the message was
 *	   received
 * @msg_hdr: Message header to track
 * @err: A specific error code used as a status value in traces.
 *
 * This helper can be used to trace any kind of weird, incomplete, unexpected,
 * timed-out message that arrives and as such, can be traced only referring to
 * the header content, since the payload is missing/unreliable.
 */
static void scmi_bad_message_trace(struct scmi_chan_info *cinfo, u32 msg_hdr,
				   enum scmi_bad_msg err)
{
	char *tag;
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);

	switch (MSG_XTRACT_TYPE(msg_hdr)) {
	case MSG_TYPE_COMMAND:
		tag = "!RESP";
		break;
	case MSG_TYPE_DELAYED_RESP:
		tag = "!DLYD";
		break;
	case MSG_TYPE_NOTIFICATION:
		tag = "!NOTI";
		break;
	default:
		tag = "!UNKN";
		break;
	}

	trace_scmi_msg_dump(info->id, cinfo->id,
			    MSG_XTRACT_PROT_ID(msg_hdr),
			    MSG_XTRACT_ID(msg_hdr), tag,
			    MSG_XTRACT_TOKEN(msg_hdr), err, NULL, 0);
}
/**
 * scmi_msg_response_validate - Validate message type against state of related
 * xfer
 *
 * @cinfo: A reference to the channel descriptor.
 * @msg_type: Message type to check
 * @xfer: A reference to the xfer to validate against @msg_type
 *
 * This function checks if @msg_type is congruent with the current state of
 * a pending @xfer; if an asynchronous delayed response is received before the
 * related synchronous response (Out-of-Order Delayed Response) the missing
 * synchronous response is assumed to be OK and completed, carrying on with the
 * Delayed Response: this is done to address the case in which the underlying
 * SCMI transport can deliver such out-of-order responses.
 *
 * Context: Assumes to be called with xfer->lock already acquired.
 *
 * Return: 0 on Success, error otherwise
 */
static inline int scmi_msg_response_validate(struct scmi_chan_info *cinfo,
					     u8 msg_type,
					     struct scmi_xfer *xfer)
{
	/*
	 * Even if a response was indeed expected on this slot at this point,
	 * a buggy platform could wrongly reply feeding us an unexpected
	 * delayed response we're not prepared to handle: bail-out safely
	 * blaming firmware.
	 */
	if (msg_type == MSG_TYPE_DELAYED_RESP && !xfer->async_done) {
		dev_err(cinfo->dev,
			"Delayed Response for %d not expected! Buggy F/W ?\n",
			xfer->hdr.seq);
		return -EINVAL;
	}

	switch (xfer->state) {
	case SCMI_XFER_SENT_OK:
		if (msg_type == MSG_TYPE_DELAYED_RESP) {
			/*
			 * Delayed Response expected but delivered earlier.
			 * Assume message RESPONSE was OK and skip state.
			 */
			xfer->hdr.status = SCMI_SUCCESS;
			xfer->state = SCMI_XFER_RESP_OK;
			complete(&xfer->done);
			dev_warn(cinfo->dev,
				 "Received valid OoO Delayed Response for %d\n",
				 xfer->hdr.seq);
		}
		break;
	case SCMI_XFER_RESP_OK:
		if (msg_type != MSG_TYPE_DELAYED_RESP)
			return -EINVAL;
		break;
	case SCMI_XFER_DRESP_OK:
		/* No further message expected once in SCMI_XFER_DRESP_OK */
		return -EINVAL;
	}

	return 0;
}
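/*
 * In summary, the transitions admitted by the validation above (state
 * updates are then applied by scmi_xfer_state_update()) are:
 *
 *	SENT_OK --[RESP]--> RESP_OK --[DRESP]--> DRESP_OK
 *	SENT_OK --[early DRESP]--> DRESP_OK  (RESP faked as SCMI_SUCCESS)
 *
 * Anything else is rejected with -EINVAL.
 */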
/**
 * scmi_xfer_state_update - Update xfer state
 *
 * @xfer: A reference to the xfer to update
 * @msg_type: Type of message being processed.
 *
 * Note that this message is assumed to have been already successfully validated
 * by @scmi_msg_response_validate(), so here we just update the state.
 *
 * Context: Assumes to be called on an xfer exclusively acquired using the
 *	    busy flag.
 */
static inline void scmi_xfer_state_update(struct scmi_xfer *xfer, u8 msg_type)
{
	xfer->hdr.type = msg_type;

	/* Unknown command types were already discarded earlier */
	if (xfer->hdr.type == MSG_TYPE_COMMAND)
		xfer->state = SCMI_XFER_RESP_OK;
	else
		xfer->state = SCMI_XFER_DRESP_OK;
}
static bool scmi_xfer_acquired(struct scmi_xfer *xfer)
{
	int ret;

	ret = atomic_cmpxchg(&xfer->busy, SCMI_XFER_FREE, SCMI_XFER_BUSY);

	return ret == SCMI_XFER_FREE;
}
/**
 * scmi_xfer_command_acquire - Helper to lookup and acquire a command xfer
 *
 * @cinfo: A reference to the channel descriptor.
 * @msg_hdr: A message header to use as lookup key
 *
 * When a valid xfer is found for the sequence number embedded in the provided
 * msg_hdr, reference counting is properly updated and exclusive access to this
 * xfer is granted till released with @scmi_xfer_command_release.
 *
 * Return: A valid @xfer on Success or error otherwise.
 */
static inline struct scmi_xfer *
scmi_xfer_command_acquire(struct scmi_chan_info *cinfo, u32 msg_hdr)
{
	int ret;
	unsigned long flags;
	struct scmi_xfer *xfer;
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
	struct scmi_xfers_info *minfo = &info->tx_minfo;
	u8 msg_type = MSG_XTRACT_TYPE(msg_hdr);
	u16 xfer_id = MSG_XTRACT_TOKEN(msg_hdr);

	/* Are we even expecting this? */
	spin_lock_irqsave(&minfo->xfer_lock, flags);
	xfer = scmi_xfer_lookup_unlocked(minfo, xfer_id);
	if (IS_ERR(xfer)) {
		dev_err(cinfo->dev,
			"Message for %d type %d is not expected!\n",
			xfer_id, msg_type);
		spin_unlock_irqrestore(&minfo->xfer_lock, flags);

		scmi_bad_message_trace(cinfo, msg_hdr, MSG_UNEXPECTED);
		scmi_inc_count(info->dbg->counters, ERR_MSG_UNEXPECTED);

		return xfer;
	}
	refcount_inc(&xfer->users);
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);

	spin_lock_irqsave(&xfer->lock, flags);
	ret = scmi_msg_response_validate(cinfo, msg_type, xfer);
	/*
	 * If a pending xfer was found which was also in a congruent state with
	 * the received message, acquire exclusive access to it setting the busy
	 * flag.
	 * Spins only on the rare limit condition of concurrent reception of
	 * RESP and DRESP for the same xfer.
	 */
	if (!ret) {
		spin_until_cond(scmi_xfer_acquired(xfer));
		scmi_xfer_state_update(xfer, msg_type);
	}
	spin_unlock_irqrestore(&xfer->lock, flags);

	if (ret) {
		dev_err(cinfo->dev,
			"Invalid message type:%d for %d - HDR:0x%X state:%d\n",
			msg_type, xfer_id, msg_hdr, xfer->state);

		scmi_bad_message_trace(cinfo, msg_hdr, MSG_INVALID);
		scmi_inc_count(info->dbg->counters, ERR_MSG_INVALID);

		/* On error the refcount incremented above has to be dropped */
		__scmi_xfer_put(minfo, xfer);
		xfer = ERR_PTR(-EINVAL);
	}

	return xfer;
}
static inline void scmi_xfer_command_release(struct scmi_info *info,
					     struct scmi_xfer *xfer)
{
	atomic_set(&xfer->busy, SCMI_XFER_FREE);
	__scmi_xfer_put(&info->tx_minfo, xfer);
}
static inline void scmi_clear_channel(struct scmi_info *info,
				      struct scmi_chan_info *cinfo)
{
	if (!cinfo->is_p2a) {
		dev_warn(cinfo->dev, "Invalid clear on A2P channel !\n");
		return;
	}

	if (info->desc->ops->clear_channel)
		info->desc->ops->clear_channel(cinfo);
}
static void scmi_handle_notification(struct scmi_chan_info *cinfo,
				     u32 msg_hdr, void *priv)
{
	struct scmi_xfer *xfer;
	struct device *dev = cinfo->dev;
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
	struct scmi_xfers_info *minfo = &info->rx_minfo;
	ktime_t ts;

	ts = ktime_get_boottime();
	xfer = scmi_xfer_get(cinfo->handle, minfo);
	if (IS_ERR(xfer)) {
		dev_err(dev, "failed to get free message slot (%ld)\n",
			PTR_ERR(xfer));

		scmi_bad_message_trace(cinfo, msg_hdr, MSG_NOMEM);
		scmi_inc_count(info->dbg->counters, ERR_MSG_NOMEM);

		scmi_clear_channel(info, cinfo);
		return;
	}

	unpack_scmi_header(msg_hdr, &xfer->hdr);
	if (priv)
		/* Ensure order between xfer->priv store and following ops */
		smp_store_mb(xfer->priv, priv);
	info->desc->ops->fetch_notification(cinfo, info->desc->max_msg_size,
					    xfer);

	trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id,
			    xfer->hdr.id, "NOTI", xfer->hdr.seq,
			    xfer->hdr.status, xfer->rx.buf, xfer->rx.len);
	scmi_inc_count(info->dbg->counters, NOTIFICATION_OK);

	scmi_notify(cinfo->handle, xfer->hdr.protocol_id,
		    xfer->hdr.id, xfer->rx.buf, xfer->rx.len, ts);

	trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
			   xfer->hdr.protocol_id, xfer->hdr.seq,
			   MSG_TYPE_NOTIFICATION);

	if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
		xfer->hdr.seq = MSG_XTRACT_TOKEN(msg_hdr);
		scmi_raw_message_report(info->raw, xfer, SCMI_RAW_NOTIF_QUEUE,
					cinfo->id);
	}

	__scmi_xfer_put(minfo, xfer);

	scmi_clear_channel(info, cinfo);
}
static void scmi_handle_response(struct scmi_chan_info *cinfo,
				 u32 msg_hdr, void *priv)
{
	struct scmi_xfer *xfer;
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);

	xfer = scmi_xfer_command_acquire(cinfo, msg_hdr);
	if (IS_ERR(xfer)) {
		if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT))
			scmi_raw_error_report(info->raw, cinfo, msg_hdr, priv);

		if (MSG_XTRACT_TYPE(msg_hdr) == MSG_TYPE_DELAYED_RESP)
			scmi_clear_channel(info, cinfo);
		return;
	}

	/* rx.len could be shrunk in the sync do_xfer, so reset to maxsz */
	if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP)
		xfer->rx.len = info->desc->max_msg_size;

	if (priv)
		/* Ensure order between xfer->priv store and following ops */
		smp_store_mb(xfer->priv, priv);
	info->desc->ops->fetch_response(cinfo, xfer);

	trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id,
			    xfer->hdr.id,
			    xfer->hdr.type == MSG_TYPE_DELAYED_RESP ?
			    (!SCMI_XFER_IS_RAW(xfer) ? "DLYD" : "dlyd") :
			    (!SCMI_XFER_IS_RAW(xfer) ? "RESP" : "resp"),
			    xfer->hdr.seq, xfer->hdr.status,
			    xfer->rx.buf, xfer->rx.len);

	trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
			   xfer->hdr.protocol_id, xfer->hdr.seq,
			   xfer->hdr.type);

	if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP) {
		scmi_clear_channel(info, cinfo);
		complete(xfer->async_done);
		scmi_inc_count(info->dbg->counters, DELAYED_RESPONSE_OK);
	} else {
		complete(&xfer->done);
		scmi_inc_count(info->dbg->counters, RESPONSE_OK);
	}

	if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
		/*
		 * When in polling mode avoid to queue the Raw xfer on the IRQ
		 * RX path since it will be already queued at the end of the TX
		 * poll loop.
		 */
		if (!xfer->hdr.poll_completion)
			scmi_raw_message_report(info->raw, xfer,
						SCMI_RAW_REPLY_QUEUE,
						cinfo->id);
	}

	scmi_xfer_command_release(info, xfer);
}
/**
 * scmi_rx_callback() - callback for receiving messages
 *
 * @cinfo: SCMI channel info
 * @msg_hdr: Message header
 * @priv: Transport specific private data.
 *
 * Processes one received message to appropriate transfer information and
 * signals completion of the transfer.
 *
 * NOTE: This function will be invoked in IRQ context, hence should be
 * as optimal as possible.
 */
static void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr,
			     void *priv)
{
	u8 msg_type = MSG_XTRACT_TYPE(msg_hdr);

	switch (msg_type) {
	case MSG_TYPE_NOTIFICATION:
		scmi_handle_notification(cinfo, msg_hdr, priv);
		break;
	case MSG_TYPE_COMMAND:
	case MSG_TYPE_DELAYED_RESP:
		scmi_handle_response(cinfo, msg_hdr, priv);
		break;
	default:
		WARN_ONCE(1, "received unknown msg_type:%d\n", msg_type);
		scmi_bad_message_trace(cinfo, msg_hdr, MSG_UNKNOWN);
		break;
	}
}
/**
 * xfer_put() - Release a transmit message
 *
 * @ph: Pointer to SCMI protocol handle
 * @xfer: message that was reserved by xfer_get_init
 */
static void xfer_put(const struct scmi_protocol_handle *ph,
		     struct scmi_xfer *xfer)
{
	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
	struct scmi_info *info = handle_to_scmi_info(pi->handle);

	__scmi_xfer_put(&info->tx_minfo, xfer);
}
static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo,
				      struct scmi_xfer *xfer, ktime_t stop)
{
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);

	/*
	 * Poll also on xfer->done so that polling can be forcibly terminated
	 * in case of out-of-order receptions of delayed responses
	 */
	return info->desc->ops->poll_done(cinfo, xfer) ||
	       try_wait_for_completion(&xfer->done) ||
	       ktime_after(ktime_get(), stop);
}
static int scmi_wait_for_reply(struct device *dev, const struct scmi_desc *desc,
			       struct scmi_chan_info *cinfo,
			       struct scmi_xfer *xfer, unsigned int timeout_ms)
{
	int ret = 0;
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);

	if (xfer->hdr.poll_completion) {
		/*
		 * Real polling is needed only if transport has NOT declared
		 * itself to support synchronous commands replies.
		 */
		if (!desc->sync_cmds_completed_on_ret) {
			/*
			 * Poll on xfer using transport provided .poll_done();
			 * assumes no completion interrupt was available.
			 */
			ktime_t stop = ktime_add_ms(ktime_get(), timeout_ms);

			spin_until_cond(scmi_xfer_done_no_timeout(cinfo,
								  xfer, stop));
			if (ktime_after(ktime_get(), stop)) {
				dev_err(dev,
					"timed out in resp(caller: %pS) - polling\n",
					(void *)_RET_IP_);
				ret = -ETIMEDOUT;
				scmi_inc_count(info->dbg->counters, XFERS_RESPONSE_POLLED_TIMEOUT);
			}
		}

		if (!ret) {
			unsigned long flags;

			/*
			 * Do not fetch_response if an out-of-order delayed
			 * response is being processed.
			 */
			spin_lock_irqsave(&xfer->lock, flags);
			if (xfer->state == SCMI_XFER_SENT_OK) {
				desc->ops->fetch_response(cinfo, xfer);
				xfer->state = SCMI_XFER_RESP_OK;
			}
			spin_unlock_irqrestore(&xfer->lock, flags);

			/* Trace polled replies. */
			trace_scmi_msg_dump(info->id, cinfo->id,
					    xfer->hdr.protocol_id, xfer->hdr.id,
					    !SCMI_XFER_IS_RAW(xfer) ?
					    "RESP" : "resp",
					    xfer->hdr.seq, xfer->hdr.status,
					    xfer->rx.buf, xfer->rx.len);
			scmi_inc_count(info->dbg->counters, RESPONSE_POLLED_OK);

			if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
				scmi_raw_message_report(info->raw, xfer,
							SCMI_RAW_REPLY_QUEUE,
							cinfo->id);
			}
		}
	} else {
		/* And we wait for the response. */
		if (!wait_for_completion_timeout(&xfer->done,
						 msecs_to_jiffies(timeout_ms))) {
			dev_err(dev, "timed out in resp(caller: %pS)\n",
				(void *)_RET_IP_);
			ret = -ETIMEDOUT;
			scmi_inc_count(info->dbg->counters, XFERS_RESPONSE_TIMEOUT);
		}
	}

	return ret;
}
/**
 * scmi_wait_for_message_response - A helper to group all the possible ways of
 * waiting for a synchronous message response.
 *
 * @cinfo: SCMI channel info
 * @xfer: Reference to the transfer being waited for.
 *
 * Chooses waiting strategy (sleep-waiting vs busy-waiting) depending on
 * configuration flags like xfer->hdr.poll_completion.
 *
 * Return: 0 on Success, error otherwise.
 */
static int scmi_wait_for_message_response(struct scmi_chan_info *cinfo,
					  struct scmi_xfer *xfer)
{
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
	struct device *dev = info->dev;

	trace_scmi_xfer_response_wait(xfer->transfer_id, xfer->hdr.id,
				      xfer->hdr.protocol_id, xfer->hdr.seq,
				      info->desc->max_rx_timeout_ms,
				      xfer->hdr.poll_completion);

	return scmi_wait_for_reply(dev, info->desc, cinfo, xfer,
				   info->desc->max_rx_timeout_ms);
}
/**
 * scmi_xfer_raw_wait_for_message_response - A helper to wait for a message
 * reply to an xfer raw request on a specific channel for the required timeout.
 *
 * @cinfo: SCMI channel info
 * @xfer: Reference to the transfer being waited for.
 * @timeout_ms: The maximum timeout in milliseconds
 *
 * Return: 0 on Success, error otherwise.
 */
int scmi_xfer_raw_wait_for_message_response(struct scmi_chan_info *cinfo,
					    struct scmi_xfer *xfer,
					    unsigned int timeout_ms)
{
	int ret;
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
	struct device *dev = info->dev;

	ret = scmi_wait_for_reply(dev, info->desc, cinfo, xfer, timeout_ms);
	if (ret)
		dev_dbg(dev, "timed out in RAW response - HDR:%08X\n",
			pack_scmi_header(&xfer->hdr));

	return ret;
}
/**
 * do_xfer() - Do one transfer
 *
 * @ph: Pointer to SCMI protocol handle
 * @xfer: Transfer to initiate and wait for response
 *
 * Return: -ETIMEDOUT in case of no response, if transmit error,
 *	   return corresponding error, else if all goes well,
 *	   return 0.
 */
static int do_xfer(const struct scmi_protocol_handle *ph,
		   struct scmi_xfer *xfer)
{
	int ret;
	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
	struct scmi_info *info = handle_to_scmi_info(pi->handle);
	struct device *dev = info->dev;
	struct scmi_chan_info *cinfo;

	/* Check for polling request on custom command xfers at first */
	if (xfer->hdr.poll_completion &&
	    !is_transport_polling_capable(info->desc)) {
		dev_warn_once(dev,
			      "Polling mode is not supported by transport.\n");
		scmi_inc_count(info->dbg->counters, SENT_FAIL_POLLING_UNSUPPORTED);
		return -EINVAL;
	}

	cinfo = idr_find(&info->tx_idr, pi->proto->id);
	if (unlikely(!cinfo)) {
		scmi_inc_count(info->dbg->counters, SENT_FAIL_CHANNEL_NOT_FOUND);
		return -EINVAL;
	}
	/* True ONLY if also supported by transport. */
	if (is_polling_enabled(cinfo, info->desc))
		xfer->hdr.poll_completion = true;

	/*
	 * Initialise protocol id now from protocol handle to avoid it being
	 * overridden by mistake (or malice) by the protocol code mangling with
	 * the scmi_xfer structure prior to this.
	 */
	xfer->hdr.protocol_id = pi->proto->id;
	reinit_completion(&xfer->done);

	trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id,
			      xfer->hdr.protocol_id, xfer->hdr.seq,
			      xfer->hdr.poll_completion);

	/* Clear any stale status */
	xfer->hdr.status = SCMI_SUCCESS;
	xfer->state = SCMI_XFER_SENT_OK;
	/*
	 * Even though spinlocking is not needed here since no race is possible
	 * on xfer->state due to the monotonically increasing tokens allocation,
	 * we must anyway ensure xfer->state initialization is not re-ordered
	 * after the .send_message() to be sure that on the RX path an early
	 * ISR calling scmi_rx_callback() cannot see an old stale xfer->state.
	 */
	smp_mb();

	ret = info->desc->ops->send_message(cinfo, xfer);
	if (ret < 0) {
		dev_dbg(dev, "Failed to send message %d\n", ret);
		scmi_inc_count(info->dbg->counters, SENT_FAIL);
		return ret;
	}

	trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id,
			    xfer->hdr.id, "CMND", xfer->hdr.seq,
			    xfer->hdr.status, xfer->tx.buf, xfer->tx.len);
	scmi_inc_count(info->dbg->counters, SENT_OK);

	ret = scmi_wait_for_message_response(cinfo, xfer);
	if (!ret && xfer->hdr.status) {
		ret = scmi_to_linux_errno(xfer->hdr.status);
		scmi_inc_count(info->dbg->counters, ERR_PROTOCOL);
	}

	if (info->desc->ops->mark_txdone)
		info->desc->ops->mark_txdone(cinfo, ret, xfer);

	trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id,
			    xfer->hdr.protocol_id, xfer->hdr.seq, ret);

	return ret;
}
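/*
 * Typical usage from protocol code (a sketch: MY_MSG_ID, domain and val are
 * placeholders, not symbols defined by this driver):
 *
 *	struct scmi_xfer *t;
 *	int ret;
 *
 *	ret = ph->xops->xfer_get_init(ph, MY_MSG_ID, sizeof(__le32),
 *				      sizeof(__le32), &t);
 *	if (ret)
 *		return ret;
 *
 *	put_unaligned_le32(domain, t->tx.buf);
 *	ret = ph->xops->do_xfer(ph, t);
 *	if (!ret)
 *		val = get_unaligned_le32(t->rx.buf);
 *
 *	ph->xops->xfer_put(ph, t);
 *	return ret;
 */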
static void reset_rx_to_maxsz(const struct scmi_protocol_handle *ph,
			      struct scmi_xfer *xfer)
{
	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
	struct scmi_info *info = handle_to_scmi_info(pi->handle);

	xfer->rx.len = info->desc->max_msg_size;
}
/**
 * do_xfer_with_response() - Do one transfer and wait until the delayed
 *	response is received
 *
 * @ph: Pointer to SCMI protocol handle
 * @xfer: Transfer to initiate and wait for response
 *
 * Using asynchronous commands in atomic/polling mode should be avoided since
 * it could cause long busy-waiting here, so ignore polling for the delayed
 * response and WARN if it was requested for this command transaction since
 * upper layers should refrain from issuing such kind of requests.
 *
 * The only other option would have been to refrain from using any asynchronous
 * command even if made available, when an atomic transport is detected, and
 * instead forcibly use the synchronous version (thing that can be easily
 * attained at the protocol layer), but this would also have led to longer
 * stalls of the channel for synchronous commands and possibly timeouts.
 * (in other words there is usually a good reason if a platform provides an
 *  asynchronous version of a command and we should prefer to use it...just not
 *  when using atomic/polling mode)
 *
 * Return: -ETIMEDOUT in case of no delayed response, if transmit error,
 *	   return corresponding error, else if all goes well, return 0.
 */
static int do_xfer_with_response(const struct scmi_protocol_handle *ph,
				 struct scmi_xfer *xfer)
{
	int ret, timeout = msecs_to_jiffies(SCMI_MAX_RESPONSE_TIMEOUT);
	DECLARE_COMPLETION_ONSTACK(async_response);

	xfer->async_done = &async_response;

	/*
	 * Delayed responses should not be polled, so an async command should
	 * not have been used when requiring an atomic/poll context; WARN and
	 * perform instead a sleeping wait.
	 * (Note Async + IgnoreDelayedResponses are sent via do_xfer)
	 */
	WARN_ON_ONCE(xfer->hdr.poll_completion);

	ret = do_xfer(ph, xfer);
	if (!ret) {
		if (!wait_for_completion_timeout(xfer->async_done, timeout)) {
			dev_err(ph->dev,
				"timed out in delayed resp(caller: %pS)\n",
				(void *)_RET_IP_);
			ret = -ETIMEDOUT;
		} else if (xfer->hdr.status) {
			ret = scmi_to_linux_errno(xfer->hdr.status);
		}
	}

	xfer->async_done = NULL;
	return ret;
}
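/*
 * Asynchronous commands pair with this helper as follows (sketch; the exact
 * command layout is protocol specific and hypothetical here): the caller
 * sets up the xfer as usual via xfer_get_init(), marks the request as async
 * in its payload per the protocol spec, invokes do_xfer_with_response()
 * instead of do_xfer(), and finally reads the delayed-response payload from
 * xfer->rx.buf once this returns 0.
 */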
/**
 * xfer_get_init() - Allocate and initialise one message for transmit
 *
 * @ph: Pointer to SCMI protocol handle
 * @msg_id: Message identifier
 * @tx_size: transmit message size
 * @rx_size: receive message size
 * @p: pointer to the allocated and initialised message
 *
 * This function allocates the message using @scmi_xfer_get and
 * initialises the header.
 *
 * Return: 0 if all went fine with @p pointing to message, else
 *	   corresponding error.
 */
static int xfer_get_init(const struct scmi_protocol_handle *ph,
			 u8 msg_id, size_t tx_size, size_t rx_size,
			 struct scmi_xfer **p)
{
	int ret;
	struct scmi_xfer *xfer;
	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
	struct scmi_info *info = handle_to_scmi_info(pi->handle);
	struct scmi_xfers_info *minfo = &info->tx_minfo;
	struct device *dev = info->dev;

	/* Ensure we have sane transfer sizes */
	if (rx_size > info->desc->max_msg_size ||
	    tx_size > info->desc->max_msg_size)
		return -ERANGE;

	xfer = scmi_xfer_get(pi->handle, minfo);
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "failed to get free message slot(%d)\n", ret);
		return ret;
	}

	/* Pick a sequence number and register this xfer as in-flight */
	ret = scmi_xfer_pending_set(xfer, minfo);
	if (ret) {
		dev_err(pi->handle->dev,
			"Failed to get monotonic token %d\n", ret);
		__scmi_xfer_put(minfo, xfer);
		return ret;
	}

	xfer->tx.len = tx_size;
	xfer->rx.len = rx_size ? : info->desc->max_msg_size;
	xfer->hdr.type = MSG_TYPE_COMMAND;
	xfer->hdr.id = msg_id;
	xfer->hdr.poll_completion = false;

	*p = xfer;

	return 0;
}
/**
 * version_get() - command to get the revision of the SCMI entity
 *
 * @ph: Pointer to SCMI protocol handle
 * @version: Holds returned version of protocol.
 *
 * Updates the SCMI information in the internal data structure.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int version_get(const struct scmi_protocol_handle *ph, u32 *version)
{
	int ret;
	__le32 *rev_info;
	struct scmi_xfer *t;

	ret = xfer_get_init(ph, PROTOCOL_VERSION, 0, sizeof(*version), &t);
	if (ret)
		return ret;

	ret = do_xfer(ph, t);
	if (!ret) {
		rev_info = t->rx.buf;
		*version = le32_to_cpu(*rev_info);
	}

	xfer_put(ph, t);
	return ret;
}
/**
 * scmi_set_protocol_priv - Set protocol specific data at init time
 *
 * @ph: A reference to the protocol handle.
 * @priv: The private data to set.
 * @version: The detected protocol version for the core to register.
 *
 * Return: 0 on Success
 */
static int scmi_set_protocol_priv(const struct scmi_protocol_handle *ph,
				  void *priv, u32 version)
{
	struct scmi_protocol_instance *pi = ph_to_pi(ph);

	pi->priv = priv;
	pi->version = version;

	return 0;
}
/**
 * scmi_get_protocol_priv - Get protocol specific data set at init time
 *
 * @ph: A reference to the protocol handle.
 *
 * Return: Protocol private data if any was set.
 */
static void *scmi_get_protocol_priv(const struct scmi_protocol_handle *ph)
{
	const struct scmi_protocol_instance *pi = ph_to_pi(ph);

	return pi->priv;
}
static const struct scmi_xfer_ops xfer_ops = {
	.version_get = version_get,
	.xfer_get_init = xfer_get_init,
	.reset_rx_to_maxsz = reset_rx_to_maxsz,
	.do_xfer = do_xfer,
	.do_xfer_with_response = do_xfer_with_response,
	.xfer_put = xfer_put,
};
struct scmi_msg_resp_domain_name_get {
	__le32 flags;
	u8 name[SCMI_MAX_STR_SIZE];
};
/**
 * scmi_common_extended_name_get - Common helper to get extended resources name
 * @ph: A protocol handle reference.
 * @cmd_id: The specific command ID to use.
 * @res_id: The specific resource ID to use.
 * @flags: A pointer to specific flags to use, if any.
 * @name: A pointer to the preallocated area where the retrieved name will be
 *	  stored as a NULL terminated string.
 * @len: The len in bytes of the @name char array.
 *
 * Return: 0 on Success
 */
static int scmi_common_extended_name_get(const struct scmi_protocol_handle *ph,
					 u8 cmd_id, u32 res_id, u32 *flags,
					 char *name, size_t len)
{
	int ret;
	size_t txlen;
	struct scmi_xfer *t;
	struct scmi_msg_resp_domain_name_get *resp;

	txlen = !flags ? sizeof(res_id) : sizeof(res_id) + sizeof(*flags);
	ret = ph->xops->xfer_get_init(ph, cmd_id, txlen, sizeof(*resp), &t);
	if (ret)
		goto out;

	put_unaligned_le32(res_id, t->tx.buf);
	if (flags)
		put_unaligned_le32(*flags, t->tx.buf + sizeof(res_id));
	resp = t->rx.buf;

	ret = ph->xops->do_xfer(ph, t);
	if (!ret)
		strscpy(name, resp->name, len);

	ph->xops->xfer_put(ph, t);
out:
	if (ret)
		dev_warn(ph->dev,
			 "Failed to get extended name - id:%u (ret:%d). Using %s\n",
			 res_id, ret, name);
	return ret;
}
/**
 * scmi_common_get_max_msg_size - Get maximum message size
 * @ph: A protocol handle reference.
 *
 * Return: Maximum message size for the current protocol.
 */
static int scmi_common_get_max_msg_size(const struct scmi_protocol_handle *ph)
{
	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
	struct scmi_info *info = handle_to_scmi_info(pi->handle);

	return info->desc->max_msg_size;
}
/**
 * struct scmi_iterator - Iterator descriptor
 * @msg: A reference to the message TX buffer; filled by @prepare_message with
 *	 a proper custom command payload for each multi-part command request.
 * @resp: A reference to the response RX buffer; used by @update_state and
 *	  @process_response to parse the multi-part replies.
 * @t: A reference to the underlying xfer initialized and used transparently by
 *     the iterator internal routines.
 * @ph: A reference to the associated protocol handle to be used.
 * @ops: A reference to the custom provided iterator operations.
 * @state: The current iterator state; used and updated in turn by the iterators
 *	   internal routines and by the caller-provided @scmi_iterator_ops.
 * @priv: A reference to optional private data as provided by the caller and
 *	  passed back to the @scmi_iterator_ops.
 */
struct scmi_iterator {
	void *msg;
	void *resp;
	struct scmi_xfer *t;
	const struct scmi_protocol_handle *ph;
	struct scmi_iterator_ops *ops;
	struct scmi_iterator_state state;
	void *priv;
};
static void *scmi_iterator_init(const struct scmi_protocol_handle *ph,
				struct scmi_iterator_ops *ops,
				unsigned int max_resources, u8 msg_id,
				size_t tx_size, void *priv)
{
	int ret;
	struct scmi_iterator *i;

	i = devm_kzalloc(ph->dev, sizeof(*i), GFP_KERNEL);
	if (!i)
		return ERR_PTR(-ENOMEM);

	i->ph = ph;
	i->ops = ops;
	i->priv = priv;

	ret = ph->xops->xfer_get_init(ph, msg_id, tx_size, 0, &i->t);
	if (ret) {
		devm_kfree(ph->dev, i);
		return ERR_PTR(ret);
	}

	i->state.max_resources = max_resources;
	i->msg = i->t->tx.buf;
	i->resp = i->t->rx.buf;

	return i;
}
static int scmi_iterator_run(void *iter)
{
	int ret = -EINVAL;
	struct scmi_iterator_ops *iops;
	const struct scmi_protocol_handle *ph;
	struct scmi_iterator_state *st;
	struct scmi_iterator *i = iter;

	if (!i || !i->ops || !i->ph)
		return ret;

	iops = i->ops;
	ph = i->ph;
	st = &i->state;

	do {
		iops->prepare_message(i->msg, st->desc_index, i->priv);
		ret = ph->xops->do_xfer(ph, i->t);
		if (ret)
			break;

		st->rx_len = i->t->rx.len;
		ret = iops->update_state(st, i->resp, i->priv);
		if (ret)
			break;

		if (st->num_returned > st->max_resources - st->desc_index) {
			dev_err(ph->dev,
				"No. of resources can't exceed %d\n",
				st->max_resources);
			ret = -EINVAL;
			break;
		}

		for (st->loop_idx = 0; st->loop_idx < st->num_returned;
		     st->loop_idx++) {
			ret = iops->process_response(ph, i->resp, st, i->priv);
			if (ret)
				goto out;
		}

		st->desc_index += st->num_returned;
		ph->xops->reset_rx_to_maxsz(ph, i->t);
		/*
		 * check for both returned and remaining to avoid infinite
		 * loop due to buggy firmware
		 */
	} while (st->num_returned && st->num_remaining);

out:
	/* Finalize and destroy iterator */
	ph->xops->xfer_put(ph, i->t);
	devm_kfree(ph->dev, i);

	return ret;
}
struct scmi_msg_get_fc_info {
	__le32 domain;
	__le32 message_id;
};

struct scmi_msg_resp_desc_fc {
	__le32 attr;
#define SUPPORTS_DOORBELL(x)		((x) & BIT(0))
#define DOORBELL_REG_WIDTH(x)		FIELD_GET(GENMASK(2, 1), (x))
	__le32 rate_limit;
	__le32 chan_addr_low;
	__le32 chan_addr_high;
	__le32 chan_size;
	__le32 db_addr_low;
	__le32 db_addr_high;
	__le32 db_set_lmask;
	__le32 db_set_hmask;
	__le32 db_preserve_lmask;
	__le32 db_preserve_hmask;
};
static void
scmi_common_fastchannel_init(const struct scmi_protocol_handle *ph,
			     u8 describe_id, u32 message_id, u32 valid_size,
			     u32 domain, void __iomem **p_addr,
			     struct scmi_fc_db_info **p_db, u32 *rate_limit)
{
	int ret;
	u32 flags;
	u64 phys_addr;
	u8 size;
	void __iomem *addr;
	struct scmi_xfer *t;
	struct scmi_fc_db_info *db = NULL;
	struct scmi_msg_get_fc_info *info;
	struct scmi_msg_resp_desc_fc *resp;
	const struct scmi_protocol_instance *pi = ph_to_pi(ph);

	if (!p_addr) {
		ret = -EINVAL;
		goto err_out;
	}

	ret = ph->xops->xfer_get_init(ph, describe_id,
				      sizeof(*info), sizeof(*resp), &t);
	if (ret)
		goto err_out;

	info = t->tx.buf;
	info->domain = cpu_to_le32(domain);
	info->message_id = cpu_to_le32(message_id);

	/*
	 * Bail out on error leaving fc_info addresses zeroed; this includes
	 * the case in which the requested domain/message_id does NOT support
	 * fastchannels at all.
	 */
	ret = ph->xops->do_xfer(ph, t);
	if (ret)
		goto err_xfer;

	resp = t->rx.buf;
	flags = le32_to_cpu(resp->attr);
	size = le32_to_cpu(resp->chan_size);
	if (size != valid_size) {
		ret = -EINVAL;
		goto err_xfer;
	}

	if (rate_limit)
		*rate_limit = le32_to_cpu(resp->rate_limit) & GENMASK(19, 0);

	phys_addr = le32_to_cpu(resp->chan_addr_low);
	phys_addr |= (u64)le32_to_cpu(resp->chan_addr_high) << 32;
	addr = devm_ioremap(ph->dev, phys_addr, size);
	if (!addr) {
		ret = -EADDRNOTAVAIL;
		goto err_xfer;
	}

	*p_addr = addr;

	if (p_db && SUPPORTS_DOORBELL(flags)) {
		db = devm_kzalloc(ph->dev, sizeof(*db), GFP_KERNEL);
		if (!db) {
			ret = -ENOMEM;
			goto err_db;
		}

		size = 1 << DOORBELL_REG_WIDTH(flags);
		phys_addr = le32_to_cpu(resp->db_addr_low);
		phys_addr |= (u64)le32_to_cpu(resp->db_addr_high) << 32;
		addr = devm_ioremap(ph->dev, phys_addr, size);
		if (!addr) {
			ret = -EADDRNOTAVAIL;
			goto err_db_mem;
		}

		db->addr = addr;
		db->width = size;
		db->set = le32_to_cpu(resp->db_set_lmask);
		db->set |= (u64)le32_to_cpu(resp->db_set_hmask) << 32;
		db->mask = le32_to_cpu(resp->db_preserve_lmask);
		db->mask |= (u64)le32_to_cpu(resp->db_preserve_hmask) << 32;

		*p_db = db;
	}

	ph->xops->xfer_put(ph, t);

	dev_dbg(ph->dev,
		"Using valid FC for protocol %X [MSG_ID:%u / RES_ID:%u]\n",
		pi->proto->id, message_id, domain);

	return;

err_db_mem:
	devm_kfree(ph->dev, db);

err_db:
	*p_addr = 0;

err_xfer:
	ph->xops->xfer_put(ph, t);

err_out:
	dev_warn(ph->dev,
		 "Failed to get FC for protocol %X [MSG_ID:%u / RES_ID:%u] - ret:%d. Using regular messaging.\n",
		 pi->proto->id, message_id, domain, ret);
}
#define SCMI_PROTO_FC_RING_DB(w)				\
do {								\
	u##w val = 0;						\
								\
	if (db->mask)						\
		val = ioread##w(db->addr) & db->mask;		\
	iowrite##w((u##w)db->set | val, db->addr);		\
} while (0)

static void scmi_common_fastchannel_db_ring(struct scmi_fc_db_info *db)
{
	if (!db || !db->addr)
		return;

	if (db->width == 1)
		SCMI_PROTO_FC_RING_DB(8);
	else if (db->width == 2)
		SCMI_PROTO_FC_RING_DB(16);
	else if (db->width == 4)
		SCMI_PROTO_FC_RING_DB(32);
	else /* db->width == 8 */
#ifdef CONFIG_64BIT
		SCMI_PROTO_FC_RING_DB(64);
#else
	{
		u64 val = 0;

		if (db->mask)
			val = ioread64_hi_lo(db->addr) & db->mask;
		iowrite64_hi_lo(db->set | val, db->addr);
	}
#endif
}
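/*
 * For clarity, SCMI_PROTO_FC_RING_DB(32) above expands roughly to:
 *
 *	u32 val = 0;
 *
 *	if (db->mask)
 *		val = ioread32(db->addr) & db->mask;
 *	iowrite32((u32)db->set | val, db->addr);
 *
 * i.e. any bits to be preserved are read back and merged with the doorbell
 * set mask before the single write that rings the doorbell.
 */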
/**
 * scmi_protocol_msg_check - Check protocol message attributes
 *
 * @ph: A reference to the protocol handle.
 * @message_id: The ID of the message to check.
 * @attributes: A parameter to optionally return the retrieved message
 *		attributes, in case of Success.
 *
 * A helper to check protocol message attributes for a specific protocol
 * and message pair.
 *
 * Return: 0 on SUCCESS
 */
static int scmi_protocol_msg_check(const struct scmi_protocol_handle *ph,
				   u32 message_id, u32 *attributes)
{
	int ret;
	struct scmi_xfer *t;

	ret = xfer_get_init(ph, PROTOCOL_MESSAGE_ATTRIBUTES,
			    sizeof(__le32), 0, &t);
	if (ret)
		return ret;

	put_unaligned_le32(message_id, t->tx.buf);
	ret = do_xfer(ph, t);
	if (!ret && attributes)
		*attributes = get_unaligned_le32(t->rx.buf);
	xfer_put(ph, t);

	return ret;
}
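/*
 * Usage sketch: protocol code typically probes optional commands with this
 * helper before relying on them (MY_OPTIONAL_CMD is a placeholder):
 *
 *	if (!ph->hops->protocol_msg_check(ph, MY_OPTIONAL_CMD, NULL))
 *		// command implemented by the platform, safe to use
 *
 * scmi_protocol_version_negotiate() below uses exactly this pattern.
 */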
static const struct scmi_proto_helpers_ops helpers_ops = {
	.extended_name_get = scmi_common_extended_name_get,
	.get_max_msg_size = scmi_common_get_max_msg_size,
	.iter_response_init = scmi_iterator_init,
	.iter_response_run = scmi_iterator_run,
	.protocol_msg_check = scmi_protocol_msg_check,
	.fastchannel_init = scmi_common_fastchannel_init,
	.fastchannel_db_ring = scmi_common_fastchannel_db_ring,
};
/**
 * scmi_revision_area_get - Retrieve version memory area.
 *
 * @ph: A reference to the protocol handle.
 *
 * A helper to grab the version memory area reference during SCMI Base protocol
 * initialization.
 *
 * Return: A reference to the version memory area associated to the SCMI
 *	   instance underlying this protocol handle.
 */
struct scmi_revision_info *
scmi_revision_area_get(const struct scmi_protocol_handle *ph)
{
	const struct scmi_protocol_instance *pi = ph_to_pi(ph);

	return pi->handle->version;
}
/**
 * scmi_protocol_version_negotiate  - Negotiate protocol version
 *
 * @ph: A reference to the protocol handle.
 *
 * A helper to negotiate a protocol version different from the latest
 * advertised as supported from the platform: on Success backward
 * compatibility is assured by the platform.
 *
 * Return: 0 on Success
 */
static int scmi_protocol_version_negotiate(struct scmi_protocol_handle *ph)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_protocol_instance *pi = ph_to_pi(ph);

	/* At first check if NEGOTIATE_PROTOCOL_VERSION is supported ... */
	ret = scmi_protocol_msg_check(ph, NEGOTIATE_PROTOCOL_VERSION, NULL);
	if (ret)
		return ret;

	/* ... then attempt protocol version negotiation */
	ret = xfer_get_init(ph, NEGOTIATE_PROTOCOL_VERSION,
			    sizeof(__le32), 0, &t);
	if (ret)
		return ret;

	put_unaligned_le32(pi->proto->supported_version, t->tx.buf);
	ret = do_xfer(ph, t);
	if (!ret)
		pi->negotiated_version = pi->proto->supported_version;

	xfer_put(ph, t);

	return ret;
}
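
/*
 * Example (hypothetical versions): if the platform advertises 0x30000 for a
 * protocol whose driver here supports at most 0x20000, the agent sends
 * NEGOTIATE_PROTOCOL_VERSION carrying 0x20000; on success the platform is
 * then bound to behave as a 0x20000 implementation for this instance.
 */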
/**
 * scmi_alloc_init_protocol_instance  - Allocate and initialize a protocol
 * instance descriptor.
 * @info: The reference to the related SCMI instance.
 * @proto: The protocol descriptor.
 *
 * Allocate a new protocol instance descriptor, using the provided @proto
 * description, against the specified SCMI instance @info, and initialize it;
 * all resources management is handled via a dedicated per-protocol devres
 * group.
 *
 * Context: Assumes to be called with @protocols_mtx already acquired.
 * Return: A reference to a freshly allocated and initialized protocol instance
 *	   or ERR_PTR on failure. On failure the @proto reference is at first
 *	   put using @scmi_protocol_put() before releasing all the devres group.
 */
static struct scmi_protocol_instance *
scmi_alloc_init_protocol_instance(struct scmi_info *info,
				  const struct scmi_protocol *proto)
{
	int ret = -ENOMEM;
	void *gid;
	struct scmi_protocol_instance *pi;
	const struct scmi_handle *handle = &info->handle;

	/* Protocol specific devres group */
	gid = devres_open_group(handle->dev, NULL, GFP_KERNEL);
	if (!gid) {
		scmi_protocol_put(proto);
		goto out;
	}

	pi = devm_kzalloc(handle->dev, sizeof(*pi), GFP_KERNEL);
	if (!pi)
		goto clean;

	pi->gid = gid;
	pi->proto = proto;
	pi->handle = handle;
	pi->ph.dev = handle->dev;
	pi->ph.xops = &xfer_ops;
	pi->ph.hops = &helpers_ops;
	pi->ph.set_priv = scmi_set_protocol_priv;
	pi->ph.get_priv = scmi_get_protocol_priv;
	refcount_set(&pi->users, 1);
	/* proto->init is assured NON NULL by scmi_protocol_register */
	ret = pi->proto->instance_init(&pi->ph);
	if (ret)
		goto clean;

	ret = idr_alloc(&info->protocols, pi, proto->id, proto->id + 1,
			GFP_KERNEL);
	if (ret != proto->id)
		goto clean;

	/*
	 * Warn but ignore events registration errors since we do not want
	 * to skip whole protocols if their notifications are messed up.
	 */
	if (pi->proto->events) {
		ret = scmi_register_protocol_events(handle, pi->proto->id,
						    &pi->ph,
						    pi->proto->events);
		if (ret)
			dev_warn(handle->dev,
				 "Protocol:%X - Events Registration Failed - err:%d\n",
				 pi->proto->id, ret);
	}

	devres_close_group(handle->dev, pi->gid);
	dev_dbg(handle->dev, "Initialized protocol: 0x%X\n", pi->proto->id);

	if (pi->version > proto->supported_version) {
		ret = scmi_protocol_version_negotiate(&pi->ph);
		if (!ret) {
			dev_info(handle->dev,
				 "Protocol 0x%X successfully negotiated version 0x%X\n",
				 proto->id, pi->negotiated_version);
		} else {
			dev_warn(handle->dev,
				 "Detected UNSUPPORTED higher version 0x%X for protocol 0x%X.\n",
				 pi->version, pi->proto->id);
			dev_warn(handle->dev,
				 "Trying version 0x%X. Backward compatibility is NOT assured.\n",
				 pi->proto->supported_version);
		}
	}

	return pi;

clean:
	/* Take care to put the protocol module's owner before releasing all */
	scmi_protocol_put(proto);
	devres_release_group(handle->dev, gid);
out:
	return ERR_PTR(ret);
}
/**
 * scmi_get_protocol_instance  - Protocol initialization helper.
 * @handle: A reference to the SCMI platform instance.
 * @protocol_id: The protocol being requested.
 *
 * In case the required protocol has never been requested before for this
 * instance, allocate and initialize all the needed structures while handling
 * resource allocation with a dedicated per-protocol devres subgroup.
 *
 * Return: A reference to an initialized protocol instance or error on failure:
 *	   in particular returns -EPROBE_DEFER when the desired protocol could
 *	   NOT be found.
 */
static struct scmi_protocol_instance * __must_check
scmi_get_protocol_instance(const struct scmi_handle *handle, u8 protocol_id)
{
	struct scmi_protocol_instance *pi;
	struct scmi_info *info = handle_to_scmi_info(handle);

	mutex_lock(&info->protocols_mtx);
	pi = idr_find(&info->protocols, protocol_id);

	if (pi) {
		refcount_inc(&pi->users);
	} else {
		const struct scmi_protocol *proto;

		/* Fails if protocol not registered on bus */
		proto = scmi_protocol_get(protocol_id, &info->version);
		if (proto)
			pi = scmi_alloc_init_protocol_instance(info, proto);
		else
			pi = ERR_PTR(-EPROBE_DEFER);
	}
	mutex_unlock(&info->protocols_mtx);

	return pi;
}
/**
 * scmi_protocol_acquire  - Protocol acquire
 * @handle: A reference to the SCMI platform instance.
 * @protocol_id: The protocol being requested.
 *
 * Register a new user for the requested protocol on the specified SCMI
 * platform instance, possibly triggering its initialization on first user.
 *
 * Return: 0 if protocol was acquired successfully.
 */
int scmi_protocol_acquire(const struct scmi_handle *handle, u8 protocol_id)
{
	return PTR_ERR_OR_ZERO(scmi_get_protocol_instance(handle, protocol_id));
}
/**
 * scmi_protocol_release  - Protocol de-initialization helper.
 * @handle: A reference to the SCMI platform instance.
 * @protocol_id: The protocol being requested.
 *
 * Remove one user for the specified protocol and trigger de-initialization
 * and resource de-allocation once the last user has gone.
 */
void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id)
{
	struct scmi_info *info = handle_to_scmi_info(handle);
	struct scmi_protocol_instance *pi;

	mutex_lock(&info->protocols_mtx);
	pi = idr_find(&info->protocols, protocol_id);
	if (WARN_ON(!pi))
		goto out;

	if (refcount_dec_and_test(&pi->users)) {
		void *gid = pi->gid;

		if (pi->proto->events)
			scmi_deregister_protocol_events(handle, protocol_id);

		if (pi->proto->instance_deinit)
			pi->proto->instance_deinit(&pi->ph);

		idr_remove(&info->protocols, protocol_id);

		scmi_protocol_put(pi->proto);

		devres_release_group(handle->dev, gid);
		dev_dbg(handle->dev, "De-Initialized protocol: 0x%X\n",
			protocol_id);
	}

out:
	mutex_unlock(&info->protocols_mtx);
}
void scmi_setup_protocol_implemented(const struct scmi_protocol_handle *ph,
				     u8 *prot_imp)
{
	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
	struct scmi_info *info = handle_to_scmi_info(pi->handle);

	info->protocols_imp = prot_imp;
}
static bool
scmi_is_protocol_implemented(const struct scmi_handle *handle, u8 prot_id)
{
	int i;
	struct scmi_info *info = handle_to_scmi_info(handle);
	struct scmi_revision_info *rev = handle->version;

	if (!info->protocols_imp)
		return false;

	for (i = 0; i < rev->num_protocols; i++)
		if (info->protocols_imp[i] == prot_id)
			return true;

	return false;
}
struct scmi_protocol_devres {
	const struct scmi_handle *handle;
	u8 protocol_id;
};

static void scmi_devm_release_protocol(struct device *dev, void *res)
{
	struct scmi_protocol_devres *dres = res;

	scmi_protocol_release(dres->handle, dres->protocol_id);
}
static struct scmi_protocol_instance __must_check *
scmi_devres_protocol_instance_get(struct scmi_device *sdev, u8 protocol_id)
{
	struct scmi_protocol_instance *pi;
	struct scmi_protocol_devres *dres;

	dres = devres_alloc(scmi_devm_release_protocol,
			    sizeof(*dres), GFP_KERNEL);
	if (!dres)
		return ERR_PTR(-ENOMEM);

	pi = scmi_get_protocol_instance(sdev->handle, protocol_id);
	if (IS_ERR(pi)) {
		devres_free(dres);
		return pi;
	}

	dres->handle = sdev->handle;
	dres->protocol_id = protocol_id;
	devres_add(&sdev->dev, dres);

	return pi;
}
/**
 * scmi_devm_protocol_get  - Devres managed get protocol operations and handle
 * @sdev: A reference to an scmi_device whose embedded struct device is to
 *	  be used for devres accounting.
 * @protocol_id: The protocol being requested.
 * @ph: A pointer reference used to pass back the associated protocol handle.
 *
 * Get hold of a protocol accounting for its usage, possibly triggering its
 * initialization, and returning the protocol specific operations and related
 * protocol handle which will be used as first argument in most of the
 * protocol operations methods.
 * Being a devres based managed method, protocol hold will be automatically
 * released, and possibly de-initialized on last user, once the SCMI driver
 * owning the scmi_device is unbound from it.
 *
 * Return: A reference to the requested protocol operations or error.
 *	   Must be checked for errors by caller.
 */
static const void __must_check *
scmi_devm_protocol_get(struct scmi_device *sdev, u8 protocol_id,
		       struct scmi_protocol_handle **ph)
{
	struct scmi_protocol_instance *pi;

	if (!ph)
		return ERR_PTR(-EINVAL);

	pi = scmi_devres_protocol_instance_get(sdev, protocol_id);
	if (IS_ERR(pi))
		return pi;

	*ph = &pi->ph;

	return pi->proto->ops;
}
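
/*
 * Typical consumption from an SCMI protocol driver probe, as an illustrative
 * sketch only (clock protocol chosen arbitrarily):
 *
 *	struct scmi_protocol_handle *ph;
 *	const struct scmi_clk_proto_ops *clk_ops;
 *
 *	clk_ops = sdev->handle->devm_protocol_get(sdev, SCMI_PROTOCOL_CLOCK,
 *						  &ph);
 *	if (IS_ERR(clk_ops))
 *		return PTR_ERR(clk_ops);
 */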
/**
 * scmi_devm_protocol_acquire  - Devres managed helper to get hold of a protocol
 * @sdev: A reference to an scmi_device whose embedded struct device is to
 *	  be used for devres accounting.
 * @protocol_id: The protocol being requested.
 *
 * Get hold of a protocol accounting for its usage, possibly triggering its
 * initialization but without getting access to its protocol specific operations
 * and handle.
 * Being a devres based managed method, protocol hold will be automatically
 * released, and possibly de-initialized on last user, once the SCMI driver
 * owning the scmi_device is unbound from it.
 *
 * Return: 0 on SUCCESS
 */
static int __must_check scmi_devm_protocol_acquire(struct scmi_device *sdev,
						   u8 protocol_id)
{
	struct scmi_protocol_instance *pi;

	pi = scmi_devres_protocol_instance_get(sdev, protocol_id);
	if (IS_ERR(pi))
		return PTR_ERR(pi);

	return 0;
}
static int scmi_devm_protocol_match(struct device *dev, void *res, void *data)
{
	struct scmi_protocol_devres *dres = res;

	if (WARN_ON(!dres || !data))
		return 0;

	return dres->protocol_id == *((u8 *)data);
}
/**
 * scmi_devm_protocol_put  - Devres managed put protocol operations and handle
 * @sdev: A reference to an scmi_device whose embedded struct device is to
 *	  be used for devres accounting.
 * @protocol_id: The protocol being requested.
 *
 * Explicitly release a protocol hold previously obtained calling the above
 * @scmi_devm_protocol_get.
 */
static void scmi_devm_protocol_put(struct scmi_device *sdev, u8 protocol_id)
{
	int ret;

	ret = devres_release(&sdev->dev, scmi_devm_release_protocol,
			     scmi_devm_protocol_match, &protocol_id);
	WARN_ON(ret);
}
/**
 * scmi_is_transport_atomic  - Method to check if underlying transport for an
 * SCMI instance is configured as atomic.
 *
 * @handle: A reference to the SCMI platform instance.
 * @atomic_threshold: An optional return value for the system wide currently
 *		      configured threshold for atomic operations.
 *
 * Return: True if transport is configured as atomic
 */
static bool scmi_is_transport_atomic(const struct scmi_handle *handle,
				     unsigned int *atomic_threshold)
{
	bool ret;
	struct scmi_info *info = handle_to_scmi_info(handle);

	ret = info->desc->atomic_enabled &&
	      is_transport_polling_capable(info->desc);
	if (ret && atomic_threshold)
		*atomic_threshold = info->desc->atomic_threshold;

	return ret;
}
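
/*
 * Illustrative use from an SCMI driver deciding whether it can serve
 * requests from atomic context:
 *
 *	unsigned int us;
 *
 *	if (handle->is_transport_atomic(handle, &us))
 *		... commands expected to complete within roughly 'us'
 *		    microseconds may be issued without sleeping ...
 */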
/**
 * scmi_handle_get() - Get the SCMI handle for a device
 *
 * @dev: pointer to device for which we want SCMI handle
 *
 * NOTE: The function does not track individual clients of the framework
 * and is expected to be maintained by caller of SCMI protocol library.
 * scmi_handle_put must be balanced with successful scmi_handle_get
 *
 * Return: pointer to handle if successful, NULL on error
 */
static struct scmi_handle *scmi_handle_get(struct device *dev)
{
	struct list_head *p;
	struct scmi_info *info;
	struct scmi_handle *handle = NULL;

	mutex_lock(&scmi_list_mutex);
	list_for_each(p, &scmi_list) {
		info = list_entry(p, struct scmi_info, node);
		if (dev->parent == info->dev) {
			info->users++;
			handle = &info->handle;
			break;
		}
	}
	mutex_unlock(&scmi_list_mutex);

	return handle;
}
/**
 * scmi_handle_put() - Release the handle acquired by scmi_handle_get
 *
 * @handle: handle acquired by scmi_handle_get
 *
 * NOTE: The function does not track individual clients of the framework
 * and is expected to be maintained by caller of SCMI protocol library.
 * scmi_handle_put must be balanced with successful scmi_handle_get
 *
 * Return: 0 if successfully released;
 *	   if NULL was passed, it returns -EINVAL
 */
static int scmi_handle_put(const struct scmi_handle *handle)
{
	struct scmi_info *info;

	if (!handle)
		return -EINVAL;

	info = handle_to_scmi_info(handle);
	mutex_lock(&scmi_list_mutex);
	if (!WARN_ON(!info->users))
		info->users--;
	mutex_unlock(&scmi_list_mutex);

	return 0;
}
static void scmi_device_link_add(struct device *consumer,
				 struct device *supplier)
{
	struct device_link *link;

	link = device_link_add(consumer, supplier, DL_FLAG_AUTOREMOVE_CONSUMER);

	WARN_ON(!link);
}
static void scmi_set_handle(struct scmi_device *scmi_dev)
{
	scmi_dev->handle = scmi_handle_get(&scmi_dev->dev);
	if (scmi_dev->handle)
		scmi_device_link_add(&scmi_dev->dev, scmi_dev->handle->dev);
}
static int __scmi_xfer_info_init(struct scmi_info *sinfo,
				 struct scmi_xfers_info *info)
{
	int i;
	struct scmi_xfer *xfer;
	struct device *dev = sinfo->dev;
	const struct scmi_desc *desc = sinfo->desc;

	/* Pre-allocated messages, no more than what hdr.seq can support */
	if (WARN_ON(!info->max_msg || info->max_msg > MSG_TOKEN_MAX)) {
		dev_err(dev,
			"Invalid maximum messages %d, not in range [1 - %lu]\n",
			info->max_msg, MSG_TOKEN_MAX);
		return -EINVAL;
	}

	hash_init(info->pending_xfers);

	/* Allocate a bitmask sized to hold MSG_TOKEN_MAX tokens */
	info->xfer_alloc_table = devm_bitmap_zalloc(dev, MSG_TOKEN_MAX,
						    GFP_KERNEL);
	if (!info->xfer_alloc_table)
		return -ENOMEM;

	/*
	 * Preallocate a number of xfers equal to max inflight messages,
	 * pre-initialize the buffer pointer to pre-allocated buffers and
	 * attach all of them to the free list
	 */
	INIT_HLIST_HEAD(&info->free_xfers);
	for (i = 0; i < info->max_msg; i++) {
		xfer = devm_kzalloc(dev, sizeof(*xfer), GFP_KERNEL);
		if (!xfer)
			return -ENOMEM;

		xfer->rx.buf = devm_kcalloc(dev, sizeof(u8), desc->max_msg_size,
					    GFP_KERNEL);
		if (!xfer->rx.buf)
			return -ENOMEM;

		xfer->tx.buf = xfer->rx.buf;
		init_completion(&xfer->done);
		spin_lock_init(&xfer->lock);

		/* Add initialized xfer to the free list */
		hlist_add_head(&xfer->node, &info->free_xfers);
	}

	spin_lock_init(&info->xfer_lock);

	return 0;
}
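
/*
 * Note that each pre-allocated xfer above shares a single buffer between
 * TX and RX: SCMI exchanges are strictly request/response, so the payload
 * area can be safely reused once the command has been sent.
 */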
static int scmi_channels_max_msg_configure(struct scmi_info *sinfo)
{
	const struct scmi_desc *desc = sinfo->desc;

	if (!desc->ops->get_max_msg) {
		sinfo->tx_minfo.max_msg = desc->max_msg;
		sinfo->rx_minfo.max_msg = desc->max_msg;
	} else {
		struct scmi_chan_info *base_cinfo;

		base_cinfo = idr_find(&sinfo->tx_idr, SCMI_PROTOCOL_BASE);
		if (!base_cinfo)
			return -EINVAL;
		sinfo->tx_minfo.max_msg = desc->ops->get_max_msg(base_cinfo);

		/* RX channel is optional so can be skipped */
		base_cinfo = idr_find(&sinfo->rx_idr, SCMI_PROTOCOL_BASE);
		if (base_cinfo)
			sinfo->rx_minfo.max_msg =
				desc->ops->get_max_msg(base_cinfo);
	}

	return 0;
}
static int scmi_xfer_info_init(struct scmi_info *sinfo)
{
	int ret;

	ret = scmi_channels_max_msg_configure(sinfo);
	if (ret)
		return ret;

	ret = __scmi_xfer_info_init(sinfo, &sinfo->tx_minfo);
	if (!ret && !idr_is_empty(&sinfo->rx_idr))
		ret = __scmi_xfer_info_init(sinfo, &sinfo->rx_minfo);

	return ret;
}
static int scmi_chan_setup(struct scmi_info *info, struct device_node *of_node,
			   int prot_id, bool tx)
{
	int ret, idx;
	char name[32];
	struct scmi_chan_info *cinfo;
	struct idr *idr;
	struct scmi_device *tdev = NULL;

	/* Transmit channel is first entry i.e. index 0 */
	idx = tx ? 0 : 1;
	idr = tx ? &info->tx_idr : &info->rx_idr;

	if (!info->desc->ops->chan_available(of_node, idx)) {
		cinfo = idr_find(idr, SCMI_PROTOCOL_BASE);
		if (unlikely(!cinfo)) /* Possible only if platform has no Rx */
			return -EINVAL;
		goto idr_alloc;
	}

	cinfo = devm_kzalloc(info->dev, sizeof(*cinfo), GFP_KERNEL);
	if (!cinfo)
		return -ENOMEM;

	cinfo->is_p2a = !tx;
	cinfo->rx_timeout_ms = info->desc->max_rx_timeout_ms;
	cinfo->max_msg_size = info->desc->max_msg_size;

	/* Create a unique name for this transport device */
	snprintf(name, 32, "__scmi_transport_device_%s_%02X",
		 idx ? "rx" : "tx", prot_id);
	/* Create a uniquely named, dedicated transport device for this chan */
	tdev = scmi_device_create(of_node, info->dev, prot_id, name);
	if (!tdev) {
		dev_err(info->dev,
			"failed to create transport device (%s)\n", name);
		devm_kfree(info->dev, cinfo);
		return -EINVAL;
	}
	of_node_get(of_node);

	cinfo->id = prot_id;
	cinfo->dev = &tdev->dev;
	ret = info->desc->ops->chan_setup(cinfo, info->dev, tx);
	if (ret) {
		of_node_put(of_node);
		scmi_device_destroy(info->dev, prot_id, name);
		devm_kfree(info->dev, cinfo);
		return ret;
	}

	if (tx && is_polling_required(cinfo, info->desc)) {
		if (is_transport_polling_capable(info->desc))
			dev_info(&tdev->dev,
				 "Enabled polling mode TX channel - prot_id:%d\n",
				 prot_id);
		else
			dev_warn(&tdev->dev,
				 "Polling mode NOT supported by transport.\n");
	}

idr_alloc:
	ret = idr_alloc(idr, cinfo, prot_id, prot_id + 1, GFP_KERNEL);
	if (ret != prot_id) {
		dev_err(info->dev,
			"unable to allocate SCMI idr slot err %d\n", ret);
		/* Destroy channel and device only if created by this call. */
		if (tdev) {
			of_node_put(of_node);
			scmi_device_destroy(info->dev, prot_id, name);
			devm_kfree(info->dev, cinfo);
		}
		return ret;
	}

	cinfo->handle = &info->handle;
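	return 0;
}

/*
 * As an example of the naming scheme above, the dedicated TX channel of the
 * PERF protocol (0x13) would end up backed by a transport device called
 * "__scmi_transport_device_tx_13".
 */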
static int
scmi_txrx_setup(struct scmi_info *info, struct device_node *of_node,
		int prot_id)
{
	int ret = scmi_chan_setup(info, of_node, prot_id, true);

	if (!ret) {
		/* Rx is optional, report only memory errors */
		ret = scmi_chan_setup(info, of_node, prot_id, false);
		if (ret && ret != -ENOMEM)
			ret = 0;
	}

	if (ret)
		dev_err(info->dev,
			"failed to setup channel for protocol:0x%X\n", prot_id);

	return ret;
}
/**
 * scmi_channels_setup  - Helper to initialize all required channels
 *
 * @info: The SCMI instance descriptor.
 *
 * Initialize all the channels found described in the DT against the underlying
 * configured transport using custom defined dedicated devices instead of
 * borrowing devices from the SCMI drivers; this way channels are initialized
 * upfront during core SCMI stack probing and are no longer coupled with SCMI
 * devices used by SCMI drivers.
 *
 * Note that, even though a pair of TX/RX channels is associated to each
 * protocol defined in the DT, a distinct freshly initialized channel is
 * created only if the DT node for the protocol at hand describes a dedicated
 * channel: in all the other cases the common BASE protocol channel is reused.
 *
 * Return: 0 on Success
 */
static int scmi_channels_setup(struct scmi_info *info)
{
	int ret;
	struct device_node *top_np = info->dev->of_node;

	/* Initialize a common generic channel at first */
	ret = scmi_txrx_setup(info, top_np, SCMI_PROTOCOL_BASE);
	if (ret)
		return ret;

	for_each_available_child_of_node_scoped(top_np, child) {
		u32 prot_id;

		if (of_property_read_u32(child, "reg", &prot_id))
			continue;

		if (!FIELD_FIT(MSG_PROTOCOL_ID_MASK, prot_id))
			dev_err(info->dev,
				"Out of range protocol %d\n", prot_id);

		ret = scmi_txrx_setup(info, child, prot_id);
		if (ret)
			return ret;
	}

	return 0;
}
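
/*
 * A hypothetical DT fragment matching the walk above, for illustration only:
 *
 *	firmware {
 *		scmi {
 *			...
 *			scmi_perf: protocol@13 {
 *				reg = <0x13>;
 *			};
 *		};
 *	};
 *
 * With no dedicated channel described in protocol@13, the BASE protocol
 * channel set up first is simply reused for it.
 */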
static int scmi_chan_destroy(int id, void *p, void *idr)
{
	struct scmi_chan_info *cinfo = p;

	if (cinfo->dev) {
		struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
		struct scmi_device *sdev = to_scmi_dev(cinfo->dev);

		of_node_put(cinfo->dev->of_node);
		scmi_device_destroy(info->dev, id, sdev->name);
		cinfo->dev = NULL;
	}

	idr_remove(idr, id);

	return 0;
}
static void scmi_cleanup_channels(struct scmi_info *info, struct idr *idr)
{
	/* At first free all channels at the transport layer ... */
	idr_for_each(idr, info->desc->ops->chan_free, idr);

	/* ...then destroy all underlying devices */
	idr_for_each(idr, scmi_chan_destroy, idr);

	idr_destroy(idr);
}
static void scmi_cleanup_txrx_channels(struct scmi_info *info)
{
	scmi_cleanup_channels(info, &info->tx_idr);

	scmi_cleanup_channels(info, &info->rx_idr);
}
static int scmi_bus_notifier(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	struct scmi_info *info = bus_nb_to_scmi_info(nb);
	struct scmi_device *sdev = to_scmi_dev(data);

	/* Skip transport devices and devices of different SCMI instances */
	if (!strncmp(sdev->name, "__scmi_transport_device", 23) ||
	    sdev->dev.parent != info->dev)
		return NOTIFY_DONE;

	switch (action) {
	case BUS_NOTIFY_BIND_DRIVER:
		/* setup handle now as the transport is ready */
		scmi_set_handle(sdev);
		break;
	case BUS_NOTIFY_UNBOUND_DRIVER:
		scmi_handle_put(sdev->handle);
		sdev->handle = NULL;
		break;
	default:
		return NOTIFY_DONE;
	}

	dev_dbg(info->dev, "Device %s (%s) is now %s\n", dev_name(&sdev->dev),
		sdev->name, action == BUS_NOTIFY_BIND_DRIVER ?
		"about to be BOUND." : "UNBOUND.");

	return NOTIFY_OK;
}
static int scmi_device_request_notifier(struct notifier_block *nb,
					unsigned long action, void *data)
{
	struct device_node *np;
	struct scmi_device_id *id_table = data;
	struct scmi_info *info = req_nb_to_scmi_info(nb);

	np = idr_find(&info->active_protocols, id_table->protocol_id);
	if (!np)
		return NOTIFY_DONE;

	dev_dbg(info->dev, "%sRequested device (%s) for protocol 0x%x\n",
		action == SCMI_BUS_NOTIFY_DEVICE_REQUEST ? "" : "UN-",
		id_table->name, id_table->protocol_id);

	switch (action) {
	case SCMI_BUS_NOTIFY_DEVICE_REQUEST:
		scmi_create_protocol_devices(np, info, id_table->protocol_id,
					     id_table->name);
		break;
	case SCMI_BUS_NOTIFY_DEVICE_UNREQUEST:
		scmi_destroy_protocol_devices(info, id_table->protocol_id,
					      id_table->name);
		break;
	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}
static const char * const dbg_counter_strs[] = {
	"sent_ok",
	"sent_fail",
	"sent_fail_polling_unsupported",
	"sent_fail_channel_not_found",
	"response_ok",
	"notification_ok",
	"delayed_response_ok",
	"xfers_response_timeout",
	"xfers_response_polled_timeout",
	"response_polled_ok",
	"err_msg_unexpected",
	"err_msg_invalid",
	"err_msg_nomem",
	"err_protocol",
};
static ssize_t reset_all_on_write(struct file *filp, const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct scmi_debug_info *dbg = filp->private_data;

	for (int i = 0; i < SCMI_DEBUG_COUNTERS_LAST; i++)
		atomic_set(&dbg->counters[i], 0);

	return count;
}
static const struct file_operations fops_reset_counts = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = reset_all_on_write,
};
static void scmi_debugfs_counters_setup(struct scmi_debug_info *dbg,
					struct dentry *trans)
{
	struct dentry *counters;
	int idx;

	counters = debugfs_create_dir("counters", trans);

	for (idx = 0; idx < SCMI_DEBUG_COUNTERS_LAST; idx++)
		debugfs_create_atomic_t(dbg_counter_strs[idx], 0600, counters,
					&dbg->counters[idx]);

	debugfs_create_file("reset", 0200, counters, dbg, &fops_reset_counts);
}
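
/*
 * With CONFIG_ARM_SCMI_DEBUG_COUNTERS enabled, the counters for instance 0
 * would then surface under a path along these lines (illustrative):
 *
 *	/sys/kernel/debug/scmi/0/transport/counters/
 *
 * and can all be zeroed by writing anything to the sibling "reset" file.
 */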
static void scmi_debugfs_common_cleanup(void *d)
{
	struct scmi_debug_info *dbg = d;

	if (!dbg)
		return;

	debugfs_remove_recursive(dbg->top_dentry);
	kfree(dbg->name);
	kfree(dbg->type);
}
static struct scmi_debug_info *scmi_debugfs_common_setup(struct scmi_info *info)
{
	char top_dir[16];
	struct dentry *trans, *top_dentry;
	struct scmi_debug_info *dbg;
	const char *c_ptr = NULL;

	dbg = devm_kzalloc(info->dev, sizeof(*dbg), GFP_KERNEL);
	if (!dbg)
		return NULL;

	dbg->name = kstrdup(of_node_full_name(info->dev->of_node), GFP_KERNEL);
	if (!dbg->name) {
		devm_kfree(info->dev, dbg);
		return NULL;
	}

	of_property_read_string(info->dev->of_node, "compatible", &c_ptr);
	dbg->type = kstrdup(c_ptr, GFP_KERNEL);
	if (!dbg->type) {
		kfree(dbg->name);
		devm_kfree(info->dev, dbg);
		return NULL;
	}

	snprintf(top_dir, 16, "%d", info->id);
	top_dentry = debugfs_create_dir(top_dir, scmi_top_dentry);
	trans = debugfs_create_dir("transport", top_dentry);

	dbg->is_atomic = info->desc->atomic_enabled &&
			 is_transport_polling_capable(info->desc);

	debugfs_create_str("instance_name", 0400, top_dentry,
			   (char **)&dbg->name);

	debugfs_create_u32("atomic_threshold_us", 0400, top_dentry,
			   (u32 *)&info->desc->atomic_threshold);

	debugfs_create_str("type", 0400, trans, (char **)&dbg->type);

	debugfs_create_bool("is_atomic", 0400, trans, &dbg->is_atomic);

	debugfs_create_u32("max_rx_timeout_ms", 0400, trans,
			   (u32 *)&info->desc->max_rx_timeout_ms);

	debugfs_create_u32("max_msg_size", 0400, trans,
			   (u32 *)&info->desc->max_msg_size);

	debugfs_create_u32("tx_max_msg", 0400, trans,
			   (u32 *)&info->tx_minfo.max_msg);

	debugfs_create_u32("rx_max_msg", 0400, trans,
			   (u32 *)&info->rx_minfo.max_msg);

	if (IS_ENABLED(CONFIG_ARM_SCMI_DEBUG_COUNTERS))
		scmi_debugfs_counters_setup(dbg, trans);

	dbg->top_dentry = top_dentry;

	if (devm_add_action_or_reset(info->dev,
				     scmi_debugfs_common_cleanup, dbg))
		return NULL;

	return dbg;
}
static int scmi_debugfs_raw_mode_setup(struct scmi_info *info)
{
	int id, num_chans = 0, ret = 0;
	struct scmi_chan_info *cinfo;
	u8 channels[SCMI_MAX_CHANNELS] = {};
	DECLARE_BITMAP(protos, SCMI_MAX_CHANNELS) = {};

	if (!info->dbg)
		return -EINVAL;

	/* Enumerate all channels to collect their ids */
	idr_for_each_entry(&info->tx_idr, cinfo, id) {
		/*
		 * Cannot happen, but be defensive.
		 * Zero as num_chans is ok, warn and carry on.
		 */
		if (num_chans >= SCMI_MAX_CHANNELS || !cinfo) {
			dev_warn(info->dev,
				 "SCMI RAW - Error enumerating channels\n");
			break;
		}

		if (!test_bit(cinfo->id, protos)) {
			channels[num_chans++] = cinfo->id;
			set_bit(cinfo->id, protos);
		}
	}

	info->raw = scmi_raw_mode_init(&info->handle, info->dbg->top_dentry,
				       info->id, channels, num_chans,
				       info->desc, info->tx_minfo.max_msg);
	if (IS_ERR(info->raw)) {
		dev_err(info->dev, "Failed to initialize SCMI RAW Mode !\n");
		ret = PTR_ERR(info->raw);
		info->raw = NULL;
	}

	return ret;
}
static const struct scmi_desc *scmi_transport_setup(struct device *dev)
{
	struct scmi_transport *trans;
	int ret;

	trans = dev_get_platdata(dev);
	if (!trans || !trans->desc || !trans->supplier || !trans->core_ops)
		return NULL;

	if (!device_link_add(dev, trans->supplier, DL_FLAG_AUTOREMOVE_CONSUMER)) {
		dev_err(dev,
			"Adding link to supplier transport device failed\n");
		return NULL;
	}

	/* Provide core transport ops */
	*trans->core_ops = &scmi_trans_core_ops;

	dev_info(dev, "Using %s\n", dev_driver_string(trans->supplier));

	ret = of_property_read_u32(dev->of_node, "arm,max-rx-timeout-ms",
				   &trans->desc->max_rx_timeout_ms);
	if (ret && ret != -EINVAL)
		dev_err(dev, "Malformed arm,max-rx-timeout-ms DT property.\n");

	ret = of_property_read_u32(dev->of_node, "arm,max-msg-size",
				   &trans->desc->max_msg_size);
	if (ret && ret != -EINVAL)
		dev_err(dev, "Malformed arm,max-msg-size DT property.\n");

	ret = of_property_read_u32(dev->of_node, "arm,max-msg",
				   &trans->desc->max_msg);
	if (ret && ret != -EINVAL)
		dev_err(dev, "Malformed arm,max-msg DT property.\n");

	dev_info(dev,
		 "SCMI max-rx-timeout: %dms / max-msg-size: %dbytes / max-msg: %d\n",
		 trans->desc->max_rx_timeout_ms, trans->desc->max_msg_size,
		 trans->desc->max_msg);

	/* System wide atomic threshold for atomic ops .. if any */
	if (!of_property_read_u32(dev->of_node, "atomic-threshold-us",
				  &trans->desc->atomic_threshold))
		dev_info(dev,
			 "SCMI System wide atomic threshold set to %u us\n",
			 trans->desc->atomic_threshold);

	return trans->desc;
}
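
/*
 * The transport defaults can thus be tuned per-platform from the SCMI node,
 * e.g. with purely illustrative values:
 *
 *	scmi {
 *		...
 *		arm,max-rx-timeout-ms = <60>;
 *		arm,max-msg-size = <128>;
 *		atomic-threshold-us = <20>;
 *	};
 */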
static int scmi_probe(struct platform_device *pdev)
{
	int ret;
	char *err_str = "probe failure\n";
	struct scmi_handle *handle;
	const struct scmi_desc *desc;
	struct scmi_info *info;
	bool coex = IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT_COEX);
	struct device *dev = &pdev->dev;
	struct device_node *child, *np = dev->of_node;

	desc = scmi_transport_setup(dev);
	if (!desc) {
		err_str = "transport invalid\n";
		ret = -EINVAL;
		goto out_err;
	}

	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->id = ida_alloc_min(&scmi_id, 0, GFP_KERNEL);
	if (info->id < 0)
		return info->id;

	info->dev = dev;
	info->desc = desc;
	info->bus_nb.notifier_call = scmi_bus_notifier;
	info->dev_req_nb.notifier_call = scmi_device_request_notifier;
	INIT_LIST_HEAD(&info->node);
	idr_init(&info->protocols);
	mutex_init(&info->protocols_mtx);
	idr_init(&info->active_protocols);
	mutex_init(&info->devreq_mtx);

	platform_set_drvdata(pdev, info);
	idr_init(&info->tx_idr);
	idr_init(&info->rx_idr);

	handle = &info->handle;
	handle->dev = info->dev;
	handle->version = &info->version;
	handle->devm_protocol_acquire = scmi_devm_protocol_acquire;
	handle->devm_protocol_get = scmi_devm_protocol_get;
	handle->devm_protocol_put = scmi_devm_protocol_put;
	handle->is_transport_atomic = scmi_is_transport_atomic;

	/* Setup all channels described in the DT at first */
	ret = scmi_channels_setup(info);
	if (ret) {
		err_str = "failed to setup channels\n";
		goto clear_ida;
	}

	ret = bus_register_notifier(&scmi_bus_type, &info->bus_nb);
	if (ret) {
		err_str = "failed to register bus notifier\n";
		goto clear_txrx_setup;
	}

	ret = blocking_notifier_chain_register(&scmi_requested_devices_nh,
					       &info->dev_req_nb);
	if (ret) {
		err_str = "failed to register device notifier\n";
		goto clear_bus_notifier;
	}

	ret = scmi_xfer_info_init(info);
	if (ret) {
		err_str = "failed to init xfers pool\n";
		goto clear_dev_req_notifier;
	}

	if (scmi_top_dentry) {
		info->dbg = scmi_debugfs_common_setup(info);
		if (!info->dbg)
			dev_warn(dev, "Failed to setup SCMI debugfs.\n");

		if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
			ret = scmi_debugfs_raw_mode_setup(info);
			if (!coex) {
				if (ret)
					goto clear_dev_req_notifier;

				/* Bail out anyway when coex disabled. */
				return 0;
			}

			/* Coex enabled, carry on in any case. */
			dev_info(dev, "SCMI RAW Mode COEX enabled !\n");
		}
	}

	if (scmi_notification_init(handle))
		dev_err(dev, "SCMI Notifications NOT available.\n");

	if (info->desc->atomic_enabled &&
	    !is_transport_polling_capable(info->desc))
		dev_err(dev,
			"Transport is not polling capable. Atomic mode not supported.\n");

	/*
	 * Trigger SCMI Base protocol initialization.
	 * It's mandatory and won't be ever released/deinit until the
	 * SCMI stack is shutdown/unloaded as a whole.
	 */
	ret = scmi_protocol_acquire(handle, SCMI_PROTOCOL_BASE);
	if (ret) {
		err_str = "unable to communicate with SCMI\n";
		if (coex) {
			dev_err(dev, "%s", err_str);
			return 0;
		}
		goto notification_exit;
	}

	mutex_lock(&scmi_list_mutex);
	list_add_tail(&info->node, &scmi_list);
	mutex_unlock(&scmi_list_mutex);

	for_each_available_child_of_node(np, child) {
		u32 prot_id;

		if (of_property_read_u32(child, "reg", &prot_id))
			continue;

		if (!FIELD_FIT(MSG_PROTOCOL_ID_MASK, prot_id))
			dev_err(dev, "Out of range protocol %d\n", prot_id);

		if (!scmi_is_protocol_implemented(handle, prot_id)) {
			dev_err(dev, "SCMI protocol %d not implemented\n",
				prot_id);
			continue;
		}

		/*
		 * Save this valid DT protocol descriptor amongst
		 * @active_protocols for this SCMI instance.
		 */
		ret = idr_alloc(&info->active_protocols, child,
				prot_id, prot_id + 1, GFP_KERNEL);
		if (ret != prot_id) {
			dev_err(dev, "SCMI protocol %d already activated. Skip\n",
				prot_id);
			continue;
		}

		of_node_get(child);
		scmi_create_protocol_devices(child, info, prot_id, NULL);
	}

	return 0;

notification_exit:
	if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT))
		scmi_raw_mode_cleanup(info->raw);
	scmi_notification_exit(&info->handle);
clear_dev_req_notifier:
	blocking_notifier_chain_unregister(&scmi_requested_devices_nh,
					   &info->dev_req_nb);
clear_bus_notifier:
	bus_unregister_notifier(&scmi_bus_type, &info->bus_nb);
clear_txrx_setup:
	scmi_cleanup_txrx_channels(info);
clear_ida:
	ida_free(&scmi_id, info->id);

out_err:
	return dev_err_probe(dev, ret, "%s", err_str);
}
static void scmi_remove(struct platform_device *pdev)
{
	int id;
	struct scmi_info *info = platform_get_drvdata(pdev);
	struct device_node *child;

	if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT))
		scmi_raw_mode_cleanup(info->raw);

	mutex_lock(&scmi_list_mutex);
	if (info->users)
		dev_warn(&pdev->dev,
			 "Still active SCMI users will be forcibly unbound.\n");
	list_del(&info->node);
	mutex_unlock(&scmi_list_mutex);

	scmi_notification_exit(&info->handle);

	mutex_lock(&info->protocols_mtx);
	idr_destroy(&info->protocols);
	mutex_unlock(&info->protocols_mtx);

	idr_for_each_entry(&info->active_protocols, child, id)
		of_node_put(child);
	idr_destroy(&info->active_protocols);

	blocking_notifier_chain_unregister(&scmi_requested_devices_nh,
					   &info->dev_req_nb);
	bus_unregister_notifier(&scmi_bus_type, &info->bus_nb);

	/* Safe to free channels since no more users */
	scmi_cleanup_txrx_channels(info);

	ida_free(&scmi_id, info->id);
}
static ssize_t protocol_version_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct scmi_info *info = dev_get_drvdata(dev);

	return sprintf(buf, "%u.%u\n", info->version.major_ver,
		       info->version.minor_ver);
}
static DEVICE_ATTR_RO(protocol_version);

static ssize_t firmware_version_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct scmi_info *info = dev_get_drvdata(dev);

	return sprintf(buf, "0x%x\n", info->version.impl_ver);
}
static DEVICE_ATTR_RO(firmware_version);

static ssize_t vendor_id_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct scmi_info *info = dev_get_drvdata(dev);

	return sprintf(buf, "%s\n", info->version.vendor_id);
}
static DEVICE_ATTR_RO(vendor_id);

static ssize_t sub_vendor_id_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct scmi_info *info = dev_get_drvdata(dev);

	return sprintf(buf, "%s\n", info->version.sub_vendor_id);
}
static DEVICE_ATTR_RO(sub_vendor_id);

static struct attribute *versions_attrs[] = {
	&dev_attr_firmware_version.attr,
	&dev_attr_protocol_version.attr,
	&dev_attr_vendor_id.attr,
	&dev_attr_sub_vendor_id.attr,
	NULL,
};
ATTRIBUTE_GROUPS(versions);
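
/*
 * These read-only attributes are attached to the arm-scmi platform device
 * through .dev_groups below; e.g. reading protocol_version from the device's
 * sysfs directory (exact path is platform dependent) would print "2.0" on
 * an SCMI v2.0 platform.
 */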
static struct platform_driver scmi_driver = {
	.driver = {
		.name = "arm-scmi",
		.suppress_bind_attrs = true,
		.dev_groups = versions_groups,
	},
	.probe = scmi_probe,
	.remove = scmi_remove,
};
static struct dentry *scmi_debugfs_init(void)
{
	struct dentry *d;

	d = debugfs_create_dir("scmi", NULL);
	if (IS_ERR(d)) {
		pr_err("Could NOT create SCMI top dentry.\n");
		return NULL;
	}

	return d;
}
static int __init scmi_driver_init(void)
{
	/* Bail out if no SCMI transport was configured */
	if (WARN_ON(!IS_ENABLED(CONFIG_ARM_SCMI_HAVE_TRANSPORT)))
		return -EINVAL;

	if (IS_ENABLED(CONFIG_ARM_SCMI_HAVE_SHMEM))
		scmi_trans_core_ops.shmem = scmi_shared_mem_operations_get();

	if (IS_ENABLED(CONFIG_ARM_SCMI_HAVE_MSG))
		scmi_trans_core_ops.msg = scmi_message_operations_get();

	if (IS_ENABLED(CONFIG_ARM_SCMI_NEED_DEBUGFS))
		scmi_top_dentry = scmi_debugfs_init();

	scmi_base_register();

	scmi_clock_register();
	scmi_perf_register();
	scmi_power_register();
	scmi_reset_register();
	scmi_sensors_register();
	scmi_voltage_register();
	scmi_system_register();
	scmi_powercap_register();
	scmi_pinctrl_register();

	return platform_driver_register(&scmi_driver);
}
module_init(scmi_driver_init);
static void __exit scmi_driver_exit(void)
{
	scmi_base_unregister();

	scmi_clock_unregister();
	scmi_perf_unregister();
	scmi_power_unregister();
	scmi_reset_unregister();
	scmi_sensors_unregister();
	scmi_voltage_unregister();
	scmi_system_unregister();
	scmi_powercap_unregister();
	scmi_pinctrl_unregister();

	platform_driver_unregister(&scmi_driver);

	debugfs_remove_recursive(scmi_top_dentry);
}
module_exit(scmi_driver_exit);
MODULE_ALIAS("platform:arm-scmi");
MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
MODULE_DESCRIPTION("ARM SCMI protocol driver");
MODULE_LICENSE("GPL v2");