1 // SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Management Interface (SCMI) Performance Protocol
 *
 * Copyright (C) 2018-2023 ARM Ltd.
 */
8 #define pr_fmt(fmt) "SCMI Notifications PERF - " fmt
#include <linux/bits.h>
#include <linux/hashtable.h>
#include <linux/io.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/scmi_protocol.h>
#include <linux/sort.h>
#include <linux/xarray.h>

#include <trace/events/scmi.h>

#include "protocols.h"
#include "notify.h"
27 /* Updated only after ALL the mandatory features for that version are merged */
28 #define SCMI_PROTOCOL_SUPPORTED_VERSION 0x40000
/* SCMI Performance protocol message IDs (SCMI spec, Performance chapter) */
enum scmi_performance_protocol_cmd {
	PERF_DOMAIN_ATTRIBUTES = 0x3,
	PERF_DESCRIBE_LEVELS = 0x4,
	PERF_LIMITS_SET = 0x5,
	PERF_LIMITS_GET = 0x6,
	/* 0x7/0x8 were missing from the mangled source but are used below */
	PERF_LEVEL_SET = 0x7,
	PERF_LEVEL_GET = 0x8,
	PERF_NOTIFY_LIMITS = 0x9,
	PERF_NOTIFY_LEVEL = 0xa,
	PERF_DESCRIBE_FASTCHANNEL = 0xb,
	PERF_DOMAIN_NAME_GET = 0xc,
};
57 struct hlist_node hash
;
60 struct scmi_msg_resp_perf_attributes
{
63 #define POWER_SCALE_IN_MILLIWATT(x) ((x) & BIT(0))
64 #define POWER_SCALE_IN_MICROWATT(x) ((x) & BIT(1))
65 __le32 stats_addr_low
;
66 __le32 stats_addr_high
;
70 struct scmi_msg_resp_perf_domain_attributes
{
72 #define SUPPORTS_SET_LIMITS(x) ((x) & BIT(31))
73 #define SUPPORTS_SET_PERF_LVL(x) ((x) & BIT(30))
74 #define SUPPORTS_PERF_LIMIT_NOTIFY(x) ((x) & BIT(29))
75 #define SUPPORTS_PERF_LEVEL_NOTIFY(x) ((x) & BIT(28))
76 #define SUPPORTS_PERF_FASTCHANNELS(x) ((x) & BIT(27))
77 #define SUPPORTS_EXTENDED_NAMES(x) ((x) & BIT(26))
78 #define SUPPORTS_LEVEL_INDEXING(x) ((x) & BIT(25))
80 __le32 sustained_freq_khz
;
81 __le32 sustained_perf_level
;
82 u8 name
[SCMI_SHORT_NAME_MAX_SIZE
];
85 struct scmi_msg_perf_describe_levels
{
90 struct scmi_perf_set_limits
{
96 struct scmi_perf_get_limits
{
101 struct scmi_perf_set_level
{
106 struct scmi_perf_notify_level_or_limits
{
108 __le32 notify_enable
;
111 struct scmi_perf_limits_notify_payld
{
118 struct scmi_perf_level_notify_payld
{
121 __le32 performance_level
;
124 struct scmi_msg_resp_perf_describe_levels
{
126 __le16 num_remaining
;
130 __le16 transition_latency_us
;
135 struct scmi_msg_resp_perf_describe_levels_v4
{
137 __le16 num_remaining
;
141 __le16 transition_latency_us
;
143 __le32 indicative_freq
;
148 struct perf_dom_info
{
151 bool perf_limit_notify
;
152 bool perf_level_notify
;
153 bool perf_fastchannels
;
154 bool level_indexing_mode
;
157 u32 sustained_freq_khz
;
158 u32 sustained_perf_level
;
159 unsigned long mult_factor
;
160 struct scmi_perf_domain_info info
;
161 struct scmi_opp opp
[MAX_OPPS
];
162 struct scmi_fc_info
*fc_info
;
163 struct xarray opps_by_idx
;
164 struct xarray opps_by_lvl
;
165 DECLARE_HASHTABLE(opps_by_freq
, ilog2(MAX_OPPS
));
/* Find the OPP with the given indicative frequency, NULL-ish if absent */
#define LOOKUP_BY_FREQ(__htp, __freq)					\
({									\
		/* u32 cast is needed to pick right hash func */	\
		u32 f_ = (u32)(__freq);					\
		struct scmi_opp *_opp;					\
									\
		hash_for_each_possible((__htp), _opp, hash, f_)		\
			if (_opp->indicative_freq == f_)		\
				break;					\
									\
		_opp;							\
})
180 struct scmi_perf_info
{
183 enum scmi_power_scale power_scale
;
188 struct perf_dom_info
*dom_info
;
191 static enum scmi_performance_protocol_cmd evt_2_cmd
[] = {
196 static int scmi_perf_attributes_get(const struct scmi_protocol_handle
*ph
,
197 struct scmi_perf_info
*pi
)
201 struct scmi_msg_resp_perf_attributes
*attr
;
203 ret
= ph
->xops
->xfer_get_init(ph
, PROTOCOL_ATTRIBUTES
, 0,
210 ret
= ph
->xops
->do_xfer(ph
, t
);
212 u16 flags
= le16_to_cpu(attr
->flags
);
214 pi
->num_domains
= le16_to_cpu(attr
->num_domains
);
216 if (POWER_SCALE_IN_MILLIWATT(flags
))
217 pi
->power_scale
= SCMI_POWER_MILLIWATTS
;
218 if (PROTOCOL_REV_MAJOR(pi
->version
) >= 0x3)
219 if (POWER_SCALE_IN_MICROWATT(flags
))
220 pi
->power_scale
= SCMI_POWER_MICROWATTS
;
222 pi
->stats_addr
= le32_to_cpu(attr
->stats_addr_low
) |
223 (u64
)le32_to_cpu(attr
->stats_addr_high
) << 32;
224 pi
->stats_size
= le32_to_cpu(attr
->stats_size
);
227 ph
->xops
->xfer_put(ph
, t
);
230 if (!ph
->hops
->protocol_msg_check(ph
, PERF_NOTIFY_LEVEL
, NULL
))
231 pi
->notify_lvl_cmd
= true;
233 if (!ph
->hops
->protocol_msg_check(ph
, PERF_NOTIFY_LIMITS
, NULL
))
234 pi
->notify_lim_cmd
= true;
240 static void scmi_perf_xa_destroy(void *data
)
243 struct scmi_perf_info
*pinfo
= data
;
245 for (domain
= 0; domain
< pinfo
->num_domains
; domain
++) {
246 xa_destroy(&((pinfo
->dom_info
+ domain
)->opps_by_idx
));
247 xa_destroy(&((pinfo
->dom_info
+ domain
)->opps_by_lvl
));
252 scmi_perf_domain_attributes_get(const struct scmi_protocol_handle
*ph
,
253 struct perf_dom_info
*dom_info
,
254 bool notify_lim_cmd
, bool notify_lvl_cmd
,
260 struct scmi_msg_resp_perf_domain_attributes
*attr
;
262 ret
= ph
->xops
->xfer_get_init(ph
, PERF_DOMAIN_ATTRIBUTES
,
263 sizeof(dom_info
->id
), sizeof(*attr
), &t
);
267 put_unaligned_le32(dom_info
->id
, t
->tx
.buf
);
270 ret
= ph
->xops
->do_xfer(ph
, t
);
272 flags
= le32_to_cpu(attr
->flags
);
274 dom_info
->set_limits
= SUPPORTS_SET_LIMITS(flags
);
275 dom_info
->info
.set_perf
= SUPPORTS_SET_PERF_LVL(flags
);
277 dom_info
->perf_limit_notify
=
278 SUPPORTS_PERF_LIMIT_NOTIFY(flags
);
280 dom_info
->perf_level_notify
=
281 SUPPORTS_PERF_LEVEL_NOTIFY(flags
);
282 dom_info
->perf_fastchannels
= SUPPORTS_PERF_FASTCHANNELS(flags
);
283 if (PROTOCOL_REV_MAJOR(version
) >= 0x4)
284 dom_info
->level_indexing_mode
=
285 SUPPORTS_LEVEL_INDEXING(flags
);
286 dom_info
->rate_limit_us
= le32_to_cpu(attr
->rate_limit_us
) &
288 dom_info
->sustained_freq_khz
=
289 le32_to_cpu(attr
->sustained_freq_khz
);
290 dom_info
->sustained_perf_level
=
291 le32_to_cpu(attr
->sustained_perf_level
);
293 * sustained_freq_khz = mult_factor * sustained_perf_level
294 * mult_factor must be non zero positive integer(not fraction)
296 if (!dom_info
->sustained_freq_khz
||
297 !dom_info
->sustained_perf_level
||
298 dom_info
->level_indexing_mode
) {
299 /* CPUFreq converts to kHz, hence default 1000 */
300 dom_info
->mult_factor
= 1000;
302 dom_info
->mult_factor
=
303 (dom_info
->sustained_freq_khz
* 1000UL)
304 / dom_info
->sustained_perf_level
;
305 if ((dom_info
->sustained_freq_khz
* 1000UL) %
306 dom_info
->sustained_perf_level
)
308 "multiplier for domain %d rounded\n",
311 if (!dom_info
->mult_factor
)
313 "Wrong sustained perf/frequency(domain %d)\n",
316 strscpy(dom_info
->info
.name
, attr
->name
,
317 SCMI_SHORT_NAME_MAX_SIZE
);
320 ph
->xops
->xfer_put(ph
, t
);
323 * If supported overwrite short name with the extended one;
324 * on error just carry on and use already provided short name.
326 if (!ret
&& PROTOCOL_REV_MAJOR(version
) >= 0x3 &&
327 SUPPORTS_EXTENDED_NAMES(flags
))
328 ph
->hops
->extended_name_get(ph
, PERF_DOMAIN_NAME_GET
,
329 dom_info
->id
, NULL
, dom_info
->info
.name
,
332 xa_init(&dom_info
->opps_by_lvl
);
333 if (dom_info
->level_indexing_mode
) {
334 xa_init(&dom_info
->opps_by_idx
);
335 hash_init(dom_info
->opps_by_freq
);
341 static int opp_cmp_func(const void *opp1
, const void *opp2
)
343 const struct scmi_opp
*t1
= opp1
, *t2
= opp2
;
345 return t1
->perf
- t2
->perf
;
348 struct scmi_perf_ipriv
{
350 struct perf_dom_info
*perf_dom
;
353 static void iter_perf_levels_prepare_message(void *message
,
354 unsigned int desc_index
,
357 struct scmi_msg_perf_describe_levels
*msg
= message
;
358 const struct scmi_perf_ipriv
*p
= priv
;
360 msg
->domain
= cpu_to_le32(p
->perf_dom
->id
);
361 /* Set the number of OPPs to be skipped/already read */
362 msg
->level_index
= cpu_to_le32(desc_index
);
365 static int iter_perf_levels_update_state(struct scmi_iterator_state
*st
,
366 const void *response
, void *priv
)
368 const struct scmi_msg_resp_perf_describe_levels
*r
= response
;
370 st
->num_returned
= le16_to_cpu(r
->num_returned
);
371 st
->num_remaining
= le16_to_cpu(r
->num_remaining
);
377 process_response_opp(struct device
*dev
, struct perf_dom_info
*dom
,
378 struct scmi_opp
*opp
, unsigned int loop_idx
,
379 const struct scmi_msg_resp_perf_describe_levels
*r
)
383 opp
->perf
= le32_to_cpu(r
->opp
[loop_idx
].perf_val
);
384 opp
->power
= le32_to_cpu(r
->opp
[loop_idx
].power
);
385 opp
->trans_latency_us
=
386 le16_to_cpu(r
->opp
[loop_idx
].transition_latency_us
);
388 ret
= xa_insert(&dom
->opps_by_lvl
, opp
->perf
, opp
, GFP_KERNEL
);
390 dev_info(dev
, FW_BUG
"Failed to add opps_by_lvl at %d for %s - ret:%d\n",
391 opp
->perf
, dom
->info
.name
, ret
);
399 process_response_opp_v4(struct device
*dev
, struct perf_dom_info
*dom
,
400 struct scmi_opp
*opp
, unsigned int loop_idx
,
401 const struct scmi_msg_resp_perf_describe_levels_v4
*r
)
405 opp
->perf
= le32_to_cpu(r
->opp
[loop_idx
].perf_val
);
406 opp
->power
= le32_to_cpu(r
->opp
[loop_idx
].power
);
407 opp
->trans_latency_us
=
408 le16_to_cpu(r
->opp
[loop_idx
].transition_latency_us
);
410 ret
= xa_insert(&dom
->opps_by_lvl
, opp
->perf
, opp
, GFP_KERNEL
);
412 dev_info(dev
, FW_BUG
"Failed to add opps_by_lvl at %d for %s - ret:%d\n",
413 opp
->perf
, dom
->info
.name
, ret
);
417 /* Note that PERF v4 reports always five 32-bit words */
418 opp
->indicative_freq
= le32_to_cpu(r
->opp
[loop_idx
].indicative_freq
);
419 if (dom
->level_indexing_mode
) {
420 opp
->level_index
= le32_to_cpu(r
->opp
[loop_idx
].level_index
);
422 ret
= xa_insert(&dom
->opps_by_idx
, opp
->level_index
, opp
,
426 "Failed to add opps_by_idx at %d for %s - ret:%d\n",
427 opp
->level_index
, dom
->info
.name
, ret
);
429 /* Cleanup by_lvl too */
430 xa_erase(&dom
->opps_by_lvl
, opp
->perf
);
435 hash_add(dom
->opps_by_freq
, &opp
->hash
, opp
->indicative_freq
);
442 iter_perf_levels_process_response(const struct scmi_protocol_handle
*ph
,
443 const void *response
,
444 struct scmi_iterator_state
*st
, void *priv
)
447 struct scmi_opp
*opp
;
448 struct scmi_perf_ipriv
*p
= priv
;
450 opp
= &p
->perf_dom
->opp
[p
->perf_dom
->opp_count
];
451 if (PROTOCOL_REV_MAJOR(p
->version
) <= 0x3)
452 ret
= process_response_opp(ph
->dev
, p
->perf_dom
, opp
,
453 st
->loop_idx
, response
);
455 ret
= process_response_opp_v4(ph
->dev
, p
->perf_dom
, opp
,
456 st
->loop_idx
, response
);
458 /* Skip BAD duplicates received from firmware */
460 return ret
== -EBUSY
? 0 : ret
;
462 p
->perf_dom
->opp_count
++;
464 dev_dbg(ph
->dev
, "Level %d Power %d Latency %dus Ifreq %d Index %d\n",
465 opp
->perf
, opp
->power
, opp
->trans_latency_us
,
466 opp
->indicative_freq
, opp
->level_index
);
472 scmi_perf_describe_levels_get(const struct scmi_protocol_handle
*ph
,
473 struct perf_dom_info
*perf_dom
, u32 version
)
477 struct scmi_iterator_ops ops
= {
478 .prepare_message
= iter_perf_levels_prepare_message
,
479 .update_state
= iter_perf_levels_update_state
,
480 .process_response
= iter_perf_levels_process_response
,
482 struct scmi_perf_ipriv ppriv
= {
484 .perf_dom
= perf_dom
,
487 iter
= ph
->hops
->iter_response_init(ph
, &ops
, MAX_OPPS
,
488 PERF_DESCRIBE_LEVELS
,
489 sizeof(struct scmi_msg_perf_describe_levels
),
492 return PTR_ERR(iter
);
494 ret
= ph
->hops
->iter_response_run(iter
);
498 if (perf_dom
->opp_count
)
499 sort(perf_dom
->opp
, perf_dom
->opp_count
,
500 sizeof(struct scmi_opp
), opp_cmp_func
, NULL
);
505 static int scmi_perf_num_domains_get(const struct scmi_protocol_handle
*ph
)
507 struct scmi_perf_info
*pi
= ph
->get_priv(ph
);
509 return pi
->num_domains
;
512 static inline struct perf_dom_info
*
513 scmi_perf_domain_lookup(const struct scmi_protocol_handle
*ph
, u32 domain
)
515 struct scmi_perf_info
*pi
= ph
->get_priv(ph
);
517 if (domain
>= pi
->num_domains
)
518 return ERR_PTR(-EINVAL
);
520 return pi
->dom_info
+ domain
;
523 static const struct scmi_perf_domain_info
*
524 scmi_perf_info_get(const struct scmi_protocol_handle
*ph
, u32 domain
)
526 struct perf_dom_info
*dom
;
528 dom
= scmi_perf_domain_lookup(ph
, domain
);
530 return ERR_PTR(-EINVAL
);
535 static int scmi_perf_msg_limits_set(const struct scmi_protocol_handle
*ph
,
536 u32 domain
, u32 max_perf
, u32 min_perf
)
540 struct scmi_perf_set_limits
*limits
;
542 ret
= ph
->xops
->xfer_get_init(ph
, PERF_LIMITS_SET
,
543 sizeof(*limits
), 0, &t
);
548 limits
->domain
= cpu_to_le32(domain
);
549 limits
->max_level
= cpu_to_le32(max_perf
);
550 limits
->min_level
= cpu_to_le32(min_perf
);
552 ret
= ph
->xops
->do_xfer(ph
, t
);
554 ph
->xops
->xfer_put(ph
, t
);
558 static int __scmi_perf_limits_set(const struct scmi_protocol_handle
*ph
,
559 struct perf_dom_info
*dom
, u32 max_perf
,
562 if (dom
->fc_info
&& dom
->fc_info
[PERF_FC_LIMIT
].set_addr
) {
563 struct scmi_fc_info
*fci
= &dom
->fc_info
[PERF_FC_LIMIT
];
565 trace_scmi_fc_call(SCMI_PROTOCOL_PERF
, PERF_LIMITS_SET
,
566 dom
->id
, min_perf
, max_perf
);
567 iowrite32(max_perf
, fci
->set_addr
);
568 iowrite32(min_perf
, fci
->set_addr
+ 4);
569 ph
->hops
->fastchannel_db_ring(fci
->set_db
);
573 return scmi_perf_msg_limits_set(ph
, dom
->id
, max_perf
, min_perf
);
576 static int scmi_perf_limits_set(const struct scmi_protocol_handle
*ph
,
577 u32 domain
, u32 max_perf
, u32 min_perf
)
579 struct scmi_perf_info
*pi
= ph
->get_priv(ph
);
580 struct perf_dom_info
*dom
;
582 dom
= scmi_perf_domain_lookup(ph
, domain
);
586 if (!dom
->set_limits
)
589 if (PROTOCOL_REV_MAJOR(pi
->version
) >= 0x3 && !max_perf
&& !min_perf
)
592 if (dom
->level_indexing_mode
) {
593 struct scmi_opp
*opp
;
596 opp
= xa_load(&dom
->opps_by_lvl
, min_perf
);
600 min_perf
= opp
->level_index
;
604 opp
= xa_load(&dom
->opps_by_lvl
, max_perf
);
608 max_perf
= opp
->level_index
;
612 return __scmi_perf_limits_set(ph
, dom
, max_perf
, min_perf
);
615 static int scmi_perf_msg_limits_get(const struct scmi_protocol_handle
*ph
,
616 u32 domain
, u32
*max_perf
, u32
*min_perf
)
620 struct scmi_perf_get_limits
*limits
;
622 ret
= ph
->xops
->xfer_get_init(ph
, PERF_LIMITS_GET
,
623 sizeof(__le32
), 0, &t
);
627 put_unaligned_le32(domain
, t
->tx
.buf
);
629 ret
= ph
->xops
->do_xfer(ph
, t
);
633 *max_perf
= le32_to_cpu(limits
->max_level
);
634 *min_perf
= le32_to_cpu(limits
->min_level
);
637 ph
->xops
->xfer_put(ph
, t
);
641 static int __scmi_perf_limits_get(const struct scmi_protocol_handle
*ph
,
642 struct perf_dom_info
*dom
, u32
*max_perf
,
645 if (dom
->fc_info
&& dom
->fc_info
[PERF_FC_LIMIT
].get_addr
) {
646 struct scmi_fc_info
*fci
= &dom
->fc_info
[PERF_FC_LIMIT
];
648 *max_perf
= ioread32(fci
->get_addr
);
649 *min_perf
= ioread32(fci
->get_addr
+ 4);
650 trace_scmi_fc_call(SCMI_PROTOCOL_PERF
, PERF_LIMITS_GET
,
651 dom
->id
, *min_perf
, *max_perf
);
655 return scmi_perf_msg_limits_get(ph
, dom
->id
, max_perf
, min_perf
);
658 static int scmi_perf_limits_get(const struct scmi_protocol_handle
*ph
,
659 u32 domain
, u32
*max_perf
, u32
*min_perf
)
662 struct perf_dom_info
*dom
;
664 dom
= scmi_perf_domain_lookup(ph
, domain
);
668 ret
= __scmi_perf_limits_get(ph
, dom
, max_perf
, min_perf
);
672 if (dom
->level_indexing_mode
) {
673 struct scmi_opp
*opp
;
675 opp
= xa_load(&dom
->opps_by_idx
, *min_perf
);
679 *min_perf
= opp
->perf
;
681 opp
= xa_load(&dom
->opps_by_idx
, *max_perf
);
685 *max_perf
= opp
->perf
;
691 static int scmi_perf_msg_level_set(const struct scmi_protocol_handle
*ph
,
692 u32 domain
, u32 level
, bool poll
)
696 struct scmi_perf_set_level
*lvl
;
698 ret
= ph
->xops
->xfer_get_init(ph
, PERF_LEVEL_SET
, sizeof(*lvl
), 0, &t
);
702 t
->hdr
.poll_completion
= poll
;
704 lvl
->domain
= cpu_to_le32(domain
);
705 lvl
->level
= cpu_to_le32(level
);
707 ret
= ph
->xops
->do_xfer(ph
, t
);
709 ph
->xops
->xfer_put(ph
, t
);
713 static int __scmi_perf_level_set(const struct scmi_protocol_handle
*ph
,
714 struct perf_dom_info
*dom
, u32 level
,
717 if (dom
->fc_info
&& dom
->fc_info
[PERF_FC_LEVEL
].set_addr
) {
718 struct scmi_fc_info
*fci
= &dom
->fc_info
[PERF_FC_LEVEL
];
720 trace_scmi_fc_call(SCMI_PROTOCOL_PERF
, PERF_LEVEL_SET
,
722 iowrite32(level
, fci
->set_addr
);
723 ph
->hops
->fastchannel_db_ring(fci
->set_db
);
727 return scmi_perf_msg_level_set(ph
, dom
->id
, level
, poll
);
730 static int scmi_perf_level_set(const struct scmi_protocol_handle
*ph
,
731 u32 domain
, u32 level
, bool poll
)
733 struct perf_dom_info
*dom
;
735 dom
= scmi_perf_domain_lookup(ph
, domain
);
739 if (!dom
->info
.set_perf
)
742 if (dom
->level_indexing_mode
) {
743 struct scmi_opp
*opp
;
745 opp
= xa_load(&dom
->opps_by_lvl
, level
);
749 level
= opp
->level_index
;
752 return __scmi_perf_level_set(ph
, dom
, level
, poll
);
755 static int scmi_perf_msg_level_get(const struct scmi_protocol_handle
*ph
,
756 u32 domain
, u32
*level
, bool poll
)
761 ret
= ph
->xops
->xfer_get_init(ph
, PERF_LEVEL_GET
,
762 sizeof(u32
), sizeof(u32
), &t
);
766 t
->hdr
.poll_completion
= poll
;
767 put_unaligned_le32(domain
, t
->tx
.buf
);
769 ret
= ph
->xops
->do_xfer(ph
, t
);
771 *level
= get_unaligned_le32(t
->rx
.buf
);
773 ph
->xops
->xfer_put(ph
, t
);
777 static int __scmi_perf_level_get(const struct scmi_protocol_handle
*ph
,
778 struct perf_dom_info
*dom
, u32
*level
,
781 if (dom
->fc_info
&& dom
->fc_info
[PERF_FC_LEVEL
].get_addr
) {
782 *level
= ioread32(dom
->fc_info
[PERF_FC_LEVEL
].get_addr
);
783 trace_scmi_fc_call(SCMI_PROTOCOL_PERF
, PERF_LEVEL_GET
,
788 return scmi_perf_msg_level_get(ph
, dom
->id
, level
, poll
);
791 static int scmi_perf_level_get(const struct scmi_protocol_handle
*ph
,
792 u32 domain
, u32
*level
, bool poll
)
795 struct perf_dom_info
*dom
;
797 dom
= scmi_perf_domain_lookup(ph
, domain
);
801 ret
= __scmi_perf_level_get(ph
, dom
, level
, poll
);
805 if (dom
->level_indexing_mode
) {
806 struct scmi_opp
*opp
;
808 opp
= xa_load(&dom
->opps_by_idx
, *level
);
818 static int scmi_perf_level_limits_notify(const struct scmi_protocol_handle
*ph
,
819 u32 domain
, int message_id
,
824 struct scmi_perf_notify_level_or_limits
*notify
;
826 ret
= ph
->xops
->xfer_get_init(ph
, message_id
, sizeof(*notify
), 0, &t
);
831 notify
->domain
= cpu_to_le32(domain
);
832 notify
->notify_enable
= enable
? cpu_to_le32(BIT(0)) : 0;
834 ret
= ph
->xops
->do_xfer(ph
, t
);
836 ph
->xops
->xfer_put(ph
, t
);
840 static void scmi_perf_domain_init_fc(const struct scmi_protocol_handle
*ph
,
841 struct perf_dom_info
*dom
)
843 struct scmi_fc_info
*fc
;
845 fc
= devm_kcalloc(ph
->dev
, PERF_FC_MAX
, sizeof(*fc
), GFP_KERNEL
);
849 ph
->hops
->fastchannel_init(ph
, PERF_DESCRIBE_FASTCHANNEL
,
850 PERF_LEVEL_GET
, 4, dom
->id
,
851 &fc
[PERF_FC_LEVEL
].get_addr
, NULL
,
852 &fc
[PERF_FC_LEVEL
].rate_limit
);
854 ph
->hops
->fastchannel_init(ph
, PERF_DESCRIBE_FASTCHANNEL
,
855 PERF_LIMITS_GET
, 8, dom
->id
,
856 &fc
[PERF_FC_LIMIT
].get_addr
, NULL
,
857 &fc
[PERF_FC_LIMIT
].rate_limit
);
859 if (dom
->info
.set_perf
)
860 ph
->hops
->fastchannel_init(ph
, PERF_DESCRIBE_FASTCHANNEL
,
861 PERF_LEVEL_SET
, 4, dom
->id
,
862 &fc
[PERF_FC_LEVEL
].set_addr
,
863 &fc
[PERF_FC_LEVEL
].set_db
,
864 &fc
[PERF_FC_LEVEL
].rate_limit
);
867 ph
->hops
->fastchannel_init(ph
, PERF_DESCRIBE_FASTCHANNEL
,
868 PERF_LIMITS_SET
, 8, dom
->id
,
869 &fc
[PERF_FC_LIMIT
].set_addr
,
870 &fc
[PERF_FC_LIMIT
].set_db
,
871 &fc
[PERF_FC_LIMIT
].rate_limit
);
876 static int scmi_dvfs_device_opps_add(const struct scmi_protocol_handle
*ph
,
877 struct device
*dev
, u32 domain
)
881 struct dev_pm_opp_data data
= {};
882 struct perf_dom_info
*dom
;
884 dom
= scmi_perf_domain_lookup(ph
, domain
);
888 for (idx
= 0; idx
< dom
->opp_count
; idx
++) {
889 if (!dom
->level_indexing_mode
)
890 freq
= dom
->opp
[idx
].perf
* dom
->mult_factor
;
892 freq
= dom
->opp
[idx
].indicative_freq
* dom
->mult_factor
;
894 /* All OPPs above the sustained frequency are treated as turbo */
895 data
.turbo
= freq
> dom
->sustained_freq_khz
* 1000;
897 data
.level
= dom
->opp
[idx
].perf
;
900 ret
= dev_pm_opp_add_dynamic(dev
, &data
);
902 dev_warn(dev
, "[%d][%s]: Failed to add OPP[%d] %lu\n",
903 domain
, dom
->info
.name
, idx
, freq
);
904 dev_pm_opp_remove_all_dynamic(dev
);
908 dev_dbg(dev
, "[%d][%s]:: Registered OPP[%d] %lu\n",
909 domain
, dom
->info
.name
, idx
, freq
);
915 scmi_dvfs_transition_latency_get(const struct scmi_protocol_handle
*ph
,
918 struct perf_dom_info
*dom
;
920 dom
= scmi_perf_domain_lookup(ph
, domain
);
925 return dom
->opp
[dom
->opp_count
- 1].trans_latency_us
* 1000;
929 scmi_dvfs_rate_limit_get(const struct scmi_protocol_handle
*ph
,
930 u32 domain
, u32
*rate_limit
)
932 struct perf_dom_info
*dom
;
937 dom
= scmi_perf_domain_lookup(ph
, domain
);
941 *rate_limit
= dom
->rate_limit_us
;
945 static int scmi_dvfs_freq_set(const struct scmi_protocol_handle
*ph
, u32 domain
,
946 unsigned long freq
, bool poll
)
949 struct perf_dom_info
*dom
;
951 dom
= scmi_perf_domain_lookup(ph
, domain
);
955 if (!dom
->level_indexing_mode
) {
956 level
= freq
/ dom
->mult_factor
;
958 struct scmi_opp
*opp
;
960 opp
= LOOKUP_BY_FREQ(dom
->opps_by_freq
,
961 freq
/ dom
->mult_factor
);
965 level
= opp
->level_index
;
968 return __scmi_perf_level_set(ph
, dom
, level
, poll
);
971 static int scmi_dvfs_freq_get(const struct scmi_protocol_handle
*ph
, u32 domain
,
972 unsigned long *freq
, bool poll
)
976 struct perf_dom_info
*dom
;
978 dom
= scmi_perf_domain_lookup(ph
, domain
);
982 ret
= __scmi_perf_level_get(ph
, dom
, &level
, poll
);
986 if (!dom
->level_indexing_mode
) {
987 *freq
= level
* dom
->mult_factor
;
989 struct scmi_opp
*opp
;
991 opp
= xa_load(&dom
->opps_by_idx
, level
);
995 *freq
= opp
->indicative_freq
* dom
->mult_factor
;
1001 static int scmi_dvfs_est_power_get(const struct scmi_protocol_handle
*ph
,
1002 u32 domain
, unsigned long *freq
,
1003 unsigned long *power
)
1005 struct perf_dom_info
*dom
;
1006 unsigned long opp_freq
;
1007 int idx
, ret
= -EINVAL
;
1008 struct scmi_opp
*opp
;
1010 dom
= scmi_perf_domain_lookup(ph
, domain
);
1012 return PTR_ERR(dom
);
1014 for (opp
= dom
->opp
, idx
= 0; idx
< dom
->opp_count
; idx
++, opp
++) {
1015 if (!dom
->level_indexing_mode
)
1016 opp_freq
= opp
->perf
* dom
->mult_factor
;
1018 opp_freq
= opp
->indicative_freq
* dom
->mult_factor
;
1020 if (opp_freq
< *freq
)
1024 *power
= opp
->power
;
1032 static bool scmi_fast_switch_possible(const struct scmi_protocol_handle
*ph
,
1035 struct perf_dom_info
*dom
;
1037 dom
= scmi_perf_domain_lookup(ph
, domain
);
1041 return dom
->fc_info
&& dom
->fc_info
[PERF_FC_LEVEL
].set_addr
;
1044 static int scmi_fast_switch_rate_limit(const struct scmi_protocol_handle
*ph
,
1045 u32 domain
, u32
*rate_limit
)
1047 struct perf_dom_info
*dom
;
1052 dom
= scmi_perf_domain_lookup(ph
, domain
);
1054 return PTR_ERR(dom
);
1059 *rate_limit
= dom
->fc_info
[PERF_FC_LEVEL
].rate_limit
;
1063 static enum scmi_power_scale
1064 scmi_power_scale_get(const struct scmi_protocol_handle
*ph
)
1066 struct scmi_perf_info
*pi
= ph
->get_priv(ph
);
1068 return pi
->power_scale
;
1071 static const struct scmi_perf_proto_ops perf_proto_ops
= {
1072 .num_domains_get
= scmi_perf_num_domains_get
,
1073 .info_get
= scmi_perf_info_get
,
1074 .limits_set
= scmi_perf_limits_set
,
1075 .limits_get
= scmi_perf_limits_get
,
1076 .level_set
= scmi_perf_level_set
,
1077 .level_get
= scmi_perf_level_get
,
1078 .transition_latency_get
= scmi_dvfs_transition_latency_get
,
1079 .rate_limit_get
= scmi_dvfs_rate_limit_get
,
1080 .device_opps_add
= scmi_dvfs_device_opps_add
,
1081 .freq_set
= scmi_dvfs_freq_set
,
1082 .freq_get
= scmi_dvfs_freq_get
,
1083 .est_power_get
= scmi_dvfs_est_power_get
,
1084 .fast_switch_possible
= scmi_fast_switch_possible
,
1085 .fast_switch_rate_limit
= scmi_fast_switch_rate_limit
,
1086 .power_scale_get
= scmi_power_scale_get
,
1089 static bool scmi_perf_notify_supported(const struct scmi_protocol_handle
*ph
,
1090 u8 evt_id
, u32 src_id
)
1093 struct perf_dom_info
*dom
;
1095 if (evt_id
>= ARRAY_SIZE(evt_2_cmd
))
1098 dom
= scmi_perf_domain_lookup(ph
, src_id
);
1102 if (evt_id
== SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED
)
1103 supported
= dom
->perf_limit_notify
;
1105 supported
= dom
->perf_level_notify
;
1110 static int scmi_perf_set_notify_enabled(const struct scmi_protocol_handle
*ph
,
1111 u8 evt_id
, u32 src_id
, bool enable
)
1115 if (evt_id
>= ARRAY_SIZE(evt_2_cmd
))
1118 cmd_id
= evt_2_cmd
[evt_id
];
1119 ret
= scmi_perf_level_limits_notify(ph
, src_id
, cmd_id
, enable
);
1121 pr_debug("FAIL_ENABLED - evt[%X] dom[%d] - ret:%d\n",
1122 evt_id
, src_id
, ret
);
1128 scmi_perf_xlate_opp_to_freq(struct perf_dom_info
*dom
,
1129 unsigned int index
, unsigned long *freq
)
1131 struct scmi_opp
*opp
;
1136 if (!dom
->level_indexing_mode
) {
1137 opp
= xa_load(&dom
->opps_by_lvl
, index
);
1141 *freq
= opp
->perf
* dom
->mult_factor
;
1143 opp
= xa_load(&dom
->opps_by_idx
, index
);
1147 *freq
= opp
->indicative_freq
* dom
->mult_factor
;
1153 static void *scmi_perf_fill_custom_report(const struct scmi_protocol_handle
*ph
,
1154 u8 evt_id
, ktime_t timestamp
,
1155 const void *payld
, size_t payld_sz
,
1156 void *report
, u32
*src_id
)
1160 struct perf_dom_info
*dom
;
1163 case SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED
:
1165 const struct scmi_perf_limits_notify_payld
*p
= payld
;
1166 struct scmi_perf_limits_report
*r
= report
;
1167 unsigned long freq_min
, freq_max
;
1169 if (sizeof(*p
) != payld_sz
)
1172 r
->timestamp
= timestamp
;
1173 r
->agent_id
= le32_to_cpu(p
->agent_id
);
1174 r
->domain_id
= le32_to_cpu(p
->domain_id
);
1175 r
->range_max
= le32_to_cpu(p
->range_max
);
1176 r
->range_min
= le32_to_cpu(p
->range_min
);
1177 /* Check if the reported domain exist at all */
1178 dom
= scmi_perf_domain_lookup(ph
, r
->domain_id
);
1182 * Event will be reported from this point on...
1183 * ...even if, later, xlated frequencies were not retrieved.
1185 *src_id
= r
->domain_id
;
1188 ret
= scmi_perf_xlate_opp_to_freq(dom
, r
->range_max
, &freq_max
);
1192 ret
= scmi_perf_xlate_opp_to_freq(dom
, r
->range_min
, &freq_min
);
1196 /* Report translated freqs ONLY if both available */
1197 r
->range_max_freq
= freq_max
;
1198 r
->range_min_freq
= freq_min
;
1202 case SCMI_EVENT_PERFORMANCE_LEVEL_CHANGED
:
1204 const struct scmi_perf_level_notify_payld
*p
= payld
;
1205 struct scmi_perf_level_report
*r
= report
;
1208 if (sizeof(*p
) != payld_sz
)
1211 r
->timestamp
= timestamp
;
1212 r
->agent_id
= le32_to_cpu(p
->agent_id
);
1213 r
->domain_id
= le32_to_cpu(p
->domain_id
);
1214 /* Report translated freqs ONLY if available */
1215 r
->performance_level
= le32_to_cpu(p
->performance_level
);
1216 /* Check if the reported domain exist at all */
1217 dom
= scmi_perf_domain_lookup(ph
, r
->domain_id
);
1221 * Event will be reported from this point on...
1222 * ...even if, later, xlated frequencies were not retrieved.
1224 *src_id
= r
->domain_id
;
1227 /* Report translated freqs ONLY if available */
1228 ret
= scmi_perf_xlate_opp_to_freq(dom
, r
->performance_level
,
1233 r
->performance_level_freq
= freq
;
1244 static int scmi_perf_get_num_sources(const struct scmi_protocol_handle
*ph
)
1246 struct scmi_perf_info
*pi
= ph
->get_priv(ph
);
1251 return pi
->num_domains
;
1254 static const struct scmi_event perf_events
[] = {
1256 .id
= SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED
,
1257 .max_payld_sz
= sizeof(struct scmi_perf_limits_notify_payld
),
1258 .max_report_sz
= sizeof(struct scmi_perf_limits_report
),
1261 .id
= SCMI_EVENT_PERFORMANCE_LEVEL_CHANGED
,
1262 .max_payld_sz
= sizeof(struct scmi_perf_level_notify_payld
),
1263 .max_report_sz
= sizeof(struct scmi_perf_level_report
),
1267 static const struct scmi_event_ops perf_event_ops
= {
1268 .is_notify_supported
= scmi_perf_notify_supported
,
1269 .get_num_sources
= scmi_perf_get_num_sources
,
1270 .set_notify_enabled
= scmi_perf_set_notify_enabled
,
1271 .fill_custom_report
= scmi_perf_fill_custom_report
,
1274 static const struct scmi_protocol_events perf_protocol_events
= {
1275 .queue_sz
= SCMI_PROTO_QUEUE_SZ
,
1276 .ops
= &perf_event_ops
,
1277 .evts
= perf_events
,
1278 .num_events
= ARRAY_SIZE(perf_events
),
1281 static int scmi_perf_protocol_init(const struct scmi_protocol_handle
*ph
)
1285 struct scmi_perf_info
*pinfo
;
1287 ret
= ph
->xops
->version_get(ph
, &version
);
1291 dev_dbg(ph
->dev
, "Performance Version %d.%d\n",
1292 PROTOCOL_REV_MAJOR(version
), PROTOCOL_REV_MINOR(version
));
1294 pinfo
= devm_kzalloc(ph
->dev
, sizeof(*pinfo
), GFP_KERNEL
);
1298 pinfo
->version
= version
;
1300 ret
= scmi_perf_attributes_get(ph
, pinfo
);
1304 pinfo
->dom_info
= devm_kcalloc(ph
->dev
, pinfo
->num_domains
,
1305 sizeof(*pinfo
->dom_info
), GFP_KERNEL
);
1306 if (!pinfo
->dom_info
)
1309 for (domain
= 0; domain
< pinfo
->num_domains
; domain
++) {
1310 struct perf_dom_info
*dom
= pinfo
->dom_info
+ domain
;
1313 scmi_perf_domain_attributes_get(ph
, dom
, pinfo
->notify_lim_cmd
,
1314 pinfo
->notify_lvl_cmd
, version
);
1315 scmi_perf_describe_levels_get(ph
, dom
, version
);
1317 if (dom
->perf_fastchannels
)
1318 scmi_perf_domain_init_fc(ph
, dom
);
1321 ret
= devm_add_action_or_reset(ph
->dev
, scmi_perf_xa_destroy
, pinfo
);
1325 return ph
->set_priv(ph
, pinfo
, version
);
1328 static const struct scmi_protocol scmi_perf
= {
1329 .id
= SCMI_PROTOCOL_PERF
,
1330 .owner
= THIS_MODULE
,
1331 .instance_init
= &scmi_perf_protocol_init
,
1332 .ops
= &perf_proto_ops
,
1333 .events
= &perf_protocol_events
,
1334 .supported_version
= SCMI_PROTOCOL_SUPPORTED_VERSION
,
1337 DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(perf
, scmi_perf
)