// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Management Interface (SCMI) Performance Protocol
 *
 * Copyright (C) 2018 ARM Ltd.
 */

#include <linux/bits.h>
#include <linux/of.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/sort.h>

#include "common.h"

enum scmi_performance_protocol_cmd {
        PERF_DOMAIN_ATTRIBUTES = 0x3,
        PERF_DESCRIBE_LEVELS = 0x4,
        PERF_LIMITS_SET = 0x5,
        PERF_LIMITS_GET = 0x6,
        PERF_LEVEL_SET = 0x7,
        PERF_LEVEL_GET = 0x8,
        PERF_NOTIFY_LIMITS = 0x9,
        PERF_NOTIFY_LEVEL = 0xa,
        PERF_DESCRIBE_FASTCHANNEL = 0xb,
};

struct scmi_opp {
        u32 perf;
        u32 power;
        u32 trans_latency_us;
};

struct scmi_msg_resp_perf_attributes {
        __le16 num_domains;
        __le16 flags;
#define POWER_SCALE_IN_MILLIWATT(x)     ((x) & BIT(0))
        __le32 stats_addr_low;
        __le32 stats_addr_high;
        __le32 stats_size;
};

struct scmi_msg_resp_perf_domain_attributes {
        __le32 flags;
#define SUPPORTS_SET_LIMITS(x)          ((x) & BIT(31))
#define SUPPORTS_SET_PERF_LVL(x)        ((x) & BIT(30))
#define SUPPORTS_PERF_LIMIT_NOTIFY(x)   ((x) & BIT(29))
#define SUPPORTS_PERF_LEVEL_NOTIFY(x)   ((x) & BIT(28))
#define SUPPORTS_PERF_FASTCHANNELS(x)   ((x) & BIT(27))
        __le32 rate_limit_us;
        __le32 sustained_freq_khz;
        __le32 sustained_perf_level;
        u8 name[SCMI_MAX_STR_SIZE];
};

struct scmi_msg_perf_describe_levels {
        __le32 domain;
        __le32 level_index;
};

struct scmi_perf_set_limits {
        __le32 domain;
        __le32 max_level;
        __le32 min_level;
};

struct scmi_perf_get_limits {
        __le32 max_level;
        __le32 min_level;
};

struct scmi_perf_set_level {
        __le32 domain;
        __le32 level;
};

struct scmi_perf_notify_level_or_limits {
        __le32 domain;
        __le32 notify_enable;
};

struct scmi_msg_resp_perf_describe_levels {
        __le16 num_returned;
        __le16 num_remaining;
        struct {
                __le32 perf_val;
                __le32 power;
                __le16 transition_latency_us;
                __le16 reserved;
        } opp[0];
};

struct scmi_perf_get_fc_info {
        __le32 domain;
        __le32 message_id;
};

struct scmi_msg_resp_perf_desc_fc {
        __le32 attr;
#define SUPPORTS_DOORBELL(x)            ((x) & BIT(0))
#define DOORBELL_REG_WIDTH(x)           FIELD_GET(GENMASK(2, 1), (x))
        __le32 rate_limit;
        __le32 chan_addr_low;
        __le32 chan_addr_high;
        __le32 chan_size;
        __le32 db_addr_low;
        __le32 db_addr_high;
        __le32 db_set_lmask;
        __le32 db_set_hmask;
        __le32 db_preserve_lmask;
        __le32 db_preserve_hmask;
        __le32 db_error;
};

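/*
 * Host-side bookkeeping for a discovered fast channel: scmi_fc_db_info
 * holds the mapped doorbell register (width, set/preserve masks, address)
 * and scmi_fc_info caches the per-domain level/limit fast channel mappings.
 * Both are filled in by scmi_perf_domain_desc_fc() further below.
 */
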
struct scmi_fc_db_info {
        int width;
        u64 set;
        u64 mask;
        void __iomem *addr;
};

struct scmi_fc_info {
        void __iomem *level_set_addr;
        void __iomem *limit_set_addr;
        void __iomem *level_get_addr;
        void __iomem *limit_get_addr;
        struct scmi_fc_db_info *level_set_db;
        struct scmi_fc_db_info *limit_set_db;
};

struct perf_dom_info {
        bool set_limits;
        bool set_perf;
        bool perf_limit_notify;
        bool perf_level_notify;
        bool perf_fastchannels;
        u32 opp_count;
        u32 sustained_freq_khz;
        u32 sustained_perf_level;
        u32 mult_factor;
        char name[SCMI_MAX_STR_SIZE];
        struct scmi_opp opp[MAX_OPPS];
        struct scmi_fc_info *fc_info;
};

struct scmi_perf_info {
        int num_domains;
        bool power_scale_mw;
        u64 stats_addr;
        u32 stats_size;
        struct perf_dom_info *dom_info;
};

static int scmi_perf_attributes_get(const struct scmi_handle *handle,
                                    struct scmi_perf_info *pi)
{
        int ret;
        struct scmi_xfer *t;
        struct scmi_msg_resp_perf_attributes *attr;

        ret = scmi_xfer_get_init(handle, PROTOCOL_ATTRIBUTES,
                                 SCMI_PROTOCOL_PERF, 0, sizeof(*attr), &t);
        if (ret)
                return ret;

        attr = t->rx.buf;

        ret = scmi_do_xfer(handle, t);
        if (!ret) {
                u16 flags = le16_to_cpu(attr->flags);

                pi->num_domains = le16_to_cpu(attr->num_domains);
                pi->power_scale_mw = POWER_SCALE_IN_MILLIWATT(flags);
                pi->stats_addr = le32_to_cpu(attr->stats_addr_low) |
                                (u64)le32_to_cpu(attr->stats_addr_high) << 32;
                pi->stats_size = le32_to_cpu(attr->stats_size);
        }

        scmi_xfer_put(handle, t);
        return ret;
}

static int
scmi_perf_domain_attributes_get(const struct scmi_handle *handle, u32 domain,
                                struct perf_dom_info *dom_info)
{
        int ret;
        struct scmi_xfer *t;
        struct scmi_msg_resp_perf_domain_attributes *attr;

        ret = scmi_xfer_get_init(handle, PERF_DOMAIN_ATTRIBUTES,
                                 SCMI_PROTOCOL_PERF, sizeof(domain),
                                 sizeof(*attr), &t);
        if (ret)
                return ret;

        put_unaligned_le32(domain, t->tx.buf);
        attr = t->rx.buf;

        ret = scmi_do_xfer(handle, t);
        if (!ret) {
                u32 flags = le32_to_cpu(attr->flags);

                dom_info->set_limits = SUPPORTS_SET_LIMITS(flags);
                dom_info->set_perf = SUPPORTS_SET_PERF_LVL(flags);
                dom_info->perf_limit_notify = SUPPORTS_PERF_LIMIT_NOTIFY(flags);
                dom_info->perf_level_notify = SUPPORTS_PERF_LEVEL_NOTIFY(flags);
                dom_info->perf_fastchannels = SUPPORTS_PERF_FASTCHANNELS(flags);
                dom_info->sustained_freq_khz =
                                        le32_to_cpu(attr->sustained_freq_khz);
                dom_info->sustained_perf_level =
                                        le32_to_cpu(attr->sustained_perf_level);
                if (!dom_info->sustained_freq_khz ||
                    !dom_info->sustained_perf_level)
                        /* CPUFreq converts to kHz, hence default 1000 */
                        dom_info->mult_factor = 1000;
                else
                        dom_info->mult_factor =
                                (dom_info->sustained_freq_khz * 1000) /
                                dom_info->sustained_perf_level;
                strlcpy(dom_info->name, attr->name, SCMI_MAX_STR_SIZE);
        }

        scmi_xfer_put(handle, t);
        return ret;
}

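/*
 * Note on mult_factor: OPP "perf" values are abstract performance levels,
 * and mult_factor converts them to a frequency in Hz for the OPP layer.
 * For example (hypothetical numbers), sustained_freq_khz = 2000000 (2 GHz)
 * at sustained_perf_level = 200 gives
 * mult_factor = (2000000 * 1000) / 200 = 10000000, i.e. 10 MHz per unit of
 * performance level. When firmware reports neither value, the default of
 * 1000 simply treats the level as a frequency in kHz.
 */
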
static int opp_cmp_func(const void *opp1, const void *opp2)
{
        const struct scmi_opp *t1 = opp1, *t2 = opp2;

        return t1->perf - t2->perf;
}

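/*
 * PERF_DESCRIBE_LEVELS is a paginated query: each exchange reports
 * num_returned OPP entries plus num_remaining still to be fetched, and
 * level_index tells the platform how many OPPs to skip on the next pass.
 * Once all levels have been read they are sorted in ascending perf order.
 */
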
static int
scmi_perf_describe_levels_get(const struct scmi_handle *handle, u32 domain,
                              struct perf_dom_info *perf_dom)
{
        int ret, cnt;
        u32 tot_opp_cnt = 0;
        u16 num_returned, num_remaining;
        struct scmi_xfer *t;
        struct scmi_opp *opp;
        struct scmi_msg_perf_describe_levels *dom_info;
        struct scmi_msg_resp_perf_describe_levels *level_info;

        ret = scmi_xfer_get_init(handle, PERF_DESCRIBE_LEVELS,
                                 SCMI_PROTOCOL_PERF, sizeof(*dom_info), 0, &t);
        if (ret)
                return ret;

        dom_info = t->tx.buf;
        level_info = t->rx.buf;

        do {
                dom_info->domain = cpu_to_le32(domain);
                /* Set the number of OPPs to be skipped/already read */
                dom_info->level_index = cpu_to_le32(tot_opp_cnt);

                ret = scmi_do_xfer(handle, t);
                if (ret)
                        break;

                num_returned = le16_to_cpu(level_info->num_returned);
                num_remaining = le16_to_cpu(level_info->num_remaining);
                if (tot_opp_cnt + num_returned > MAX_OPPS) {
                        dev_err(handle->dev, "No. of OPPs exceeded MAX_OPPS");
                        break;
                }

                opp = &perf_dom->opp[tot_opp_cnt];
                for (cnt = 0; cnt < num_returned; cnt++, opp++) {
                        opp->perf = le32_to_cpu(level_info->opp[cnt].perf_val);
                        opp->power = le32_to_cpu(level_info->opp[cnt].power);
                        opp->trans_latency_us = le16_to_cpu
                                (level_info->opp[cnt].transition_latency_us);

                        dev_dbg(handle->dev, "Level %d Power %d Latency %dus\n",
                                opp->perf, opp->power, opp->trans_latency_us);
                }

                tot_opp_cnt += num_returned;
                /*
                 * check for both returned and remaining to avoid infinite
                 * loop due to buggy firmware
                 */
        } while (num_returned && num_remaining);

        perf_dom->opp_count = tot_opp_cnt;
        scmi_xfer_put(handle, t);

        sort(perf_dom->opp, tot_opp_cnt, sizeof(*opp), opp_cmp_func, NULL);
        return ret;
}

#define SCMI_PERF_FC_RING_DB(w)                         \
do {                                                    \
        u##w val = 0;                                   \
                                                        \
        if (db->mask)                                   \
                val = ioread##w(db->addr) & db->mask;   \
        iowrite##w((u##w)db->set | val, db->addr);      \
} while (0)

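/*
 * Ringing a doorbell is a read-modify-write of the doorbell register:
 * bits in db->mask are preserved from the current value and db->set is
 * OR-ed in, using an accessor matching the register width. For a 32-bit
 * doorbell the macro above expands roughly to:
 *
 *      u32 val = 0;
 *      if (db->mask)
 *              val = ioread32(db->addr) & db->mask;
 *      iowrite32((u32)db->set | val, db->addr);
 */
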
static void scmi_perf_fc_ring_db(struct scmi_fc_db_info *db)
{
        if (!db || !db->addr)
                return;

        if (db->width == 1)
                SCMI_PERF_FC_RING_DB(8);
        else if (db->width == 2)
                SCMI_PERF_FC_RING_DB(16);
        else if (db->width == 4)
                SCMI_PERF_FC_RING_DB(32);
        else /* db->width == 8 */
#ifdef CONFIG_64BIT
                SCMI_PERF_FC_RING_DB(64);
#else
        {
                u64 val = 0;

                if (db->mask)
                        val = ioread64_hi_lo(db->addr) & db->mask;
                iowrite64_hi_lo(db->set | val, db->addr);
        }
#endif
}

static int scmi_perf_mb_limits_set(const struct scmi_handle *handle, u32 domain,
                                   u32 max_perf, u32 min_perf)
{
        int ret;
        struct scmi_xfer *t;
        struct scmi_perf_set_limits *limits;

        ret = scmi_xfer_get_init(handle, PERF_LIMITS_SET, SCMI_PROTOCOL_PERF,
                                 sizeof(*limits), 0, &t);
        if (ret)
                return ret;

        limits = t->tx.buf;
        limits->domain = cpu_to_le32(domain);
        limits->max_level = cpu_to_le32(max_perf);
        limits->min_level = cpu_to_le32(min_perf);

        ret = scmi_do_xfer(handle, t);

        scmi_xfer_put(handle, t);
        return ret;
}

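/*
 * The scmi_perf_{limits,level}_{set,get} wrappers below prefer a
 * memory-mapped fast channel when one was discovered for the domain:
 * the value(s) are written or read directly and, on the set paths, the
 * associated doorbell is rung. Otherwise they fall back to the regular
 * message based helpers (the *_mb_* variants).
 */
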
static int scmi_perf_limits_set(const struct scmi_handle *handle, u32 domain,
                                u32 max_perf, u32 min_perf)
{
        struct scmi_perf_info *pi = handle->perf_priv;
        struct perf_dom_info *dom = pi->dom_info + domain;

        if (dom->fc_info && dom->fc_info->limit_set_addr) {
                iowrite32(max_perf, dom->fc_info->limit_set_addr);
                iowrite32(min_perf, dom->fc_info->limit_set_addr + 4);
                scmi_perf_fc_ring_db(dom->fc_info->limit_set_db);
                return 0;
        }

        return scmi_perf_mb_limits_set(handle, domain, max_perf, min_perf);
}

static int scmi_perf_mb_limits_get(const struct scmi_handle *handle, u32 domain,
                                   u32 *max_perf, u32 *min_perf)
{
        int ret;
        struct scmi_xfer *t;
        struct scmi_perf_get_limits *limits;

        ret = scmi_xfer_get_init(handle, PERF_LIMITS_GET, SCMI_PROTOCOL_PERF,
                                 sizeof(__le32), 0, &t);
        if (ret)
                return ret;

        put_unaligned_le32(domain, t->tx.buf);

        ret = scmi_do_xfer(handle, t);
        if (!ret) {
                limits = t->rx.buf;

                *max_perf = le32_to_cpu(limits->max_level);
                *min_perf = le32_to_cpu(limits->min_level);
        }

        scmi_xfer_put(handle, t);
        return ret;
}

static int scmi_perf_limits_get(const struct scmi_handle *handle, u32 domain,
                                u32 *max_perf, u32 *min_perf)
{
        struct scmi_perf_info *pi = handle->perf_priv;
        struct perf_dom_info *dom = pi->dom_info + domain;

        if (dom->fc_info && dom->fc_info->limit_get_addr) {
                *max_perf = ioread32(dom->fc_info->limit_get_addr);
                *min_perf = ioread32(dom->fc_info->limit_get_addr + 4);
                return 0;
        }

        return scmi_perf_mb_limits_get(handle, domain, max_perf, min_perf);
}

static int scmi_perf_mb_level_set(const struct scmi_handle *handle, u32 domain,
                                  u32 level, bool poll)
{
        int ret;
        struct scmi_xfer *t;
        struct scmi_perf_set_level *lvl;

        ret = scmi_xfer_get_init(handle, PERF_LEVEL_SET, SCMI_PROTOCOL_PERF,
                                 sizeof(*lvl), 0, &t);
        if (ret)
                return ret;

        t->hdr.poll_completion = poll;
        lvl = t->tx.buf;
        lvl->domain = cpu_to_le32(domain);
        lvl->level = cpu_to_le32(level);

        ret = scmi_do_xfer(handle, t);

        scmi_xfer_put(handle, t);
        return ret;
}

static int scmi_perf_level_set(const struct scmi_handle *handle, u32 domain,
                               u32 level, bool poll)
{
        struct scmi_perf_info *pi = handle->perf_priv;
        struct perf_dom_info *dom = pi->dom_info + domain;

        if (dom->fc_info && dom->fc_info->level_set_addr) {
                iowrite32(level, dom->fc_info->level_set_addr);
                scmi_perf_fc_ring_db(dom->fc_info->level_set_db);
                return 0;
        }

        return scmi_perf_mb_level_set(handle, domain, level, poll);
}

static int scmi_perf_mb_level_get(const struct scmi_handle *handle, u32 domain,
                                  u32 *level, bool poll)
{
        int ret;
        struct scmi_xfer *t;

        ret = scmi_xfer_get_init(handle, PERF_LEVEL_GET, SCMI_PROTOCOL_PERF,
                                 sizeof(u32), sizeof(u32), &t);
        if (ret)
                return ret;

        t->hdr.poll_completion = poll;
        put_unaligned_le32(domain, t->tx.buf);

        ret = scmi_do_xfer(handle, t);
        if (!ret)
                *level = get_unaligned_le32(t->rx.buf);

        scmi_xfer_put(handle, t);
        return ret;
}

static int scmi_perf_level_get(const struct scmi_handle *handle, u32 domain,
                               u32 *level, bool poll)
{
        struct scmi_perf_info *pi = handle->perf_priv;
        struct perf_dom_info *dom = pi->dom_info + domain;

        if (dom->fc_info && dom->fc_info->level_get_addr) {
                *level = ioread32(dom->fc_info->level_get_addr);
                return 0;
        }

        return scmi_perf_mb_level_get(handle, domain, level, poll);
}

static bool scmi_perf_fc_size_is_valid(u32 msg, u32 size)
{
        if ((msg == PERF_LEVEL_GET || msg == PERF_LEVEL_SET) && size == 4)
                return true;
        if ((msg == PERF_LIMITS_GET || msg == PERF_LIMITS_SET) && size == 8)
                return true;
        return false;
}

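/*
 * The sizes accepted above match the fast channel payloads: LEVEL_SET and
 * LEVEL_GET carry a single 32-bit performance level (4 bytes), while
 * LIMITS_SET and LIMITS_GET carry a max/min pair of 32-bit levels (8 bytes).
 */
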
static void
scmi_perf_domain_desc_fc(const struct scmi_handle *handle, u32 domain,
                         u32 message_id, void __iomem **p_addr,
                         struct scmi_fc_db_info **p_db)
{
        int ret;
        u32 flags;
        u64 phys_addr;
        u8 size;
        void __iomem *addr;
        struct scmi_xfer *t;
        struct scmi_fc_db_info *db;
        struct scmi_perf_get_fc_info *info;
        struct scmi_msg_resp_perf_desc_fc *resp;

        if (!p_addr)
                return;

        ret = scmi_xfer_get_init(handle, PERF_DESCRIBE_FASTCHANNEL,
                                 SCMI_PROTOCOL_PERF,
                                 sizeof(*info), sizeof(*resp), &t);
        if (ret)
                return;

        info = t->tx.buf;
        info->domain = cpu_to_le32(domain);
        info->message_id = cpu_to_le32(message_id);

        ret = scmi_do_xfer(handle, t);
        if (ret)
                goto err_xfer;

        resp = t->rx.buf;
        flags = le32_to_cpu(resp->attr);
        size = le32_to_cpu(resp->chan_size);
        if (!scmi_perf_fc_size_is_valid(message_id, size))
                goto err_xfer;

        phys_addr = le32_to_cpu(resp->chan_addr_low);
        phys_addr |= (u64)le32_to_cpu(resp->chan_addr_high) << 32;
        addr = devm_ioremap(handle->dev, phys_addr, size);
        if (!addr)
                goto err_xfer;
        *p_addr = addr;

        if (p_db && SUPPORTS_DOORBELL(flags)) {
                db = devm_kzalloc(handle->dev, sizeof(*db), GFP_KERNEL);
                if (!db)
                        goto err_xfer;

                size = 1 << DOORBELL_REG_WIDTH(flags);
                phys_addr = le32_to_cpu(resp->db_addr_low);
                phys_addr |= (u64)le32_to_cpu(resp->db_addr_high) << 32;
                addr = devm_ioremap(handle->dev, phys_addr, size);
                if (!addr)
                        goto err_xfer;

                db->addr = addr;
                db->width = size;
                db->set = le32_to_cpu(resp->db_set_lmask);
                db->set |= (u64)le32_to_cpu(resp->db_set_hmask) << 32;
                db->mask = le32_to_cpu(resp->db_preserve_lmask);
                db->mask |= (u64)le32_to_cpu(resp->db_preserve_hmask) << 32;
                *p_db = db;
        }
err_xfer:
        scmi_xfer_put(handle, t);
}

static void scmi_perf_domain_init_fc(const struct scmi_handle *handle,
                                     u32 domain, struct scmi_fc_info **p_fc)
{
        struct scmi_fc_info *fc;

        fc = devm_kzalloc(handle->dev, sizeof(*fc), GFP_KERNEL);
        if (!fc)
                return;

        scmi_perf_domain_desc_fc(handle, domain, PERF_LEVEL_SET,
                                 &fc->level_set_addr, &fc->level_set_db);
        scmi_perf_domain_desc_fc(handle, domain, PERF_LEVEL_GET,
                                 &fc->level_get_addr, NULL);
        scmi_perf_domain_desc_fc(handle, domain, PERF_LIMITS_SET,
                                 &fc->limit_set_addr, &fc->limit_set_db);
        scmi_perf_domain_desc_fc(handle, domain, PERF_LIMITS_GET,
                                 &fc->limit_get_addr, NULL);
        *p_fc = fc;
}

/* Device specific ops */
static int scmi_dev_domain_id(struct device *dev)
{
        struct of_phandle_args clkspec;

        if (of_parse_phandle_with_args(dev->of_node, "clocks", "#clock-cells",
                                       0, &clkspec))
                return -EINVAL;

        return clkspec.args[0];
}

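/*
 * A device's performance domain id is taken from the first cell of its
 * "clocks" phandle argument; with the SCMI devicetree binding this is
 * expected to reference the scmi_dvfs clock provider, so that cell
 * directly names the domain.
 */
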
static int scmi_dvfs_device_opps_add(const struct scmi_handle *handle,
                                     struct device *dev)
{
        int idx, ret, domain;
        unsigned long freq;
        struct scmi_opp *opp;
        struct perf_dom_info *dom;
        struct scmi_perf_info *pi = handle->perf_priv;

        domain = scmi_dev_domain_id(dev);
        if (domain < 0)
                return domain;

        dom = pi->dom_info + domain;

        for (opp = dom->opp, idx = 0; idx < dom->opp_count; idx++, opp++) {
                freq = opp->perf * dom->mult_factor;

                ret = dev_pm_opp_add(dev, freq, 0);
                if (ret) {
                        dev_warn(dev, "failed to add opp %luHz\n", freq);

                        while (idx-- > 0) {
                                freq = (--opp)->perf * dom->mult_factor;
                                dev_pm_opp_remove(dev, freq);
                        }
                        return ret;
                }
        }
        return 0;
}

static int scmi_dvfs_transition_latency_get(const struct scmi_handle *handle,
                                            struct device *dev)
{
        struct perf_dom_info *dom;
        struct scmi_perf_info *pi = handle->perf_priv;
        int domain = scmi_dev_domain_id(dev);

        if (domain < 0)
                return domain;

        dom = pi->dom_info + domain;
        /* uS to nS */
        return dom->opp[dom->opp_count - 1].trans_latency_us * 1000;
}

static int scmi_dvfs_freq_set(const struct scmi_handle *handle, u32 domain,
                              unsigned long freq, bool poll)
{
        struct scmi_perf_info *pi = handle->perf_priv;
        struct perf_dom_info *dom = pi->dom_info + domain;

        return scmi_perf_level_set(handle, domain, freq / dom->mult_factor,
                                   poll);
}

static int scmi_dvfs_freq_get(const struct scmi_handle *handle, u32 domain,
                              unsigned long *freq, bool poll)
{
        int ret;
        u32 level;
        struct scmi_perf_info *pi = handle->perf_priv;
        struct perf_dom_info *dom = pi->dom_info + domain;

        ret = scmi_perf_level_get(handle, domain, &level, poll);
        if (!ret)
                *freq = level * dom->mult_factor;

        return ret;
}

static int scmi_dvfs_est_power_get(const struct scmi_handle *handle, u32 domain,
                                   unsigned long *freq, unsigned long *power)
{
        struct scmi_perf_info *pi = handle->perf_priv;
        struct perf_dom_info *dom;
        unsigned long opp_freq;
        int idx, ret = -EINVAL;
        struct scmi_opp *opp;

        dom = pi->dom_info + domain;
        if (!dom)
                return -EIO;

        for (opp = dom->opp, idx = 0; idx < dom->opp_count; idx++, opp++) {
                opp_freq = opp->perf * dom->mult_factor;
                if (opp_freq < *freq)
                        continue;

                *freq = opp_freq;
                *power = opp->power;
                ret = 0;
                break;
        }

        return ret;
}

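/*
 * scmi_dvfs_est_power_get() walks the OPP table (sorted in ascending perf
 * order) and picks the first OPP whose frequency is at or above the
 * requested one, reporting back that frequency and its estimated power.
 */
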
static struct scmi_perf_ops perf_ops = {
        .limits_set = scmi_perf_limits_set,
        .limits_get = scmi_perf_limits_get,
        .level_set = scmi_perf_level_set,
        .level_get = scmi_perf_level_get,
        .device_domain_id = scmi_dev_domain_id,
        .transition_latency_get = scmi_dvfs_transition_latency_get,
        .device_opps_add = scmi_dvfs_device_opps_add,
        .freq_set = scmi_dvfs_freq_set,
        .freq_get = scmi_dvfs_freq_get,
        .est_power_get = scmi_dvfs_est_power_get,
};

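/*
 * A sketch of how a consumer might use these ops through an scmi_handle
 * (e.g. a cpufreq driver); the local variable names are illustrative only:
 *
 *      int domain = handle->perf_ops->device_domain_id(cpu_dev);
 *
 *      handle->perf_ops->device_opps_add(handle, cpu_dev);
 *      handle->perf_ops->freq_set(handle, domain, target_freq_hz, false);
 */
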
static int scmi_perf_protocol_init(struct scmi_handle *handle)
{
        int domain;
        u32 version;
        struct scmi_perf_info *pinfo;

        scmi_version_get(handle, SCMI_PROTOCOL_PERF, &version);

        dev_dbg(handle->dev, "Performance Version %d.%d\n",
                PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));

        pinfo = devm_kzalloc(handle->dev, sizeof(*pinfo), GFP_KERNEL);
        if (!pinfo)
                return -ENOMEM;

        scmi_perf_attributes_get(handle, pinfo);

        pinfo->dom_info = devm_kcalloc(handle->dev, pinfo->num_domains,
                                       sizeof(*pinfo->dom_info), GFP_KERNEL);
        if (!pinfo->dom_info)
                return -ENOMEM;

        for (domain = 0; domain < pinfo->num_domains; domain++) {
                struct perf_dom_info *dom = pinfo->dom_info + domain;

                scmi_perf_domain_attributes_get(handle, domain, dom);
                scmi_perf_describe_levels_get(handle, domain, dom);

                if (dom->perf_fastchannels)
                        scmi_perf_domain_init_fc(handle, domain, &dom->fc_info);
        }

        handle->perf_ops = &perf_ops;
        handle->perf_priv = pinfo;

        return 0;
}

static int __init scmi_perf_init(void)
{
        return scmi_protocol_register(SCMI_PROTOCOL_PERF,
                                      &scmi_perf_protocol_init);
}
subsys_initcall(scmi_perf_init);