1 // SPDX-License-Identifier: GPL-2.0
3 * System Control and Management Interface (SCMI) Performance Protocol
5 * Copyright (C) 2018 ARM Ltd.
9 #include <linux/platform_device.h>
10 #include <linux/pm_opp.h>
11 #include <linux/sort.h>
/*
 * SCMI Performance protocol message IDs (SCMI spec, performance domain
 * management protocol).
 *
 * PERF_LEVEL_SET (0x7) and PERF_LEVEL_GET (0x8) are restored here: both
 * are used by scmi_perf_level_set()/scmi_perf_level_get() below but were
 * missing from this enumeration.
 */
enum scmi_performance_protocol_cmd {
	PERF_DOMAIN_ATTRIBUTES = 0x3,
	PERF_DESCRIBE_LEVELS = 0x4,
	PERF_LIMITS_SET = 0x5,
	PERF_LIMITS_GET = 0x6,
	PERF_LEVEL_SET = 0x7,
	PERF_LEVEL_GET = 0x8,
	PERF_NOTIFY_LIMITS = 0x9,
	PERF_NOTIFY_LEVEL = 0xa,
};
32 struct scmi_msg_resp_perf_attributes
{
35 #define POWER_SCALE_IN_MILLIWATT(x) ((x) & BIT(0))
36 __le32 stats_addr_low
;
37 __le32 stats_addr_high
;
41 struct scmi_msg_resp_perf_domain_attributes
{
43 #define SUPPORTS_SET_LIMITS(x) ((x) & BIT(31))
44 #define SUPPORTS_SET_PERF_LVL(x) ((x) & BIT(30))
45 #define SUPPORTS_PERF_LIMIT_NOTIFY(x) ((x) & BIT(29))
46 #define SUPPORTS_PERF_LEVEL_NOTIFY(x) ((x) & BIT(28))
48 __le32 sustained_freq_khz
;
49 __le32 sustained_perf_level
;
50 u8 name
[SCMI_MAX_STR_SIZE
];
53 struct scmi_msg_perf_describe_levels
{
58 struct scmi_perf_set_limits
{
64 struct scmi_perf_get_limits
{
69 struct scmi_perf_set_level
{
74 struct scmi_perf_notify_level_or_limits
{
79 struct scmi_msg_resp_perf_describe_levels
{
85 __le16 transition_latency_us
;
90 struct perf_dom_info
{
93 bool perf_limit_notify
;
94 bool perf_level_notify
;
96 u32 sustained_freq_khz
;
97 u32 sustained_perf_level
;
99 char name
[SCMI_MAX_STR_SIZE
];
100 struct scmi_opp opp
[MAX_OPPS
];
103 struct scmi_perf_info
{
108 struct perf_dom_info
*dom_info
;
111 static int scmi_perf_attributes_get(const struct scmi_handle
*handle
,
112 struct scmi_perf_info
*pi
)
116 struct scmi_msg_resp_perf_attributes
*attr
;
118 ret
= scmi_xfer_get_init(handle
, PROTOCOL_ATTRIBUTES
,
119 SCMI_PROTOCOL_PERF
, 0, sizeof(*attr
), &t
);
125 ret
= scmi_do_xfer(handle
, t
);
127 u16 flags
= le16_to_cpu(attr
->flags
);
129 pi
->num_domains
= le16_to_cpu(attr
->num_domains
);
130 pi
->power_scale_mw
= POWER_SCALE_IN_MILLIWATT(flags
);
131 pi
->stats_addr
= le32_to_cpu(attr
->stats_addr_low
) |
132 (u64
)le32_to_cpu(attr
->stats_addr_high
) << 32;
133 pi
->stats_size
= le32_to_cpu(attr
->stats_size
);
136 scmi_xfer_put(handle
, t
);
141 scmi_perf_domain_attributes_get(const struct scmi_handle
*handle
, u32 domain
,
142 struct perf_dom_info
*dom_info
)
146 struct scmi_msg_resp_perf_domain_attributes
*attr
;
148 ret
= scmi_xfer_get_init(handle
, PERF_DOMAIN_ATTRIBUTES
,
149 SCMI_PROTOCOL_PERF
, sizeof(domain
),
154 *(__le32
*)t
->tx
.buf
= cpu_to_le32(domain
);
157 ret
= scmi_do_xfer(handle
, t
);
159 u32 flags
= le32_to_cpu(attr
->flags
);
161 dom_info
->set_limits
= SUPPORTS_SET_LIMITS(flags
);
162 dom_info
->set_perf
= SUPPORTS_SET_PERF_LVL(flags
);
163 dom_info
->perf_limit_notify
= SUPPORTS_PERF_LIMIT_NOTIFY(flags
);
164 dom_info
->perf_level_notify
= SUPPORTS_PERF_LEVEL_NOTIFY(flags
);
165 dom_info
->sustained_freq_khz
=
166 le32_to_cpu(attr
->sustained_freq_khz
);
167 dom_info
->sustained_perf_level
=
168 le32_to_cpu(attr
->sustained_perf_level
);
169 dom_info
->mult_factor
= (dom_info
->sustained_freq_khz
* 1000) /
170 dom_info
->sustained_perf_level
;
171 memcpy(dom_info
->name
, attr
->name
, SCMI_MAX_STR_SIZE
);
174 scmi_xfer_put(handle
, t
);
178 static int opp_cmp_func(const void *opp1
, const void *opp2
)
180 const struct scmi_opp
*t1
= opp1
, *t2
= opp2
;
182 return t1
->perf
- t2
->perf
;
186 scmi_perf_describe_levels_get(const struct scmi_handle
*handle
, u32 domain
,
187 struct perf_dom_info
*perf_dom
)
191 u16 num_returned
, num_remaining
;
193 struct scmi_opp
*opp
;
194 struct scmi_msg_perf_describe_levels
*dom_info
;
195 struct scmi_msg_resp_perf_describe_levels
*level_info
;
197 ret
= scmi_xfer_get_init(handle
, PERF_DESCRIBE_LEVELS
,
198 SCMI_PROTOCOL_PERF
, sizeof(*dom_info
), 0, &t
);
202 dom_info
= t
->tx
.buf
;
203 level_info
= t
->rx
.buf
;
206 dom_info
->domain
= cpu_to_le32(domain
);
207 /* Set the number of OPPs to be skipped/already read */
208 dom_info
->level_index
= cpu_to_le32(tot_opp_cnt
);
210 ret
= scmi_do_xfer(handle
, t
);
214 num_returned
= le16_to_cpu(level_info
->num_returned
);
215 num_remaining
= le16_to_cpu(level_info
->num_remaining
);
216 if (tot_opp_cnt
+ num_returned
> MAX_OPPS
) {
217 dev_err(handle
->dev
, "No. of OPPs exceeded MAX_OPPS");
221 opp
= &perf_dom
->opp
[tot_opp_cnt
];
222 for (cnt
= 0; cnt
< num_returned
; cnt
++, opp
++) {
223 opp
->perf
= le32_to_cpu(level_info
->opp
[cnt
].perf_val
);
224 opp
->power
= le32_to_cpu(level_info
->opp
[cnt
].power
);
225 opp
->trans_latency_us
= le16_to_cpu
226 (level_info
->opp
[cnt
].transition_latency_us
);
228 dev_dbg(handle
->dev
, "Level %d Power %d Latency %dus\n",
229 opp
->perf
, opp
->power
, opp
->trans_latency_us
);
232 tot_opp_cnt
+= num_returned
;
234 * check for both returned and remaining to avoid infinite
235 * loop due to buggy firmware
237 } while (num_returned
&& num_remaining
);
239 perf_dom
->opp_count
= tot_opp_cnt
;
240 scmi_xfer_put(handle
, t
);
242 sort(perf_dom
->opp
, tot_opp_cnt
, sizeof(*opp
), opp_cmp_func
, NULL
);
246 static int scmi_perf_limits_set(const struct scmi_handle
*handle
, u32 domain
,
247 u32 max_perf
, u32 min_perf
)
251 struct scmi_perf_set_limits
*limits
;
253 ret
= scmi_xfer_get_init(handle
, PERF_LIMITS_SET
, SCMI_PROTOCOL_PERF
,
254 sizeof(*limits
), 0, &t
);
259 limits
->domain
= cpu_to_le32(domain
);
260 limits
->max_level
= cpu_to_le32(max_perf
);
261 limits
->min_level
= cpu_to_le32(min_perf
);
263 ret
= scmi_do_xfer(handle
, t
);
265 scmi_xfer_put(handle
, t
);
269 static int scmi_perf_limits_get(const struct scmi_handle
*handle
, u32 domain
,
270 u32
*max_perf
, u32
*min_perf
)
274 struct scmi_perf_get_limits
*limits
;
276 ret
= scmi_xfer_get_init(handle
, PERF_LIMITS_GET
, SCMI_PROTOCOL_PERF
,
277 sizeof(__le32
), 0, &t
);
281 *(__le32
*)t
->tx
.buf
= cpu_to_le32(domain
);
283 ret
= scmi_do_xfer(handle
, t
);
287 *max_perf
= le32_to_cpu(limits
->max_level
);
288 *min_perf
= le32_to_cpu(limits
->min_level
);
291 scmi_xfer_put(handle
, t
);
295 static int scmi_perf_level_set(const struct scmi_handle
*handle
, u32 domain
,
296 u32 level
, bool poll
)
300 struct scmi_perf_set_level
*lvl
;
302 ret
= scmi_xfer_get_init(handle
, PERF_LEVEL_SET
, SCMI_PROTOCOL_PERF
,
303 sizeof(*lvl
), 0, &t
);
307 t
->hdr
.poll_completion
= poll
;
309 lvl
->domain
= cpu_to_le32(domain
);
310 lvl
->level
= cpu_to_le32(level
);
312 ret
= scmi_do_xfer(handle
, t
);
314 scmi_xfer_put(handle
, t
);
318 static int scmi_perf_level_get(const struct scmi_handle
*handle
, u32 domain
,
319 u32
*level
, bool poll
)
324 ret
= scmi_xfer_get_init(handle
, PERF_LEVEL_GET
, SCMI_PROTOCOL_PERF
,
325 sizeof(u32
), sizeof(u32
), &t
);
329 t
->hdr
.poll_completion
= poll
;
330 *(__le32
*)t
->tx
.buf
= cpu_to_le32(domain
);
332 ret
= scmi_do_xfer(handle
, t
);
334 *level
= le32_to_cpu(*(__le32
*)t
->rx
.buf
);
336 scmi_xfer_put(handle
, t
);
340 /* Device specific ops */
341 static int scmi_dev_domain_id(struct device
*dev
)
343 struct of_phandle_args clkspec
;
345 if (of_parse_phandle_with_args(dev
->of_node
, "clocks", "#clock-cells",
349 return clkspec
.args
[0];
352 static int scmi_dvfs_device_opps_add(const struct scmi_handle
*handle
,
355 int idx
, ret
, domain
;
357 struct scmi_opp
*opp
;
358 struct perf_dom_info
*dom
;
359 struct scmi_perf_info
*pi
= handle
->perf_priv
;
361 domain
= scmi_dev_domain_id(dev
);
365 dom
= pi
->dom_info
+ domain
;
369 for (opp
= dom
->opp
, idx
= 0; idx
< dom
->opp_count
; idx
++, opp
++) {
370 freq
= opp
->perf
* dom
->mult_factor
;
372 ret
= dev_pm_opp_add(dev
, freq
, 0);
374 dev_warn(dev
, "failed to add opp %luHz\n", freq
);
377 freq
= (--opp
)->perf
* dom
->mult_factor
;
378 dev_pm_opp_remove(dev
, freq
);
386 static int scmi_dvfs_transition_latency_get(const struct scmi_handle
*handle
,
389 struct perf_dom_info
*dom
;
390 struct scmi_perf_info
*pi
= handle
->perf_priv
;
391 int domain
= scmi_dev_domain_id(dev
);
396 dom
= pi
->dom_info
+ domain
;
401 return dom
->opp
[dom
->opp_count
- 1].trans_latency_us
* 1000;
404 static int scmi_dvfs_freq_set(const struct scmi_handle
*handle
, u32 domain
,
405 unsigned long freq
, bool poll
)
407 struct scmi_perf_info
*pi
= handle
->perf_priv
;
408 struct perf_dom_info
*dom
= pi
->dom_info
+ domain
;
410 return scmi_perf_level_set(handle
, domain
, freq
/ dom
->mult_factor
,
414 static int scmi_dvfs_freq_get(const struct scmi_handle
*handle
, u32 domain
,
415 unsigned long *freq
, bool poll
)
419 struct scmi_perf_info
*pi
= handle
->perf_priv
;
420 struct perf_dom_info
*dom
= pi
->dom_info
+ domain
;
422 ret
= scmi_perf_level_get(handle
, domain
, &level
, poll
);
424 *freq
= level
* dom
->mult_factor
;
429 static struct scmi_perf_ops perf_ops
= {
430 .limits_set
= scmi_perf_limits_set
,
431 .limits_get
= scmi_perf_limits_get
,
432 .level_set
= scmi_perf_level_set
,
433 .level_get
= scmi_perf_level_get
,
434 .device_domain_id
= scmi_dev_domain_id
,
435 .transition_latency_get
= scmi_dvfs_transition_latency_get
,
436 .device_opps_add
= scmi_dvfs_device_opps_add
,
437 .freq_set
= scmi_dvfs_freq_set
,
438 .freq_get
= scmi_dvfs_freq_get
,
441 static int scmi_perf_protocol_init(struct scmi_handle
*handle
)
445 struct scmi_perf_info
*pinfo
;
447 scmi_version_get(handle
, SCMI_PROTOCOL_PERF
, &version
);
449 dev_dbg(handle
->dev
, "Performance Version %d.%d\n",
450 PROTOCOL_REV_MAJOR(version
), PROTOCOL_REV_MINOR(version
));
452 pinfo
= devm_kzalloc(handle
->dev
, sizeof(*pinfo
), GFP_KERNEL
);
456 scmi_perf_attributes_get(handle
, pinfo
);
458 pinfo
->dom_info
= devm_kcalloc(handle
->dev
, pinfo
->num_domains
,
459 sizeof(*pinfo
->dom_info
), GFP_KERNEL
);
460 if (!pinfo
->dom_info
)
463 for (domain
= 0; domain
< pinfo
->num_domains
; domain
++) {
464 struct perf_dom_info
*dom
= pinfo
->dom_info
+ domain
;
466 scmi_perf_domain_attributes_get(handle
, domain
, dom
);
467 scmi_perf_describe_levels_get(handle
, domain
, dom
);
470 handle
->perf_ops
= &perf_ops
;
471 handle
->perf_priv
= pinfo
;
476 static int __init
scmi_perf_init(void)
478 return scmi_protocol_register(SCMI_PROTOCOL_PERF
,
479 &scmi_perf_protocol_init
);
481 subsys_initcall(scmi_perf_init
);