// SPDX-License-Identifier: GPL-2.0-only
/*
 * RISC-V SBI CPU idle driver.
 *
 * Copyright (c) 2021 Western Digital Corporation or its affiliates.
 * Copyright (c) 2022 Ventana Micro Systems Inc.
 */

#define pr_fmt(fmt) "cpuidle-riscv-sbi: " fmt

#include <linux/cleanup.h>
#include <linux/cpuhotplug.h>
#include <linux/cpuidle.h>
#include <linux/cpumask.h>
#include <linux/cpu_pm.h>
#include <linux/cpu_cooling.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <asm/cpuidle.h>
#include <asm/sbi.h>
#include <asm/smp.h>
#include <asm/suspend.h>

#include "dt_idle_states.h"
#include "dt_idle_genpd.h"
struct sbi_cpuidle_data {
	u32 *states;
	struct device *dev;
};

struct sbi_domain_state {
	bool available;
	u32 state;
};

static DEFINE_PER_CPU_READ_MOSTLY(struct sbi_cpuidle_data, sbi_cpuidle_data);
static DEFINE_PER_CPU(struct sbi_domain_state, domain_state);
static bool sbi_cpuidle_use_osi;
static bool sbi_cpuidle_use_cpuhp;
static bool sbi_cpuidle_pd_allow_domain_state;
static inline void sbi_set_domain_state(u32 state)
{
	struct sbi_domain_state *data = this_cpu_ptr(&domain_state);

	data->available = true;
	data->state = state;
}
static inline u32 sbi_get_domain_state(void)
{
	struct sbi_domain_state *data = this_cpu_ptr(&domain_state);

	return data->state;
}
static inline void sbi_clear_domain_state(void)
{
	struct sbi_domain_state *data = this_cpu_ptr(&domain_state);

	data->available = false;
}
static inline bool sbi_is_domain_state_available(void)
{
	struct sbi_domain_state *data = this_cpu_ptr(&domain_state);

	return data->available;
}
static __cpuidle int sbi_cpuidle_enter_state(struct cpuidle_device *dev,
					     struct cpuidle_driver *drv, int idx)
{
	u32 *states = __this_cpu_read(sbi_cpuidle_data.states);
	u32 state = states[idx];

	if (state & SBI_HSM_SUSP_NON_RET_BIT)
		return CPU_PM_CPU_IDLE_ENTER_PARAM(riscv_sbi_hart_suspend, idx, state);
	else
		return CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM(riscv_sbi_hart_suspend,
							     idx, state);
}
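/*
 * Background note (from the SBI HSM extension, not from this file): the top
 * bit of the suspend type marks a non-retentive suspend, where the hart loses
 * its register state and resumes through the SBI resume path. That is why the
 * non-retentive case above goes through the full CPU_PM notifier sequence,
 * while a retentive suspend can use the cheaper RETENTION variant.
 */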
static __cpuidle int __sbi_enter_domain_idle_state(struct cpuidle_device *dev,
						   struct cpuidle_driver *drv, int idx,
						   bool s2idle)
{
	struct sbi_cpuidle_data *data = this_cpu_ptr(&sbi_cpuidle_data);
	u32 *states = data->states;
	struct device *pd_dev = data->dev;
	u32 state;
	int ret;

	ret = cpu_pm_enter();
	if (ret)
		return -1;

	/* Do runtime PM to manage a hierarchical CPU topology. */
	if (s2idle)
		dev_pm_genpd_suspend(pd_dev);
	else
		pm_runtime_put_sync_suspend(pd_dev);

	ct_cpuidle_enter();

	if (sbi_is_domain_state_available())
		state = sbi_get_domain_state();
	else
		state = states[idx];

	ret = riscv_sbi_hart_suspend(state) ? -1 : idx;

	ct_cpuidle_exit();

	if (s2idle)
		dev_pm_genpd_resume(pd_dev);
	else
		pm_runtime_get_sync(pd_dev);

	cpu_pm_exit();

	/* Clear the domain state to start fresh when back from idle. */
	sbi_clear_domain_state();

	return ret;
}
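/*
 * Background note: runtime PM is not usable while entering s2idle, so the
 * s2idle variant drives the PM domain directly through dev_pm_genpd_suspend()
 * and dev_pm_genpd_resume() instead of the pm_runtime_*() calls used on the
 * normal idle path.
 */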
static int sbi_enter_domain_idle_state(struct cpuidle_device *dev,
				       struct cpuidle_driver *drv, int idx)
{
	return __sbi_enter_domain_idle_state(dev, drv, idx, false);
}
static int sbi_enter_s2idle_domain_idle_state(struct cpuidle_device *dev,
					      struct cpuidle_driver *drv,
					      int idx)
{
	return __sbi_enter_domain_idle_state(dev, drv, idx, true);
}
static int sbi_cpuidle_cpuhp_up(unsigned int cpu)
{
	struct device *pd_dev = __this_cpu_read(sbi_cpuidle_data.dev);

	if (pd_dev)
		pm_runtime_get_sync(pd_dev);

	return 0;
}
static int sbi_cpuidle_cpuhp_down(unsigned int cpu)
{
	struct device *pd_dev = __this_cpu_read(sbi_cpuidle_data.dev);

	if (pd_dev) {
		pm_runtime_put_sync(pd_dev);
		/* Clear domain state to start fresh at next online. */
		sbi_clear_domain_state();
	}

	return 0;
}
static void sbi_idle_init_cpuhp(void)
{
	int err;

	if (!sbi_cpuidle_use_cpuhp)
		return;

	err = cpuhp_setup_state_nocalls(CPUHP_AP_CPU_PM_STARTING,
					"cpuidle/sbi:online",
					sbi_cpuidle_cpuhp_up,
					sbi_cpuidle_cpuhp_down);
	if (err)
		pr_warn("Failed %d while setup cpuhp state\n", err);
}
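/*
 * Background note: the CPUHP_AP_CPU_PM_STARTING callbacks registered above
 * keep the genpd runtime PM usage count balanced across CPU hotplug: a CPU
 * going offline drops its reference on the PM domain and re-takes it when it
 * comes back online.
 */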
static const struct of_device_id sbi_cpuidle_state_match[] = {
	{ .compatible = "riscv,idle-state",
	  .data = sbi_cpuidle_enter_state },
	{ },
};
static int sbi_dt_parse_state_node(struct device_node *np, u32 *state)
{
	int err = of_property_read_u32(np, "riscv,sbi-suspend-param", state);

	if (err) {
		pr_warn("%pOF missing riscv,sbi-suspend-param property\n", np);
		return err;
	}

	if (!riscv_sbi_suspend_state_is_valid(*state)) {
		pr_warn("Invalid SBI suspend state %#x\n", *state);
		return -EINVAL;
	}

	return 0;
}
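/*
 * Illustrative sketch of an idle-state node consumed by
 * sbi_dt_parse_state_node() above; the node name and all values are
 * hypothetical examples, not taken from this driver:
 *
 *	cpu_sleep: cpu-sleep {
 *		compatible = "riscv,idle-state";
 *		riscv,sbi-suspend-param = <0x10000000>;
 *		entry-latency-us = <40>;
 *		exit-latency-us = <100>;
 *		min-residency-us = <300>;
 *	};
 */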
static int sbi_dt_cpu_init_topology(struct cpuidle_driver *drv,
				    struct sbi_cpuidle_data *data,
				    unsigned int state_count, int cpu)
{
	/* Currently limit the hierarchical topology to be used in OSI mode. */
	if (!sbi_cpuidle_use_osi)
		return 0;

	data->dev = dt_idle_attach_cpu(cpu, "sbi");
	if (IS_ERR_OR_NULL(data->dev))
		return PTR_ERR_OR_ZERO(data->dev);

	/*
	 * Using the deepest state for the CPU to trigger a potential selection
	 * of a shared state for the domain, assumes the domain states are all
	 * deeper states.
	 */
	drv->states[state_count - 1].flags |= CPUIDLE_FLAG_RCU_IDLE;
	drv->states[state_count - 1].enter = sbi_enter_domain_idle_state;
	drv->states[state_count - 1].enter_s2idle =
		sbi_enter_s2idle_domain_idle_state;
	sbi_cpuidle_use_cpuhp = true;

	return 0;
}
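/*
 * Background note: CPUIDLE_FLAG_RCU_IDLE on the deepest state tells the
 * cpuidle core that the enter callback manages RCU/context tracking itself
 * (the ct_cpuidle_enter()/ct_cpuidle_exit() calls in
 * __sbi_enter_domain_idle_state()), which is needed because genpd and runtime
 * PM work is done around the actual SBI suspend.
 */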
static int sbi_cpuidle_dt_init_states(struct device *dev,
				      struct cpuidle_driver *drv,
				      unsigned int cpu,
				      unsigned int state_count)
{
	struct sbi_cpuidle_data *data = per_cpu_ptr(&sbi_cpuidle_data, cpu);
	struct device_node *state_node;
	u32 *states;
	int i, ret;

	struct device_node *cpu_node __free(device_node) = of_cpu_device_node_get(cpu);
	if (!cpu_node)
		return -ENODEV;

	states = devm_kcalloc(dev, state_count, sizeof(*states), GFP_KERNEL);
	if (!states)
		return -ENOMEM;

	/* Parse SBI specific details from state DT nodes */
	for (i = 1; i < state_count; i++) {
		state_node = of_get_cpu_state_node(cpu_node, i - 1);
		if (!state_node)
			break;

		ret = sbi_dt_parse_state_node(state_node, &states[i]);
		of_node_put(state_node);

		if (ret)
			return ret;

		pr_debug("sbi-state %#x index %d\n", states[i], i);
	}
	if (i != state_count)
		return -ENODEV;

	/* Initialize optional data, used for the hierarchical topology. */
	ret = sbi_dt_cpu_init_topology(drv, data, state_count, cpu);
	if (ret < 0)
		return ret;

	/* Store states in the per-cpu struct. */
	data->states = states;

	return 0;
}
static void sbi_cpuidle_deinit_cpu(int cpu)
{
	struct sbi_cpuidle_data *data = per_cpu_ptr(&sbi_cpuidle_data, cpu);

	dt_idle_detach_cpu(data->dev);
	sbi_cpuidle_use_cpuhp = false;
}
static int sbi_cpuidle_init_cpu(struct device *dev, int cpu)
{
	struct cpuidle_driver *drv;
	unsigned int state_count = 0;
	int ret = 0;

	drv = devm_kzalloc(dev, sizeof(*drv), GFP_KERNEL);
	if (!drv)
		return -ENOMEM;

	drv->name = "sbi_cpuidle";
	drv->owner = THIS_MODULE;
	drv->cpumask = (struct cpumask *)cpumask_of(cpu);

	/* RISC-V architectural WFI to be represented as state index 0. */
	drv->states[0].enter = sbi_cpuidle_enter_state;
	drv->states[0].exit_latency = 1;
	drv->states[0].target_residency = 1;
	drv->states[0].power_usage = UINT_MAX;
	strcpy(drv->states[0].name, "WFI");
	strcpy(drv->states[0].desc, "RISC-V WFI");

	/*
	 * If no DT idle states are detected (ret == 0) let the driver
	 * initialization fail accordingly since there is no reason to
	 * initialize the idle driver if only wfi is supported, the
	 * default architectural back-end already executes wfi
	 * on idle entry.
	 */
	ret = dt_init_idle_driver(drv, sbi_cpuidle_state_match, 1);
	if (ret <= 0) {
		pr_debug("HART%ld: failed to parse DT idle states\n",
			 cpuid_to_hartid_map(cpu));
		return ret ? : -ENODEV;
	}
	state_count = ret + 1; /* Include WFI state as well */

	/* Initialize idle states from DT. */
	ret = sbi_cpuidle_dt_init_states(dev, drv, cpu, state_count);
	if (ret) {
		pr_err("HART%ld: failed to init idle states\n",
		       cpuid_to_hartid_map(cpu));
		return ret;
	}

	ret = cpuidle_register(drv, NULL);
	if (ret)
		goto deinit;

	cpuidle_cooling_register(drv);

	return 0;
deinit:
	sbi_cpuidle_deinit_cpu(cpu);
	return ret;
}
static void sbi_cpuidle_domain_sync_state(struct device *dev)
{
	/*
	 * All devices have now been attached/probed to the PM domain
	 * topology, hence it's fine to allow domain states to be picked.
	 */
	sbi_cpuidle_pd_allow_domain_state = true;
}
#ifdef CONFIG_DT_IDLE_GENPD

static int sbi_cpuidle_pd_power_off(struct generic_pm_domain *pd)
{
	struct genpd_power_state *state = &pd->states[pd->state_idx];
	u32 *pd_state;

	if (!state->data)
		return 0;

	if (!sbi_cpuidle_pd_allow_domain_state)
		return -EBUSY;

	/* OSI mode is enabled, set the corresponding domain state. */
	pd_state = state->data;
	sbi_set_domain_state(*pd_state);

	return 0;
}
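/*
 * Background note: genpd calls this ->power_off hook when the last CPU in the
 * domain enters idle. The selected domain idle state is only stashed per-CPU
 * here via sbi_set_domain_state(); the actual SBI suspend request is issued
 * by __sbi_enter_domain_idle_state() on that last CPU.
 */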
struct sbi_pd_provider {
	struct list_head link;
	struct device_node *node;
};

static LIST_HEAD(sbi_pd_providers);
static int sbi_pd_init(struct device_node *np)
{
	struct generic_pm_domain *pd;
	struct sbi_pd_provider *pd_provider;
	struct dev_power_governor *pd_gov;
	int ret = -ENOMEM;

	pd = dt_idle_pd_alloc(np, sbi_dt_parse_state_node);
	if (!pd)
		goto out;

	pd_provider = kzalloc(sizeof(*pd_provider), GFP_KERNEL);
	if (!pd_provider)
		goto free_pd;

	pd->flags |= GENPD_FLAG_IRQ_SAFE | GENPD_FLAG_CPU_DOMAIN;

	/* Allow power off when OSI is available. */
	if (sbi_cpuidle_use_osi)
		pd->power_off = sbi_cpuidle_pd_power_off;
	else
		pd->flags |= GENPD_FLAG_ALWAYS_ON;

	/* Use governor for CPU PM domains if it has some states to manage. */
	pd_gov = pd->states ? &pm_domain_cpu_gov : NULL;

	ret = pm_genpd_init(pd, pd_gov, false);
	if (ret)
		goto free_pd_prov;

	ret = of_genpd_add_provider_simple(np, pd);
	if (ret)
		goto remove_pd;

	pd_provider->node = of_node_get(np);
	list_add(&pd_provider->link, &sbi_pd_providers);

	pr_debug("init PM domain %s\n", pd->name);
	return 0;

remove_pd:
	pm_genpd_remove(pd);
free_pd_prov:
	kfree(pd_provider);
free_pd:
	dt_idle_pd_free(pd);
out:
	pr_err("failed to init PM domain ret=%d %pOF\n", ret, np);
	return ret;
}
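/*
 * Background note: GENPD_FLAG_IRQ_SAFE lets the domain be powered off from
 * the atomic cpuidle path, and GENPD_FLAG_CPU_DOMAIN marks it as a CPU PM
 * domain so the cpu governor (pm_domain_cpu_gov) can account for CPU wakeup
 * latency constraints. Without OSI support the domain is kept always on.
 */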
static void sbi_pd_remove(void)
{
	struct sbi_pd_provider *pd_provider, *it;
	struct generic_pm_domain *genpd;

	list_for_each_entry_safe(pd_provider, it, &sbi_pd_providers, link) {
		of_genpd_del_provider(pd_provider->node);

		genpd = of_genpd_remove_last(pd_provider->node);
		if (!IS_ERR(genpd))
			kfree(genpd);

		of_node_put(pd_provider->node);
		list_del(&pd_provider->link);
		kfree(pd_provider);
	}
}
static int sbi_genpd_probe(struct device_node *np)
{
	int ret = 0, pd_count = 0;

	if (!np)
		return -ENODEV;

	/*
	 * Parse child nodes for the "#power-domain-cells" property and
	 * initialize a genpd/genpd-of-provider pair when it's found.
	 */
	for_each_child_of_node_scoped(np, node) {
		if (!of_property_present(node, "#power-domain-cells"))
			continue;

		ret = sbi_pd_init(node);
		if (ret)
			goto remove_pd;

		pd_count++;
	}

	/* Bail out if not using the hierarchical CPU topology. */
	if (!pd_count)
		goto no_pd;

	/* Link genpd masters/subdomains to model the CPU topology. */
	ret = dt_idle_pd_init_topology(np);
	if (ret)
		goto remove_pd;

	return 0;

remove_pd:
	sbi_pd_remove();
	pr_err("failed to create CPU PM domains ret=%d\n", ret);
no_pd:
	return ret;
}
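/*
 * Illustrative sketch of the DT layout this probe walks; the node names and
 * the referenced domain idle state are hypothetical examples:
 *
 *	cpus {
 *		power-domains {
 *			CLUSTER_PD: cluster-pd {
 *				#power-domain-cells = <0>;
 *				domain-idle-states = <&CLUSTER_SLEEP>;
 *			};
 *		};
 *	};
 *
 * CPU nodes then reference such a domain through "power-domains" and
 * "power-domain-names", which is what the OSI detection in
 * sbi_cpuidle_probe() checks for.
 */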
#else

static inline int sbi_genpd_probe(struct device_node *np)
{
	return 0;
}

#endif
static int sbi_cpuidle_probe(struct platform_device *pdev)
{
	int cpu, ret;
	struct cpuidle_driver *drv;
	struct cpuidle_device *dev;
	struct device_node *np, *pds_node;

	/* Detect OSI support based on CPU DT nodes */
	sbi_cpuidle_use_osi = true;
	for_each_possible_cpu(cpu) {
		np = of_cpu_device_node_get(cpu);
		if (np &&
		    of_property_present(np, "power-domains") &&
		    of_property_present(np, "power-domain-names")) {
			continue;
		} else {
			sbi_cpuidle_use_osi = false;
			break;
		}
	}

	/* Populate generic power domains from DT nodes */
	pds_node = of_find_node_by_path("/cpus/power-domains");
	if (pds_node) {
		ret = sbi_genpd_probe(pds_node);
		of_node_put(pds_node);
		if (ret)
			return ret;
	}

	/* Initialize CPU idle driver for each CPU */
	for_each_possible_cpu(cpu) {
		ret = sbi_cpuidle_init_cpu(&pdev->dev, cpu);
		if (ret) {
			pr_debug("HART%ld: idle driver init failed\n",
				 cpuid_to_hartid_map(cpu));
			goto out_fail;
		}
	}

	/* Setup CPU hotplug notifiers */
	sbi_idle_init_cpuhp();

	pr_info("idle driver registered for all CPUs\n");

	return 0;

out_fail:
	while (--cpu >= 0) {
		dev = per_cpu(cpuidle_devices, cpu);
		drv = cpuidle_get_cpu_driver(dev);
		cpuidle_unregister(drv);
		sbi_cpuidle_deinit_cpu(cpu);
	}

	return ret;
}
static struct platform_driver sbi_cpuidle_driver = {
	.probe = sbi_cpuidle_probe,
	.driver = {
		.name = "sbi-cpuidle",
		.sync_state = sbi_cpuidle_domain_sync_state,
	},
};
static int __init sbi_cpuidle_init(void)
{
	int ret;
	struct platform_device *pdev;

	if (!riscv_sbi_hsm_is_supported())
		return 0;

	ret = platform_driver_register(&sbi_cpuidle_driver);
	if (ret)
		return ret;

	pdev = platform_device_register_simple("sbi-cpuidle",
					       -1, NULL, 0);
	if (IS_ERR(pdev)) {
		platform_driver_unregister(&sbi_cpuidle_driver);
		return PTR_ERR(pdev);
	}

	return 0;
}
device_initcall(sbi_cpuidle_init);