// SPDX-License-Identifier: GPL-2.0-only
/*
 * RISC-V SBI CPU idle driver.
 *
 * Copyright (c) 2021 Western Digital Corporation or its affiliates.
 * Copyright (c) 2022 Ventana Micro Systems Inc.
 */

#define pr_fmt(fmt) "cpuidle-riscv-sbi: " fmt
#include <linux/cleanup.h>
#include <linux/cpuhotplug.h>
#include <linux/cpuidle.h>
#include <linux/cpumask.h>
#include <linux/cpu_pm.h>
#include <linux/cpu_cooling.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <asm/cpuidle.h>
#include <asm/sbi.h>
#include <asm/smp.h>
#include <asm/suspend.h>

#include "cpuidle.h"
#include "dt_idle_states.h"
#include "dt_idle_genpd.h"
struct sbi_cpuidle_data {
	u32 *states;
	struct device *dev;
};

struct sbi_domain_state {
	bool available;
	u32 state;
};

static DEFINE_PER_CPU_READ_MOSTLY(struct sbi_cpuidle_data, sbi_cpuidle_data);
static DEFINE_PER_CPU(struct sbi_domain_state, domain_state);
static bool sbi_cpuidle_use_osi;
static bool sbi_cpuidle_use_cpuhp;
static bool sbi_cpuidle_pd_allow_domain_state;
static inline void sbi_set_domain_state(u32 state)
{
	struct sbi_domain_state *data = this_cpu_ptr(&domain_state);

	data->available = true;
	data->state = state;
}
static inline u32 sbi_get_domain_state(void)
{
	struct sbi_domain_state *data = this_cpu_ptr(&domain_state);

	return data->state;
}
static inline void sbi_clear_domain_state(void)
{
	struct sbi_domain_state *data = this_cpu_ptr(&domain_state);

	data->available = false;
}
static inline bool sbi_is_domain_state_available(void)
{
	struct sbi_domain_state *data = this_cpu_ptr(&domain_state);

	return data->available;
}
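
/*
 * Note (added explanatory comment): the per-CPU domain_state above is the
 * handshake between the genpd ->power_off() callback, which records the
 * domain idle state selected by the genpd governor, and the idle-entry path
 * below, which consumes that state when suspending the hart. The "available"
 * flag marks whether a domain state was set for the current idle cycle.
 */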
static __cpuidle int sbi_cpuidle_enter_state(struct cpuidle_device *dev,
					     struct cpuidle_driver *drv, int idx)
{
	u32 *states = __this_cpu_read(sbi_cpuidle_data.states);
	u32 state = states[idx];

	if (state & SBI_HSM_SUSP_NON_RET_BIT)
		return CPU_PM_CPU_IDLE_ENTER_PARAM(riscv_sbi_hart_suspend,
						   idx, state);
	else
		return CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM(riscv_sbi_hart_suspend,
							     idx, state);
}
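
/*
 * Note (added explanatory comment): per the SBI HSM extension, suspend types
 * with bit 31 (SBI_HSM_SUSP_NON_RET_BIT) set are non-retentive, i.e. the hart
 * loses register state and resumes from the resume address, so the CPU PM
 * notifiers must run around the suspend. Suspend types with bit 31 clear are
 * retentive and resume right after the SBI call. For example, 0x00000000 is
 * the default retentive suspend and 0x80000000 the default non-retentive one;
 * platform-specific values come from the devicetree.
 */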
static __cpuidle int __sbi_enter_domain_idle_state(struct cpuidle_device *dev,
						   struct cpuidle_driver *drv, int idx,
						   bool s2idle)
{
	struct sbi_cpuidle_data *data = this_cpu_ptr(&sbi_cpuidle_data);
	u32 *states = data->states;
	struct device *pd_dev = data->dev;
	u32 state;
	int ret;

	ret = cpu_pm_enter();
	if (ret)
		return -1;

	/* Do runtime PM to manage a hierarchical CPU topology. */
	if (s2idle)
		dev_pm_genpd_suspend(pd_dev);
	else
		pm_runtime_put_sync_suspend(pd_dev);

	ct_cpuidle_enter();

	if (sbi_is_domain_state_available())
		state = sbi_get_domain_state();
	else
		state = states[idx];

	ret = riscv_sbi_hart_suspend(state) ? -1 : idx;

	ct_cpuidle_exit();

	if (s2idle)
		dev_pm_genpd_resume(pd_dev);
	else
		pm_runtime_get_sync(pd_dev);

	cpu_pm_exit();

	/* Clear the domain state to start fresh when back from idle. */
	sbi_clear_domain_state();

	return ret;
}
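
/*
 * Note (added explanatory comment): in the hierarchical (OSI) configuration
 * the pm_runtime_put_sync_suspend() above may drop the last active reference
 * in the CPU's PM domain. The genpd governor then picks a domain idle state
 * and sbi_cpuidle_pd_power_off() records it via sbi_set_domain_state(), so
 * the SBI suspend parameter used here can be the domain's deeper state rather
 * than the per-CPU state selected by cpuidle.
 */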
static int sbi_enter_domain_idle_state(struct cpuidle_device *dev,
				       struct cpuidle_driver *drv, int idx)
{
	return __sbi_enter_domain_idle_state(dev, drv, idx, false);
}
static int sbi_enter_s2idle_domain_idle_state(struct cpuidle_device *dev,
					      struct cpuidle_driver *drv,
					      int idx)
{
	return __sbi_enter_domain_idle_state(dev, drv, idx, true);
}
static int sbi_cpuidle_cpuhp_up(unsigned int cpu)
{
	struct device *pd_dev = __this_cpu_read(sbi_cpuidle_data.dev);

	if (pd_dev)
		pm_runtime_get_sync(pd_dev);

	return 0;
}
static int sbi_cpuidle_cpuhp_down(unsigned int cpu)
{
	struct device *pd_dev = __this_cpu_read(sbi_cpuidle_data.dev);

	if (pd_dev) {
		pm_runtime_put_sync(pd_dev);
		/* Clear domain state to start fresh at next online. */
		sbi_clear_domain_state();
	}

	return 0;
}
static void sbi_idle_init_cpuhp(void)
{
	int err;

	if (!sbi_cpuidle_use_cpuhp)
		return;

	err = cpuhp_setup_state_nocalls(CPUHP_AP_CPU_PM_STARTING,
					"cpuidle/sbi:online",
					sbi_cpuidle_cpuhp_up,
					sbi_cpuidle_cpuhp_down);
	if (err)
		pr_warn("Failed %d while setting up cpuhp state\n", err);
}
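
/*
 * Note (added explanatory comment): the CPU hotplug callbacks above keep the
 * runtime PM reference on the CPU's PM domain balanced across offline/online
 * transitions, so the domain is held on while the CPU is up and released when
 * it goes down. They are only registered when the hierarchical topology is in
 * use (sbi_cpuidle_use_cpuhp).
 */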
static const struct of_device_id sbi_cpuidle_state_match[] = {
	{ .compatible = "riscv,idle-state",
	  .data = sbi_cpuidle_enter_state },
	{ },
};
static int sbi_dt_parse_state_node(struct device_node *np, u32 *state)
{
	int err = of_property_read_u32(np, "riscv,sbi-suspend-param", state);

	if (err) {
		pr_warn("%pOF missing riscv,sbi-suspend-param property\n", np);
		return err;
	}

	if (!riscv_sbi_suspend_state_is_valid(*state)) {
		pr_warn("Invalid SBI suspend state %#x\n", *state);
		return -EINVAL;
	}

	return 0;
}
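
/*
 * Illustrative devicetree fragment (added as an example, not part of this
 * driver; node names and latency/residency values are placeholders):
 *
 *	cpus {
 *		cpu@0 {
 *			...
 *			cpu-idle-states = <&CPU_RET_0_0>, <&CPU_NONRET_0_0>;
 *		};
 *
 *		idle-states {
 *			CPU_RET_0_0: cpu-retentive-0-0 {
 *				compatible = "riscv,idle-state";
 *				riscv,sbi-suspend-param = <0x00000000>;
 *				entry-latency-us = <20>;
 *				exit-latency-us = <40>;
 *				min-residency-us = <80>;
 *			};
 *
 *			CPU_NONRET_0_0: cpu-nonretentive-0-0 {
 *				compatible = "riscv,idle-state";
 *				riscv,sbi-suspend-param = <0x80000000>;
 *				entry-latency-us = <250>;
 *				exit-latency-us = <500>;
 *				min-residency-us = <1000>;
 *			};
 *		};
 *	};
 */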
static int sbi_dt_cpu_init_topology(struct cpuidle_driver *drv,
				    struct sbi_cpuidle_data *data,
				    unsigned int state_count, int cpu)
{
	/* Currently limit the hierarchical topology to be used in OSI mode. */
	if (!sbi_cpuidle_use_osi)
		return 0;

	data->dev = dt_idle_attach_cpu(cpu, "sbi");
	if (IS_ERR_OR_NULL(data->dev))
		return PTR_ERR_OR_ZERO(data->dev);

	/*
	 * Using the deepest state for the CPU to trigger a potential selection
	 * of a shared state for the domain, assumes the domain states are all
	 * deeper states.
	 */
	drv->states[state_count - 1].flags |= CPUIDLE_FLAG_RCU_IDLE;
	drv->states[state_count - 1].enter = sbi_enter_domain_idle_state;
	drv->states[state_count - 1].enter_s2idle =
		sbi_enter_s2idle_domain_idle_state;
	sbi_cpuidle_use_cpuhp = true;

	return 0;
}
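
/*
 * Note (added explanatory comment): when the hierarchical topology is used,
 * only the deepest per-CPU state is redirected to the domain-aware enter
 * callbacks above; shallower states keep the plain sbi_cpuidle_enter_state()
 * path and never involve runtime PM or the genpd governor.
 */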
static int sbi_cpuidle_dt_init_states(struct device *dev,
				      struct cpuidle_driver *drv,
				      unsigned int cpu,
				      unsigned int state_count)
{
	struct sbi_cpuidle_data *data = per_cpu_ptr(&sbi_cpuidle_data, cpu);
	struct device_node *state_node;
	u32 *states;
	int i, ret;

	struct device_node *cpu_node __free(device_node) = of_cpu_device_node_get(cpu);
	if (!cpu_node)
		return -ENODEV;

	states = devm_kcalloc(dev, state_count, sizeof(*states), GFP_KERNEL);
	if (!states)
		return -ENOMEM;

	/* Parse SBI specific details from state DT nodes */
	for (i = 1; i < state_count; i++) {
		state_node = of_get_cpu_state_node(cpu_node, i - 1);
		if (!state_node)
			break;

		ret = sbi_dt_parse_state_node(state_node, &states[i]);
		of_node_put(state_node);
		if (ret)
			return ret;

		pr_debug("sbi-state %#x index %d\n", states[i], i);
	}
	if (i != state_count)
		return -ENODEV;

	/* Initialize optional data, used for the hierarchical topology. */
	ret = sbi_dt_cpu_init_topology(drv, data, state_count, cpu);
	if (ret < 0)
		return ret;

	/* Store states in the per-cpu struct. */
	data->states = states;

	return 0;
}
static void sbi_cpuidle_deinit_cpu(int cpu)
{
	struct sbi_cpuidle_data *data = per_cpu_ptr(&sbi_cpuidle_data, cpu);

	dt_idle_detach_cpu(data->dev);
	sbi_cpuidle_use_cpuhp = false;
}
static int sbi_cpuidle_init_cpu(struct device *dev, int cpu)
{
	struct cpuidle_driver *drv;
	unsigned int state_count = 0;
	int ret = 0;

	drv = devm_kzalloc(dev, sizeof(*drv), GFP_KERNEL);
	if (!drv)
		return -ENOMEM;

	drv->name = "sbi_cpuidle";
	drv->owner = THIS_MODULE;
	drv->cpumask = (struct cpumask *)cpumask_of(cpu);

	/* RISC-V architectural WFI to be represented as state index 0. */
	drv->states[0].enter = sbi_cpuidle_enter_state;
	drv->states[0].exit_latency = 1;
	drv->states[0].target_residency = 1;
	drv->states[0].power_usage = UINT_MAX;
	strcpy(drv->states[0].name, "WFI");
	strcpy(drv->states[0].desc, "RISC-V WFI");

	/*
	 * If no DT idle states are detected (ret == 0) let the driver
	 * initialization fail accordingly since there is no reason to
	 * initialize the idle driver if only wfi is supported; the
	 * default architectural back-end already executes wfi
	 * on idle entry.
	 */
	ret = dt_init_idle_driver(drv, sbi_cpuidle_state_match, 1);
	if (ret <= 0) {
		pr_debug("HART%ld: failed to parse DT idle states\n",
			 cpuid_to_hartid_map(cpu));
		return ret ? : -ENODEV;
	}
	state_count = ret + 1; /* Include WFI state as well */

	/* Initialize idle states from DT. */
	ret = sbi_cpuidle_dt_init_states(dev, drv, cpu, state_count);
	if (ret) {
		pr_err("HART%ld: failed to init idle states\n",
		       cpuid_to_hartid_map(cpu));
		return ret;
	}

	if (cpuidle_disabled())
		return 0;

	ret = cpuidle_register(drv, NULL);
	if (ret)
		goto deinit;

	cpuidle_cooling_register(drv);

	return 0;
deinit:
	sbi_cpuidle_deinit_cpu(cpu);
	return ret;
}
static void sbi_cpuidle_domain_sync_state(struct device *dev)
{
	/*
	 * All devices have now been attached/probed to the PM domain
	 * topology, hence it's fine to allow domain states to be picked.
	 */
	sbi_cpuidle_pd_allow_domain_state = true;
}
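
/*
 * Note (added explanatory comment): ->sync_state() is invoked by the driver
 * core once the consumers of this device have probed, which is why domain
 * states are only allowed from that point on; picking a domain state earlier
 * could power down a domain that a still-probing consumer depends on.
 */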
#ifdef CONFIG_DT_IDLE_GENPD

static int sbi_cpuidle_pd_power_off(struct generic_pm_domain *pd)
{
	struct genpd_power_state *state = &pd->states[pd->state_idx];
	u32 *pd_state;

	if (!state->data)
		return 0;

	if (!sbi_cpuidle_pd_allow_domain_state)
		return -EBUSY;

	/* OSI mode is enabled, set the corresponding domain state. */
	pd_state = state->data;
	sbi_set_domain_state(*pd_state);

	return 0;
}
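
/*
 * Note (added explanatory comment): this ->power_off() callback runs in the
 * idle path with interrupts disabled (the domain is GENPD_FLAG_IRQ_SAFE). It
 * does not power anything off itself; it only records the domain idle state
 * so that the CPU about to suspend passes it to riscv_sbi_hart_suspend().
 */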
struct sbi_pd_provider {
	struct list_head link;
	struct device_node *node;
};

static LIST_HEAD(sbi_pd_providers);
static int sbi_pd_init(struct device_node *np)
{
	struct generic_pm_domain *pd;
	struct sbi_pd_provider *pd_provider;
	struct dev_power_governor *pd_gov;
	int ret = -ENOMEM;

	pd = dt_idle_pd_alloc(np, sbi_dt_parse_state_node);
	if (!pd)
		goto out;

	pd_provider = kzalloc(sizeof(*pd_provider), GFP_KERNEL);
	if (!pd_provider)
		goto free_pd;

	pd->flags |= GENPD_FLAG_IRQ_SAFE | GENPD_FLAG_CPU_DOMAIN;

	/* Allow power off when OSI is available. */
	if (sbi_cpuidle_use_osi)
		pd->power_off = sbi_cpuidle_pd_power_off;
	else
		pd->flags |= GENPD_FLAG_ALWAYS_ON;

	/* Use governor for CPU PM domains if it has some states to manage. */
	pd_gov = pd->states ? &pm_domain_cpu_gov : NULL;

	ret = pm_genpd_init(pd, pd_gov, false);
	if (ret)
		goto free_pd_prov;

	ret = of_genpd_add_provider_simple(np, pd);
	if (ret)
		goto remove_pd;

	pd_provider->node = of_node_get(np);
	list_add(&pd_provider->link, &sbi_pd_providers);

	pr_debug("init PM domain %s\n", pd->name);
	return 0;

remove_pd:
	pm_genpd_remove(pd);
free_pd_prov:
	kfree(pd_provider);
free_pd:
	dt_idle_pd_free(pd);
out:
	pr_err("failed to init PM domain ret=%d %pOF\n", ret, np);
	return ret;
}
static void sbi_pd_remove(void)
{
	struct sbi_pd_provider *pd_provider, *it;
	struct generic_pm_domain *genpd;

	list_for_each_entry_safe(pd_provider, it, &sbi_pd_providers, link) {
		of_genpd_del_provider(pd_provider->node);

		genpd = of_genpd_remove_last(pd_provider->node);
		if (!IS_ERR(genpd))
			kfree(genpd);

		of_node_put(pd_provider->node);
		list_del(&pd_provider->link);
		kfree(pd_provider);
	}
}
static int sbi_genpd_probe(struct device_node *np)
{
	int ret = 0, pd_count = 0;

	if (!np)
		return -ENODEV;

	/*
	 * Parse child nodes for the "#power-domain-cells" property and
	 * initialize a genpd/genpd-of-provider pair when it's found.
	 */
	for_each_child_of_node_scoped(np, node) {
		if (!of_property_present(node, "#power-domain-cells"))
			continue;

		ret = sbi_pd_init(node);
		if (ret)
			goto remove_pd;

		pd_count++;
	}

	/* Bail out if not using the hierarchical CPU topology. */
	if (!pd_count)
		goto no_pd;

	/* Link genpd masters/subdomains to model the CPU topology. */
	ret = dt_idle_pd_init_topology(np);
	if (ret)
		goto remove_pd;

	return 0;

remove_pd:
	sbi_pd_remove();
	pr_err("failed to create CPU PM domains ret=%d\n", ret);
no_pd:
	return ret;
}
#else

static inline int sbi_genpd_probe(struct device_node *np)
{
	return 0;
}

#endif
static int sbi_cpuidle_probe(struct platform_device *pdev)
{
	int cpu, ret;
	struct cpuidle_driver *drv;
	struct cpuidle_device *dev;
	struct device_node *np, *pds_node;

	/* Detect OSI support based on CPU DT nodes */
	sbi_cpuidle_use_osi = true;
	for_each_possible_cpu(cpu) {
		np = of_cpu_device_node_get(cpu);
		if (np &&
		    of_property_present(np, "power-domains") &&
		    of_property_present(np, "power-domain-names")) {
			continue;
		} else {
			sbi_cpuidle_use_osi = false;
			break;
		}
	}

	/* Populate generic power domains from DT nodes */
	pds_node = of_find_node_by_path("/cpus/power-domains");
	if (pds_node) {
		ret = sbi_genpd_probe(pds_node);
		of_node_put(pds_node);
		if (ret)
			return ret;
	}

	/* Initialize CPU idle driver for each CPU */
	for_each_possible_cpu(cpu) {
		ret = sbi_cpuidle_init_cpu(&pdev->dev, cpu);
		if (ret) {
			pr_debug("HART%ld: idle driver init failed\n",
				 cpuid_to_hartid_map(cpu));
			goto out_fail;
		}
	}

	/* Setup CPU hotplug notifiers */
	sbi_idle_init_cpuhp();

	if (cpuidle_disabled())
		pr_info("cpuidle is disabled\n");
	else
		pr_info("idle driver registered for all CPUs\n");

	return 0;

out_fail:
	while (--cpu >= 0) {
		dev = per_cpu(cpuidle_devices, cpu);
		drv = cpuidle_get_cpu_driver(dev);
		cpuidle_unregister(drv);
		sbi_cpuidle_deinit_cpu(cpu);
	}

	return ret;
}
static struct platform_driver sbi_cpuidle_driver = {
	.probe = sbi_cpuidle_probe,
	.driver = {
		.name = "sbi-cpuidle",
		.sync_state = sbi_cpuidle_domain_sync_state,
	},
};
static int __init sbi_cpuidle_init(void)
{
	int ret;
	struct platform_device *pdev;

	if (!riscv_sbi_hsm_is_supported())
		return 0;

	ret = platform_driver_register(&sbi_cpuidle_driver);
	if (ret)
		return ret;

	pdev = platform_device_register_simple("sbi-cpuidle",
					       -1, NULL, 0);
	if (IS_ERR(pdev)) {
		platform_driver_unregister(&sbi_cpuidle_driver);
		return PTR_ERR(pdev);
	}

	return 0;
}
arch_initcall(sbi_cpuidle_init);