// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2014 Imagination Technologies
 * Author: Paul Burton <paul.burton@mips.com>
 */
#include <linux/cpu_pm.h>
#include <linux/cpuidle.h>
#include <linux/init.h>

#include <asm/idle.h>
#include <asm/pm-cps.h>

/* Enumeration of the various idle states this driver may enter */
enum cps_idle_state {
	STATE_WAIT = 0,		/* MIPS wait instruction, coherent */
	STATE_NC_WAIT,		/* MIPS wait instruction, non-coherent */
	STATE_CLOCK_GATED,	/* Core clock gated */
	STATE_POWER_GATED,	/* Core power gated */
	STATE_COUNT
};
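
/*
 * Enter a non-coherent idle state on behalf of the cpuidle core. Returns
 * the index of the state actually entered (which may be shallower than
 * requested) or a negative errno on failure.
 */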
static int cps_nc_enter(struct cpuidle_device *dev,
			struct cpuidle_driver *drv, int index)
{
	enum cps_pm_state pm_state;
	int err;

	/*
	 * At least one core must remain powered up & clocked in order for the
	 * system to have any hope of functioning.
	 *
	 * TODO: don't treat core 0 specially, just prevent the final core
	 * TODO: remap interrupt affinity temporarily
	 */
	if (cpus_are_siblings(0, dev->cpu) && (index > STATE_NC_WAIT))
		index = STATE_NC_WAIT;

	/* Select the appropriate cps_pm_state */
	switch (index) {
	case STATE_NC_WAIT:
		pm_state = CPS_PM_NC_WAIT;
		break;
	case STATE_CLOCK_GATED:
		pm_state = CPS_PM_CLOCK_GATED;
		break;
	case STATE_POWER_GATED:
		pm_state = CPS_PM_POWER_GATED;
		break;
	default:
		BUG();
		return -EINVAL;
	}

	/* Notify listeners the CPU is about to power down */
	if ((pm_state == CPS_PM_POWER_GATED) && cpu_pm_enter())
		return -EINTR;

	/* Enter that state */
	err = cps_pm_enter_state(pm_state);

	/* Notify listeners the CPU is back up */
	if (pm_state == CPS_PM_POWER_GATED)
		cpu_pm_exit();

	return err ?: index;
}
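
/*
 * Description of the available idle states: STATE_WAIT reuses the generic
 * coherent MIPS wait state, while the deeper states all enter through
 * cps_nc_enter() above.
 */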
static struct cpuidle_driver cps_driver = {
	.name			= "cpc_cpuidle",
	.owner			= THIS_MODULE,
	.states = {
		[STATE_WAIT] = MIPS_CPUIDLE_WAIT_STATE,
		[STATE_NC_WAIT] = {
			.enter	= cps_nc_enter,
			.exit_latency		= 200,
			.target_residency	= 450,
			.name	= "nc-wait",
			.desc	= "non-coherent MIPS wait",
		},
		[STATE_CLOCK_GATED] = {
			.enter	= cps_nc_enter,
			.exit_latency		= 300,
			.target_residency	= 700,
			.flags	= CPUIDLE_FLAG_TIMER_STOP,
			.name	= "clock-gated",
			.desc	= "core clock gated",
		},
		[STATE_POWER_GATED] = {
			.enter	= cps_nc_enter,
			.exit_latency		= 600,
			.target_residency	= 1000,
			.flags	= CPUIDLE_FLAG_TIMER_STOP,
			.name	= "power-gated",
			.desc	= "core power gated",
		},
	},
	.state_count		= STATE_COUNT,
	.safe_state_index	= 0,
};

static void __init cps_cpuidle_unregister(void)
{
	int cpu;
	struct cpuidle_device *device;

	for_each_possible_cpu(cpu) {
		device = &per_cpu(cpuidle_dev, cpu);
		cpuidle_unregister_device(device);
	}

	cpuidle_unregister_driver(&cps_driver);
}

static int __init cps_cpuidle_init(void)
{
	int err, cpu, i;
	struct cpuidle_device *device;

	/* Detect supported states */
	if (!cps_pm_support_state(CPS_PM_POWER_GATED))
		cps_driver.state_count = STATE_CLOCK_GATED + 1;
	if (!cps_pm_support_state(CPS_PM_CLOCK_GATED))
		cps_driver.state_count = STATE_NC_WAIT + 1;
	if (!cps_pm_support_state(CPS_PM_NC_WAIT))
		cps_driver.state_count = STATE_WAIT + 1;

	/* Inform the user if some states are unavailable */
	if (cps_driver.state_count < STATE_COUNT) {
		pr_info("cpuidle-cps: limited to ");
		switch (cps_driver.state_count - 1) {
		case STATE_WAIT:
			pr_cont("coherent wait\n");
			break;
		case STATE_NC_WAIT:
			pr_cont("non-coherent wait\n");
			break;
		case STATE_CLOCK_GATED:
			pr_cont("clock gating\n");
			break;
		}
	}

	/*
	 * Set the coupled flag on the appropriate states if this system
	 * requires it.
	 */
	if (coupled_coherence)
		for (i = STATE_NC_WAIT; i < cps_driver.state_count; i++)
			cps_driver.states[i].flags |= CPUIDLE_FLAG_COUPLED;

	err = cpuidle_register_driver(&cps_driver);
	if (err) {
		pr_err("Failed to register CPS cpuidle driver\n");
		return err;
	}

	for_each_possible_cpu(cpu) {
		device = &per_cpu(cpuidle_dev, cpu);
		device->cpu = cpu;
#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
		cpumask_copy(&device->coupled_cpus, &cpu_sibling_map[cpu]);
#endif

		err = cpuidle_register_device(device);
		if (err) {
			pr_err("Failed to register CPU%d cpuidle device\n",
			       cpu);
			goto err_out;
		}
	}

	return 0;

err_out:
	cps_cpuidle_unregister();
	return err;
}
device_initcall(cps_cpuidle_init);