drivers/cpuidle/cpuidle-cps.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2014 Imagination Technologies
 * Author: Paul Burton <paul.burton@mips.com>
 */

#include <linux/cpu_pm.h>
#include <linux/cpuidle.h>
#include <linux/init.h>

#include <asm/idle.h>
#include <asm/pm-cps.h>

/* Enumeration of the various idle states this driver may enter */
enum cps_idle_state {
	STATE_WAIT = 0,		/* MIPS wait instruction, coherent */
	STATE_NC_WAIT,		/* MIPS wait instruction, non-coherent */
	STATE_CLOCK_GATED,	/* Core clock gated */
	STATE_POWER_GATED,	/* Core power gated */
	STATE_COUNT
};
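
/*
 * The states above are ordered from shallowest to deepest. Both the index
 * comparison in cps_nc_enter() and the state_count truncation in
 * cps_cpuidle_init() rely on that ordering.
 */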

static int cps_nc_enter(struct cpuidle_device *dev,
			struct cpuidle_driver *drv, int index)
{
	enum cps_pm_state pm_state;
	int err;

	/*
	 * At least one core must remain powered up & clocked in order for the
	 * system to have any hope of functioning.
	 *
	 * TODO: don't treat core 0 specially, just prevent the final core
	 * TODO: remap interrupt affinity temporarily
	 */
	if (cpus_are_siblings(0, dev->cpu) && (index > STATE_NC_WAIT))
		index = STATE_NC_WAIT;

	/* Select the appropriate cps_pm_state */
	switch (index) {
	case STATE_NC_WAIT:
		pm_state = CPS_PM_NC_WAIT;
		break;
	case STATE_CLOCK_GATED:
		pm_state = CPS_PM_CLOCK_GATED;
		break;
	case STATE_POWER_GATED:
		pm_state = CPS_PM_POWER_GATED;
		break;
	default:
		BUG();
		return -EINVAL;
	}
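
	/*
	 * Power gating loses the core's register state, so the CPU PM
	 * notifier chain must be told before we commit to it; a non-zero
	 * return from cpu_pm_enter() means a notifier objected and the
	 * transition must be abandoned.
	 */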

	/* Notify listeners the CPU is about to power down */
	if ((pm_state == CPS_PM_POWER_GATED) && cpu_pm_enter())
		return -EINTR;

	/* Enter that state */
	err = cps_pm_enter_state(pm_state);

	/* Notify listeners the CPU is back up */
	if (pm_state == CPS_PM_POWER_GATED)
		cpu_pm_exit();

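	/* Return a negative error, or the index of the state actually entered */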
	return err ?: index;
}

static struct cpuidle_driver cps_driver = {
	.name			= "cpc_cpuidle",
	.owner			= THIS_MODULE,
	.states = {
		[STATE_WAIT]		= MIPS_CPUIDLE_WAIT_STATE,
		[STATE_NC_WAIT] = {
			.enter			= cps_nc_enter,
			.exit_latency		= 200,
			.target_residency	= 450,
			.name			= "nc-wait",
			.desc			= "non-coherent MIPS wait",
		},
		[STATE_CLOCK_GATED] = {
			.enter			= cps_nc_enter,
			.exit_latency		= 300,
			.target_residency	= 700,
			.flags			= CPUIDLE_FLAG_TIMER_STOP,
			.name			= "clock-gated",
			.desc			= "core clock gated",
		},
		[STATE_POWER_GATED] = {
			.enter			= cps_nc_enter,
			.exit_latency		= 600,
			.target_residency	= 1000,
			.flags			= CPUIDLE_FLAG_TIMER_STOP,
			.name			= "power-gated",
			.desc			= "core power gated",
		},
	},
	.state_count		= STATE_COUNT,
	.safe_state_index	= 0,
};

static DEFINE_PER_CPU(struct cpuidle_device, cpuidle_dev);

static void __init cps_cpuidle_unregister(void)
{
	int cpu;
	struct cpuidle_device *device;

	for_each_possible_cpu(cpu) {
		device = &per_cpu(cpuidle_dev, cpu);
		cpuidle_unregister_device(device);
	}

	cpuidle_unregister_driver(&cps_driver);
}

static int __init cps_cpuidle_init(void)
{
	int err, cpu, i;
	struct cpuidle_device *device;

	/* Detect supported states */
	if (!cps_pm_support_state(CPS_PM_POWER_GATED))
		cps_driver.state_count = STATE_CLOCK_GATED + 1;
	if (!cps_pm_support_state(CPS_PM_CLOCK_GATED))
		cps_driver.state_count = STATE_NC_WAIT + 1;
	if (!cps_pm_support_state(CPS_PM_NC_WAIT))
		cps_driver.state_count = STATE_WAIT + 1;

	/* Inform the user if some states are unavailable */
	if (cps_driver.state_count < STATE_COUNT) {
		pr_info("cpuidle-cps: limited to ");
		switch (cps_driver.state_count - 1) {
		case STATE_WAIT:
			pr_cont("coherent wait\n");
			break;
		case STATE_NC_WAIT:
			pr_cont("non-coherent wait\n");
			break;
		case STATE_CLOCK_GATED:
			pr_cont("clock gating\n");
			break;
		}
	}

	/*
	 * Set the coupled flag on the appropriate states if this system
	 * requires it.
	 */
	if (coupled_coherence)
		for (i = STATE_NC_WAIT; i < cps_driver.state_count; i++)
			cps_driver.states[i].flags |= CPUIDLE_FLAG_COUPLED;

	err = cpuidle_register_driver(&cps_driver);
	if (err) {
		pr_err("Failed to register CPS cpuidle driver\n");
		return err;
	}
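
	/*
	 * Register a cpuidle device for each possible CPU. Where the coupled
	 * framework is in use, coupled_cpus is set to the CPU's siblings so
	 * that states affecting the whole core are entered in a coordinated
	 * fashion.
	 */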
	for_each_possible_cpu(cpu) {
		device = &per_cpu(cpuidle_dev, cpu);
		device->cpu = cpu;
#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
		cpumask_copy(&device->coupled_cpus, &cpu_sibling_map[cpu]);
#endif

		err = cpuidle_register_device(device);
		if (err) {
			pr_err("Failed to register CPU%d cpuidle device\n",
			       cpu);
			goto err_out;
		}
	}

	return 0;
err_out:
	cps_cpuidle_unregister();
	return err;
}
device_initcall(cps_cpuidle_init);