/*
 * arch/arm/mach-vexpress/tc2_pm.c - TC2 power management support
 *
 * Created by:	Nicolas Pitre, October 2012
 * Copyright:	(C) 2012-2013  Linaro Limited
 *
 * Some portions of this file were originally written by Achin Gupta
 * Copyright:	(C) 2012  ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/of_address.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/irqchip/arm-gic.h>

#include <asm/mcpm.h>
#include <asm/proc-fns.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/cp15.h>

#include <linux/arm-cci.h>

#include "spc.h"
/* SCC conf registers */
#define A15_CONF		0x400
#define A7_CONF			0x500
#define SYS_INFO		0x700
#define SPC_BASE		0xb00
/*
 * We can't use regular spinlocks. In the switcher case, it is possible
 * for an outbound CPU to call power_down() after its inbound counterpart
 * is already live using the same logical CPU number which trips lockdep
 * debugging.
 */
static arch_spinlock_t tc2_pm_lock = __ARCH_SPIN_LOCK_UNLOCKED;
#define TC2_CLUSTERS			2
#define TC2_MAX_CPUS_PER_CLUSTER	3

static unsigned int tc2_nr_cpus[TC2_CLUSTERS];

/* Keep per-cpu usage count to cope with unordered up/down requests */
static int tc2_pm_use_count[TC2_MAX_CPUS_PER_CLUSTER][TC2_CLUSTERS];

#define tc2_cluster_unused(cluster) \
	(!tc2_pm_use_count[0][cluster] && \
	 !tc2_pm_use_count[1][cluster] && \
	 !tc2_pm_use_count[2][cluster])
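
/*
 * Note: tc2_pm_use_count is indexed [cpu][cluster], and the macro above
 * must enumerate all TC2_MAX_CPUS_PER_CLUSTER entries so that a cluster
 * is only considered unused once every possible CPU slot in it is down.
 */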
static int tc2_pm_power_up(unsigned int cpu, unsigned int cluster)
{
	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	if (cluster >= TC2_CLUSTERS || cpu >= tc2_nr_cpus[cluster])
		return -EINVAL;

	/*
	 * Since this is called with IRQs enabled, and no arch_spin_lock_irq
	 * variant exists, we need to disable IRQs manually here.
	 */
	local_irq_disable();
	arch_spin_lock(&tc2_pm_lock);

	if (tc2_cluster_unused(cluster))
		ve_spc_powerdown(cluster, false);

	tc2_pm_use_count[cpu][cluster]++;
	if (tc2_pm_use_count[cpu][cluster] == 1) {
		ve_spc_set_resume_addr(cluster, cpu,
				       virt_to_phys(mcpm_entry_point));
		ve_spc_cpu_wakeup_irq(cluster, cpu, true);
	} else if (tc2_pm_use_count[cpu][cluster] != 2) {
		/*
		 * The only possible values are:
		 * 0 = CPU down
		 * 1 = CPU (still) up
		 * 2 = CPU requested to be up before it had a chance
		 *     to actually make itself down.
		 * Any other value is a bug.
		 */
		BUG();
	}

	arch_spin_unlock(&tc2_pm_lock);
	local_irq_enable();

	return 0;
}
static void tc2_pm_down(u64 residency)
{
	unsigned int mpidr, cpu, cluster;
	bool last_man = false, skip_wfi = false;

	mpidr = read_cpuid_mpidr();
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	BUG_ON(cluster >= TC2_CLUSTERS || cpu >= TC2_MAX_CPUS_PER_CLUSTER);
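
	/*
	 * For reference: affinity level 0 of the MPIDR is the CPU number
	 * within a cluster and level 1 is the cluster number, so e.g.
	 * MPIDR 0x101 decodes to cpu 1 in cluster 1.
	 */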
	__mcpm_cpu_going_down(cpu, cluster);

	arch_spin_lock(&tc2_pm_lock);
	BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP);
	tc2_pm_use_count[cpu][cluster]--;
	if (tc2_pm_use_count[cpu][cluster] == 0) {
		ve_spc_cpu_wakeup_irq(cluster, cpu, true);
		if (tc2_cluster_unused(cluster)) {
			ve_spc_powerdown(cluster, true);
			ve_spc_global_wakeup_irq(true);
			last_man = true;
		}
	} else if (tc2_pm_use_count[cpu][cluster] == 1) {
		/*
		 * A power_up request went ahead of us.
		 * Even if we do not want to shut this CPU down,
		 * the caller expects a certain state as if the WFI
		 * was aborted.  So let's continue with cache cleaning.
		 */
		skip_wfi = true;
	} else
		BUG();
	/*
	 * If the CPU is committed to power down, make sure
	 * the power controller will be in charge of waking it
	 * up upon IRQ, ie IRQ lines are cut from the GIC CPU IF
	 * to the CPU by disabling the GIC CPU IF to prevent wfi
	 * from completing execution behind the power controller's back.
	 */
	if (!skip_wfi)
		gic_cpu_if_down();

	if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
		arch_spin_unlock(&tc2_pm_lock);
		if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A15) {
			/*
			 * On the Cortex-A15 we need to disable
			 * L2 prefetching before flushing the cache.
			 */
			asm volatile(
			"mcr	p15, 1, %0, c15, c0, 3 \n\t"
			"isb	\n\t"
			"dsb	"
			: : "r" (0x400) );
		}

		/*
		 * We need to disable and flush the whole (L1 and L2) cache.
		 * Let's do it in the safest possible way i.e. with
		 * no memory access within the following sequence
		 * including the stack.
		 *
		 * Note: fp is preserved to the stack explicitly prior doing
		 * this since adding it to the clobber list is incompatible
		 * with having CONFIG_FRAME_POINTER=y.
		 */
		asm volatile(
		"str	fp, [sp, #-4]! \n\t"
		"mrc	p15, 0, r0, c1, c0, 0	@ get CR \n\t"
		"bic	r0, r0, #"__stringify(CR_C)" \n\t"
		"mcr	p15, 0, r0, c1, c0, 0	@ set CR \n\t"
		"isb	\n\t"
		"bl	v7_flush_dcache_all \n\t"
		"clrex	\n\t"
		"mrc	p15, 0, r0, c1, c0, 1	@ get AUXCR \n\t"
		"bic	r0, r0, #(1 << 6)	@ disable local coherency \n\t"
		"mcr	p15, 0, r0, c1, c0, 1	@ set AUXCR \n\t"
		"isb	\n\t"
		"dsb	\n\t"
		"ldr	fp, [sp], #4"
		: : : "r0","r1","r2","r3","r4","r5","r6","r7",
		      "r9","r10","lr","memory");

		cci_disable_port_by_cpu(mpidr);

		__mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
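
		/*
		 * Note: the ordering above matters. The dcache must be
		 * flushed and the C bit cleared before the CCI port is
		 * disabled, because once this CPU drops out of the
		 * coherency domain any dirty lines still in its cache
		 * would no longer be visible to the other CPUs.
		 */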
	} else {
		/*
		 * If last man then undo any setup done previously.
		 */
		if (last_man) {
			ve_spc_powerdown(cluster, false);
			ve_spc_global_wakeup_irq(false);
		}

		arch_spin_unlock(&tc2_pm_lock);

		/*
		 * We need to disable and flush only the L1 cache.
		 * Let's do it in the safest possible way as above.
		 */
		asm volatile(
		"str	fp, [sp, #-4]! \n\t"
		"mrc	p15, 0, r0, c1, c0, 0	@ get CR \n\t"
		"bic	r0, r0, #"__stringify(CR_C)" \n\t"
		"mcr	p15, 0, r0, c1, c0, 0	@ set CR \n\t"
		"isb	\n\t"
		"bl	v7_flush_dcache_louis \n\t"
		"clrex	\n\t"
		"mrc	p15, 0, r0, c1, c0, 1	@ get AUXCR \n\t"
		"bic	r0, r0, #(1 << 6)	@ disable local coherency \n\t"
		"mcr	p15, 0, r0, c1, c0, 1	@ set AUXCR \n\t"
		"isb	\n\t"
		"dsb	\n\t"
		"ldr	fp, [sp], #4"
		: : : "r0","r1","r2","r3","r4","r5","r6","r7",
		      "r9","r10","lr","memory");
	}
	__mcpm_cpu_down(cpu, cluster);

	/* Now we are prepared for power-down, do it: */
	if (!skip_wfi)
		wfi();

	/* Not dead at this point?  Let our caller cope. */
}
static void tc2_pm_power_down(void)
{
	tc2_pm_down(0);
}
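
/*
 * Note: tc2_pm_suspend() below differs from tc2_pm_power_down() only in
 * that a resume address is handed to the SPC first, so that the CPU
 * re-enters the kernel through mcpm_entry_point when it is next woken up.
 */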
static void tc2_pm_suspend(u64 residency)
{
	unsigned int mpidr, cpu, cluster;

	mpidr = read_cpuid_mpidr();
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	ve_spc_set_resume_addr(cluster, cpu, virt_to_phys(mcpm_entry_point));
	tc2_pm_down(residency);
}
static void tc2_pm_powered_up(void)
{
	unsigned int mpidr, cpu, cluster;
	unsigned long flags;

	mpidr = read_cpuid_mpidr();
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	BUG_ON(cluster >= TC2_CLUSTERS || cpu >= TC2_MAX_CPUS_PER_CLUSTER);

	local_irq_save(flags);
	arch_spin_lock(&tc2_pm_lock);

	if (tc2_cluster_unused(cluster)) {
		ve_spc_powerdown(cluster, false);
		ve_spc_global_wakeup_irq(false);
	}

	if (!tc2_pm_use_count[cpu][cluster])
		tc2_pm_use_count[cpu][cluster] = 1;

	ve_spc_cpu_wakeup_irq(cluster, cpu, false);
	ve_spc_set_resume_addr(cluster, cpu, 0);

	arch_spin_unlock(&tc2_pm_lock);
	local_irq_restore(flags);
}
static const struct mcpm_platform_ops tc2_pm_power_ops = {
	.power_up	= tc2_pm_power_up,
	.power_down	= tc2_pm_power_down,
	.suspend	= tc2_pm_suspend,
	.powered_up	= tc2_pm_powered_up,
};
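
/*
 * Note on calling context: per the MCPM documentation in asm/mcpm.h,
 * .power_up is invoked on a live CPU on behalf of the one being brought
 * up, .power_down and .suspend run on the CPU that is going away, and
 * .powered_up runs on a CPU that has just come (back) online.
 */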
static bool __init tc2_pm_usage_count_init(void)
{
	unsigned int mpidr, cpu, cluster;

	mpidr = read_cpuid_mpidr();
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	if (cluster >= TC2_CLUSTERS || cpu >= tc2_nr_cpus[cluster]) {
		pr_err("%s: boot CPU is out of bounds!\n", __func__);
		return false;
	}
	tc2_pm_use_count[cpu][cluster] = 1;
	return true;
}
/*
 * Enable cluster-level coherency, in preparation for turning on the MMU.
 */
static void __naked tc2_pm_power_up_setup(unsigned int affinity_level)
{
	asm volatile (" \n"
"	cmp	r0, #1 \n"
"	bxne	lr \n"
"	b	cci_enable_port_for_self ");
}
static int __init tc2_pm_init(void)
{
	int ret;
	void __iomem *scc;
	u32 a15_cluster_id, a7_cluster_id, sys_info;
	struct device_node *np;

	/*
	 * The power management-related features are hidden behind
	 * SCC registers. We need to extract runtime information like
	 * cluster ids and number of CPUs really available in clusters.
	 */
	np = of_find_compatible_node(NULL, NULL,
				     "arm,vexpress-scc,v2p-ca15_a7");
	scc = of_iomap(np, 0);
	if (!scc)
		return -ENODEV;
	a15_cluster_id = readl_relaxed(scc + A15_CONF) & 0xf;
	a7_cluster_id = readl_relaxed(scc + A7_CONF) & 0xf;
	if (a15_cluster_id >= TC2_CLUSTERS || a7_cluster_id >= TC2_CLUSTERS)
		return -EINVAL;
	sys_info = readl_relaxed(scc + SYS_INFO);
	tc2_nr_cpus[a15_cluster_id] = (sys_info >> 16) & 0xf;
	tc2_nr_cpus[a7_cluster_id] = (sys_info >> 20) & 0xf;
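
	/*
	 * Per the shifts above, SYS_INFO bits [19:16] report the number of
	 * A15 cores fitted and bits [23:20] the number of A7 cores.
	 */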
	/*
	 * A subset of the SCC registers is also used to communicate
	 * with the SPC (power controller). We need to be able to
	 * drive it very early in the boot process to power up
	 * processors, so we initialize the SPC driver here.
	 */
	ret = ve_spc_init(scc + SPC_BASE, a15_cluster_id);
	if (ret)
		return ret;
	if (!cci_probed())
		return -ENODEV;

	if (!tc2_pm_usage_count_init())
		return -EINVAL;

	ret = mcpm_platform_register(&tc2_pm_power_ops);
	if (!ret) {
		mcpm_sync_init(tc2_pm_power_up_setup);
		pr_info("TC2 power management initialized\n");
	}

	return ret;
}

early_initcall(tc2_pm_init);