/*
 * arch/arm/mach-vexpress/dcscb.c - Dual Cluster System Configuration Block
 *
 * Created by:	Nicolas Pitre, May 2012
 * Copyright:	(C) 2012-2013  Linaro Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/of_address.h>
#include <linux/vexpress.h>
#include <linux/arm-cci.h>

#include <asm/mcpm.h>
#include <asm/proc-fns.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/cp15.h>
#define RST_HOLD0	0x0
#define RST_HOLD1	0x4
#define SYS_SWRESET	0x8
#define RST_STAT0	0xc
#define RST_STAT1	0x10
#define EAG_CFG_R	0x20
#define EAG_CFG_W	0x24
#define KFC_CFG_R	0x28
#define KFC_CFG_W	0x2c
#define DCS_CFG_R	0x30
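
/*
 * A sketch of the RST_HOLDx layout as inferred from the code below,
 * not from the DCSCB documentation: RST_HOLD0/RST_HOLD1 control
 * cluster 0/1 respectively (hence the "RST_HOLD0 + cluster * 4"
 * addressing), with one per-CPU reset bit in bits [3:0], a second
 * per-CPU reset bit in bits [7:4], and a cluster-wide reset in bit 8.
 */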
/*
 * We can't use regular spinlocks. In the switcher case, it is possible
 * for an outbound CPU to call power_down() while its inbound counterpart
 * is already live using the same logical CPU number which trips lockdep
 * debugging.
 */
static arch_spinlock_t dcscb_lock = __ARCH_SPIN_LOCK_UNLOCKED;

static void __iomem *dcscb_base;
static int dcscb_use_count[4][2];
static int dcscb_allcpus_mask[2];
static int dcscb_power_up(unsigned int cpu, unsigned int cluster)
{
	unsigned int rst_hold, cpumask = (1 << cpu);
	unsigned int all_mask = dcscb_allcpus_mask[cluster];

	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	if (cpu >= 4 || cluster >= 2)
		return -EINVAL;

	/*
	 * Since this is called with IRQs enabled, and no arch_spin_lock_irq
	 * variant exists, we need to disable IRQs manually here.
	 */
	local_irq_disable();
	arch_spin_lock(&dcscb_lock);

	dcscb_use_count[cpu][cluster]++;
	if (dcscb_use_count[cpu][cluster] == 1) {
		rst_hold = readl_relaxed(dcscb_base + RST_HOLD0 + cluster * 4);
		if (rst_hold & (1 << 8)) {
			/* remove cluster reset and add individual CPU's reset */
			rst_hold &= ~(1 << 8);
			rst_hold |= all_mask;
		}
		rst_hold &= ~(cpumask | (cpumask << 4));
		writel_relaxed(rst_hold, dcscb_base + RST_HOLD0 + cluster * 4);
	} else if (dcscb_use_count[cpu][cluster] != 2) {
		/*
		 * The only possible values are:
		 * 0 = CPU down
		 * 1 = CPU (still) up
		 * 2 = CPU requested to be up before it had a chance
		 *     to actually make itself down.
		 * Any other value is a bug.
		 */
		BUG();
	}

	arch_spin_unlock(&dcscb_lock);
	local_irq_enable();

	return 0;
}
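
/*
 * A worked example of the use-count values above, inferred from the
 * code: a CPU that is up has a count of 1.  If a new power_up request
 * arrives before a pending power_down has run, the count goes to 2 and
 * the reset registers are left alone.  When power_down finally runs it
 * drops the count back to 1, sets skip_wfi, and the CPU stays up.
 */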
static void dcscb_power_down(void)
{
	unsigned int mpidr, cpu, cluster, rst_hold, cpumask, all_mask;
	bool last_man = false, skip_wfi = false;

	mpidr = read_cpuid_mpidr();
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	cpumask = (1 << cpu);
	all_mask = dcscb_allcpus_mask[cluster];

	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	BUG_ON(cpu >= 4 || cluster >= 2);

	__mcpm_cpu_going_down(cpu, cluster);

	arch_spin_lock(&dcscb_lock);
	BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP);
	dcscb_use_count[cpu][cluster]--;
	if (dcscb_use_count[cpu][cluster] == 0) {
		rst_hold = readl_relaxed(dcscb_base + RST_HOLD0 + cluster * 4);
		rst_hold |= cpumask;
		if (((rst_hold | (rst_hold >> 4)) & all_mask) == all_mask) {
			rst_hold |= (1 << 8);
			last_man = true;
		}
		writel_relaxed(rst_hold, dcscb_base + RST_HOLD0 + cluster * 4);
	} else if (dcscb_use_count[cpu][cluster] == 1) {
		/*
		 * A power_up request went ahead of us.
		 * Even if we do not want to shut this CPU down,
		 * the caller expects a certain state as if the WFI
		 * was aborted.  So let's continue with cache cleaning.
		 */
		skip_wfi = true;
	} else
		BUG();

	if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
		arch_spin_unlock(&dcscb_lock);

		/*
		 * Flush all cache levels for this cluster.
		 *
		 * To do so we do:
		 * - Clear the SCTLR.C bit to prevent further cache allocations
		 * - Flush the whole cache
		 * - Clear the ACTLR "SMP" bit to disable local coherency
		 *
		 * Let's do it in the safest possible way i.e. with
		 * no memory access within the following sequence
		 * including to the stack.
		 *
		 * Note: fp is preserved to the stack explicitly prior doing
		 * this since adding it to the clobber list is incompatible
		 * with having CONFIG_FRAME_POINTER=y.
		 */
		asm volatile(
		"str	fp, [sp, #-4]! \n\t"
		"mrc	p15, 0, r0, c1, c0, 0	@ get CR \n\t"
		"bic	r0, r0, #"__stringify(CR_C)" \n\t"
		"mcr	p15, 0, r0, c1, c0, 0	@ set CR \n\t"
		"isb	\n\t"
		"bl	v7_flush_dcache_all \n\t"
		"clrex	\n\t"
		"mrc	p15, 0, r0, c1, c0, 1	@ get AUXCR \n\t"
		"bic	r0, r0, #(1 << 6)	@ disable local coherency \n\t"
		"mcr	p15, 0, r0, c1, c0, 1	@ set AUXCR \n\t"
		"isb	\n\t"
		"dsb	\n\t"
		"ldr	fp, [sp], #4"
		: : : "r0","r1","r2","r3","r4","r5","r6","r7",
		      "r9","r10","lr","memory");

		/*
		 * This is a harmless no-op.  On platforms with a real
		 * outer cache this might either be needed or not,
		 * depending on where the outer cache sits.
		 */
		outer_flush_all();

		/*
		 * Disable cluster-level coherency by masking
		 * incoming snoops and DVM messages:
		 */
		cci_disable_port_by_cpu(mpidr);

		__mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
	} else {
		arch_spin_unlock(&dcscb_lock);

		/*
		 * Flush the local CPU cache.
		 * Let's do it in the safest possible way as above.
		 */
		asm volatile(
		"str	fp, [sp, #-4]! \n\t"
		"mrc	p15, 0, r0, c1, c0, 0	@ get CR \n\t"
		"bic	r0, r0, #"__stringify(CR_C)" \n\t"
		"mcr	p15, 0, r0, c1, c0, 0	@ set CR \n\t"
		"isb	\n\t"
		"bl	v7_flush_dcache_louis \n\t"
		"clrex	\n\t"
		"mrc	p15, 0, r0, c1, c0, 1	@ get AUXCR \n\t"
		"bic	r0, r0, #(1 << 6)	@ disable local coherency \n\t"
		"mcr	p15, 0, r0, c1, c0, 1	@ set AUXCR \n\t"
		"isb	\n\t"
		"dsb	\n\t"
		"ldr	fp, [sp], #4"
		: : : "r0","r1","r2","r3","r4","r5","r6","r7",
		      "r9","r10","lr","memory");
	}

	__mcpm_cpu_down(cpu, cluster);

	/* Now we are prepared for power-down, do it: */
	dsb();
	if (!skip_wfi)
		wfi();

	/* Not dead at this point?  Let our caller cope. */
}
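
/*
 * A numeric sketch of the "last man" test above, assuming a cluster of
 * four CPUs (all_mask == 0xf): with CPUs 0-2 already held in reset
 * (folded rst_hold nibbles == 0x7), CPU 3 adding its own bit yields
 * 0xf == all_mask, so it is the last man and also sets bit 8 to hold
 * the whole cluster in reset.
 */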
static const struct mcpm_platform_ops dcscb_power_ops = {
	.power_up	= dcscb_power_up,
	.power_down	= dcscb_power_down,
};
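
/*
 * Once registered below, these methods are not meant to be called
 * directly: the MCPM core is expected to invoke them through
 * mcpm_cpu_power_up() and mcpm_cpu_power_down().
 */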
static void __init dcscb_usage_count_init(void)
{
	unsigned int mpidr, cpu, cluster;

	mpidr = read_cpuid_mpidr();
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	BUG_ON(cpu >= 4 || cluster >= 2);
	dcscb_use_count[cpu][cluster] = 1;
}
extern void dcscb_power_up_setup(unsigned int affinity_level);
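
/*
 * dcscb_power_up_setup() is implemented in assembly (presumably in
 * dcscb_setup.S alongside this file); it is handed to mcpm_sync_init()
 * below so it can run early on the way up, before coherency is
 * re-enabled for the incoming CPU or cluster.
 */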
static int __init dcscb_init(void)
{
	struct device_node *node;
	unsigned int cfg;
	int ret;

	if (!cci_probed())
		return -ENODEV;

	node = of_find_compatible_node(NULL, NULL, "arm,rtsm,dcscb");
	if (!node)
		return -ENODEV;
	dcscb_base = of_iomap(node, 0);
	if (!dcscb_base)
		return -EADDRNOTAVAIL;
	cfg = readl_relaxed(dcscb_base + DCS_CFG_R);
	dcscb_allcpus_mask[0] = (1 << (((cfg >> 16) >> (0 << 2)) & 0xf)) - 1;
	dcscb_allcpus_mask[1] = (1 << (((cfg >> 16) >> (1 << 2)) & 0xf)) - 1;
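	/*
	 * A worked example for the mask computation above, assuming
	 * DCS_CFG_R carries the CPU count of each cluster in 4-bit
	 * fields from bit 16 up: with 4 CPUs in cluster 0 and 3 in
	 * cluster 1, cfg[19:16] == 4 gives mask 0xf and
	 * cfg[23:20] == 3 gives mask 0x7.
	 */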
	dcscb_usage_count_init();

	ret = mcpm_platform_register(&dcscb_power_ops);
	if (!ret)
		ret = mcpm_sync_init(dcscb_power_up_setup);
	if (ret) {
		iounmap(dcscb_base);
		return ret;
	}

	pr_info("VExpress DCSCB support installed\n");

	/*
	 * Future entries into the kernel can now go
	 * through the cluster entry vectors.
	 */
	vexpress_flags_set(virt_to_phys(mcpm_entry_point));

	return 0;
}

early_initcall(dcscb_init);