/*
 * arch/arm/common/mcpm_entry.c -- entry point for multi-cluster PM
 *
 * Created by:  Nicolas Pitre, March 2012
 * Copyright:   (C) 2012-2013  Linaro Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/irqflags.h>

#include <asm/mcpm.h>
#include <asm/cacheflush.h>
#include <asm/idmap.h>
#include <asm/cputype.h>

extern unsigned long mcpm_entry_vectors[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER];

void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr)
{
	unsigned long val = ptr ? virt_to_phys(ptr) : 0;
	mcpm_entry_vectors[cluster][cpu] = val;
	sync_cache_w(&mcpm_entry_vectors[cluster][cpu]);
}

static const struct mcpm_platform_ops *platform_ops;

int __init mcpm_platform_register(const struct mcpm_platform_ops *ops)
{
	if (platform_ops)
		return -EBUSY;
	platform_ops = ops;
	return 0;
}

int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster)
{
	if (!platform_ops)
		return -EUNATCH; /* try not to shadow power_up errors */
	might_sleep();
	return platform_ops->power_up(cpu, cluster);
}
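
/*
 * Illustrative sketch, not part of the original file: a platform SMP
 * boot path would typically combine mcpm_set_entry_vector() and
 * mcpm_cpu_power_up() as below.  The function name is hypothetical;
 * secondary_startup is the usual ARM secondary entry point.
 */
#if 0
static int example_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	unsigned int mpidr = cpu_logical_map(cpu);
	unsigned int pcpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	unsigned int pcluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

	/* Have the woken CPU re-enter the kernel through the MCPM vector: */
	mcpm_set_entry_vector(pcpu, pcluster, secondary_startup);

	/* Then ask the platform backend to apply power and release reset: */
	return mcpm_cpu_power_up(pcpu, pcluster);
}
#endif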

typedef void (*phys_reset_t)(unsigned long);

void mcpm_cpu_power_down(void)
{
	phys_reset_t phys_reset;

	BUG_ON(!platform_ops);
	BUG_ON(!irqs_disabled());

	/*
	 * Do this before calling into the power_down method,
	 * as it might not always be safe to do afterwards.
	 */
	setup_mm_for_reboot();

	platform_ops->power_down();

	/*
	 * It is possible for a power_up request to happen concurrently
	 * with a power_down request for the same CPU. In this case the
	 * power_down method might not be able to actually enter a
	 * powered down state with the WFI instruction if the power_up
	 * method has removed the required reset condition.  The
	 * power_down method is then allowed to return. We must perform
	 * a re-entry in the kernel as if the power_up method just had
	 * deasserted reset on the CPU.
	 *
	 * To simplify race issues, the platform specific implementation
	 * must accommodate the possibility of unordered calls to
	 * power_down and power_up with a usage count. Therefore, if a
	 * call to power_up is issued for a CPU that is not down, then
	 * the next call to power_down must not attempt a full shutdown
	 * but only do the minimum (normally disabling L1 cache and CPU
	 * coherency) and return just as if a concurrent power_up request
	 * had happened as described above.
	 */

	phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
	phys_reset(virt_to_phys(mcpm_entry_point));

	/* should never get here */
	BUG();
}
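
/*
 * Illustrative sketch, not part of the original file: one way a
 * platform backend could honour the usage count contract described
 * above.  The example_* names and the lock are assumptions for
 * illustration, not existing kernel APIs.
 */
#if 0
static int example_use_count[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER];
static arch_spinlock_t example_lock = __ARCH_SPIN_LOCK_UNLOCKED;

static void example_power_down(void)
{
	unsigned int mpidr = read_cpuid_mpidr();
	unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	bool full_shutdown;

	arch_spin_lock(&example_lock);
	/* If the count reaches zero, no power_up re-requested this CPU: */
	full_shutdown = (--example_use_count[cluster][cpu] == 0);
	arch_spin_unlock(&example_lock);

	if (full_shutdown) {
		/* A real shutdown is safe: */
		example_full_teardown(cpu, cluster);	/* hypothetical */
	} else {
		/*
		 * A concurrent power_up happened: only disable the cache
		 * and coherency, then return so the caller re-enters the
		 * kernel via the reset vector as if just powered up.
		 */
		example_minimal_teardown(cpu, cluster);	/* hypothetical */
	}
}
#endif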

void mcpm_cpu_suspend(u64 expected_residency)
{
	phys_reset_t phys_reset;

	BUG_ON(!platform_ops);
	BUG_ON(!irqs_disabled());

	/* Very similar to mcpm_cpu_power_down() */
	setup_mm_for_reboot();
	platform_ops->suspend(expected_residency);
	phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
	phys_reset(virt_to_phys(mcpm_entry_point));
	BUG();
}

int mcpm_cpu_powered_up(void)
{
	if (!platform_ops)
		return -EUNATCH;
	if (platform_ops->powered_up)
		platform_ops->powered_up();
	return 0;
}

struct sync_struct mcpm_sync;

/*
 * __mcpm_cpu_going_down: Indicates that the cpu is being torn down.
 *    This must be called at the point of committing to teardown of a CPU.
 *    The CPU cache (SCTLR.C bit) is expected to still be active.
 */
void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster)
{
	mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_GOING_DOWN;
	sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
}

/*
 * __mcpm_cpu_down: Indicates that cpu teardown is complete and that the
 *    cluster can be torn down without disrupting this CPU.
 *    To avoid deadlocks, this must be called before a CPU is powered down.
 *    The CPU cache (SCTLR.C bit) is expected to be off.
 *    However L2 cache might or might not be active.
 */
void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster)
{
	dmb();
	mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_DOWN;
	sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
	dsb_sev();
}

/*
 * __mcpm_outbound_leave_critical: Leave the cluster teardown critical section.
 * @state: the final state of the cluster:
 *     CLUSTER_UP: no destructive teardown was done and the cluster has been
 *         restored to the previous state (CPU cache still active); or
 *     CLUSTER_DOWN: the cluster has been torn-down, ready for power-off
 *         (CPU cache disabled, L2 cache either enabled or disabled).
 */
void __mcpm_outbound_leave_critical(unsigned int cluster, int state)
{
	dmb();
	mcpm_sync.clusters[cluster].cluster = state;
	sync_cache_w(&mcpm_sync.clusters[cluster].cluster);
	dsb_sev();
}

/*
 * __mcpm_outbound_enter_critical: Enter the cluster teardown critical section.
 * This function should be called by the last man, after local CPU teardown
 * is complete.  CPU cache expected to be active.
 *
 * Returns:
 *     false: the critical section was not entered because an inbound CPU was
 *         observed, or the cluster is already being set up;
 *     true: the critical section was entered: it is now safe to tear down the
 *         cluster.
 */
bool __mcpm_outbound_enter_critical(unsigned int cpu, unsigned int cluster)
{
	unsigned int i;
	struct mcpm_sync_struct *c = &mcpm_sync.clusters[cluster];

	/* Warn inbound CPUs that the cluster is being torn down: */
	c->cluster = CLUSTER_GOING_DOWN;
	sync_cache_w(&c->cluster);

	/* Back out if the inbound cluster is already in the critical region: */
	sync_cache_r(&c->inbound);
	if (c->inbound == INBOUND_COMING_UP)
		goto abort;

	/*
	 * Wait for all CPUs to get out of the GOING_DOWN state, so that local
	 * teardown is complete on each CPU before tearing down the cluster.
	 *
	 * If any CPU has been woken up again from the DOWN state, then we
	 * shouldn't be taking the cluster down at all: abort in that case.
	 */
	sync_cache_r(&c->cpus);
	for (i = 0; i < MAX_CPUS_PER_CLUSTER; i++) {
		int cpustate;

		if (i == cpu)
			continue;

		/* Spin (with wfe) until this CPU leaves the GOING_DOWN state: */
		while (1) {
			cpustate = c->cpus[i].cpu;
			if (cpustate != CPU_GOING_DOWN)
				break;

			wfe();
			sync_cache_r(&c->cpus[i].cpu);
		}

		switch (cpustate) {
		case CPU_DOWN:
			continue;

		default:
			goto abort;
		}
	}

	return true;

abort:
	__mcpm_outbound_leave_critical(cluster, CLUSTER_UP);
	return false;
}
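
/*
 * Illustrative sketch, not part of the original file: how a backend's
 * power_down path might drive the state machine helpers above.  The
 * example_* cache/coherency helpers are hypothetical placeholders.
 */
#if 0
static void example_power_down_common(unsigned int cpu, unsigned int cluster,
				      bool last_man)
{
	/* Commit to tearing this CPU down while its cache is still on: */
	__mcpm_cpu_going_down(cpu, cluster);

	if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
		/* Critical section won: the whole cluster may go down. */
		example_cluster_cache_and_coherency_exit();	/* hypothetical */
		__mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
	} else {
		/* Not last man, or an inbound CPU appeared: this CPU only. */
		example_cpu_cache_and_coherency_exit();		/* hypothetical */
	}

	/* Local teardown complete, CPU cache now off: */
	__mcpm_cpu_down(cpu, cluster);

	/* ... then wfi() and wait to be powered off ... */
}
#endif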

int __mcpm_cluster_state(unsigned int cluster)
{
	sync_cache_r(&mcpm_sync.clusters[cluster].cluster);
	return mcpm_sync.clusters[cluster].cluster;
}

extern unsigned long mcpm_power_up_setup_phys;

int __init mcpm_sync_init(
	void (*power_up_setup)(unsigned int affinity_level))
{
	unsigned int i, j, mpidr, this_cluster;

	BUILD_BUG_ON(MCPM_SYNC_CLUSTER_SIZE * MAX_NR_CLUSTERS != sizeof mcpm_sync);
	BUG_ON((unsigned long)&mcpm_sync & (__CACHE_WRITEBACK_GRANULE - 1));

	/*
	 * Set initial CPU and cluster states.
	 * Only one cluster is assumed to be active at this point.
	 */
	for (i = 0; i < MAX_NR_CLUSTERS; i++) {
		mcpm_sync.clusters[i].cluster = CLUSTER_DOWN;
		mcpm_sync.clusters[i].inbound = INBOUND_NOT_COMING_UP;
		for (j = 0; j < MAX_CPUS_PER_CLUSTER; j++)
			mcpm_sync.clusters[i].cpus[j].cpu = CPU_DOWN;
	}
	mpidr = read_cpuid_mpidr();
	this_cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	for_each_online_cpu(i)
		mcpm_sync.clusters[this_cluster].cpus[i].cpu = CPU_UP;
	mcpm_sync.clusters[this_cluster].cluster = CLUSTER_UP;
	sync_cache_w(&mcpm_sync);

	if (power_up_setup) {
		mcpm_power_up_setup_phys = virt_to_phys(power_up_setup);
		sync_cache_w(&mcpm_power_up_setup_phys);
	}

	return 0;
}
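
/*
 * Illustrative sketch, not part of the original file: a platform init
 * path would typically register its backend and then initialize the
 * sync structures, passing an assembly helper that restores caches and
 * coherency at each affinity level on the way back up.  All example_*
 * names are assumptions for illustration.
 */
#if 0
static const struct mcpm_platform_ops example_power_ops = {
	.power_up	= example_power_up,
	.power_down	= example_power_down,
	.suspend	= example_suspend,
	.powered_up	= example_powered_up,
};

extern void example_power_up_setup(unsigned int affinity_level);

static int __init example_mcpm_init(void)
{
	int ret;

	ret = mcpm_platform_register(&example_power_ops);
	if (!ret)
		ret = mcpm_sync_init(example_power_up_setup);
	return ret;
}
early_initcall(example_mcpm_init);
#endif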