/*
 * arch/arm/include/asm/mcpm.h
 *
 * Created by:  Nicolas Pitre, April 2012
 * Copyright:   (C) 2012-2013  Linaro Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/*
 * Maximum number of possible clusters / CPUs per cluster.
 *
 * This should be sufficient for quite a while, while keeping the
 * (assembly) code simpler.  When this starts to grow then we'll have
 * to consider dynamic allocation.
 */
#define MAX_CPUS_PER_CLUSTER	4
#define MAX_NR_CLUSTERS		2

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <asm/cacheflush.h>

/*
 * Platform specific code should use this symbol to set up secondary
 * entry location for processors to use when released from reset.
 */
extern void mcpm_entry_point(void);

/*
 * This is used to indicate where the given CPU from given cluster should
 * branch once it is ready to re-enter the kernel using ptr, or NULL if it
 * should be gated.  A gated CPU is held in a WFE loop until its vector
 * becomes non NULL.
 */
void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr);
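
/*
 * Minimal sketch (an illustration, not part of the original header): a
 * NULL vector parks the CPU in the WFE loop, a real address releases it.
 * secondary_startup is ARM's usual secondary entry stub; the function
 * name below is invented for the example.
 */
#if 0
static void example_gate_and_release(unsigned cpu, unsigned cluster)
{
	extern void secondary_startup(void);

	mcpm_set_entry_vector(cpu, cluster, NULL);		/* park in WFE */
	mcpm_set_entry_vector(cpu, cluster, secondary_startup);	/* release */
}
#endif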

/*
 * CPU/cluster power operations API for higher subsystems to use.
 */

/**
 * mcpm_cpu_power_up - make given CPU in given cluster runnable
 *
 * @cpu: CPU number within given cluster
 * @cluster: cluster number for the CPU
 *
 * The identified CPU is brought out of reset.  If the cluster was powered
 * down then it is brought up as well, taking care not to let the other CPUs
 * in the cluster run, and ensuring appropriate cluster setup.
 *
 * Caller must ensure the appropriate entry vector is initialized with
 * mcpm_set_entry_vector() prior to calling this.
 *
 * This must be called in a sleepable context.  However, the implementation
 * is strongly encouraged to return early and let the operation happen
 * asynchronously, especially when significant delays are expected.
 *
 * If the operation cannot be performed then an error code is returned.
 */
int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster);
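
/*
 * Sketch (assumed usage, not mandated by this header) of how an SMP boot
 * hook might combine the two calls above: set the entry vector first,
 * then request power-up.  The function name is invented for the example.
 */
#if 0
static int example_boot_secondary(unsigned int pcpu, unsigned int pcluster)
{
	extern void secondary_startup(void);

	/* Where the woken CPU should branch once it re-enters the kernel: */
	mcpm_set_entry_vector(pcpu, pcluster, secondary_startup);

	/* May return before the CPU is actually running: */
	return mcpm_cpu_power_up(pcpu, pcluster);
}
#endif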

/**
 * mcpm_cpu_power_down - power the calling CPU down
 *
 * The calling CPU is powered down.
 *
 * If this CPU is found to be the "last man standing" in the cluster
 * then the cluster is prepared for power-down too.
 *
 * This must be called with interrupts disabled.
 *
 * This does not return.  Re-entry in the kernel is expected via
 * mcpm_entry_point.
 */
void mcpm_cpu_power_down(void);
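
/*
 * Sketch of a hotplug "die" path (assumed usage; the function name is
 * invented).  Generic hotplug code has already disabled interrupts on
 * the dying CPU before this point.
 */
#if 0
static void example_cpu_die(unsigned int cpu)
{
	mcpm_cpu_power_down();	/* does not return... */
	BUG();			/* ...so this is never reached */
}
#endif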

/**
 * mcpm_cpu_suspend - bring the calling CPU into a suspended state
 *
 * @expected_residency: duration in microseconds the CPU is expected
 *			to remain suspended, or 0 if unknown/infinity.
 *
 * The calling CPU is suspended.  The expected residency argument is used
 * as a hint by the platform specific backend to implement the appropriate
 * sleep state level according to the knowledge it has on wake-up latency
 * for the given hardware.
 *
 * If this CPU is found to be the "last man standing" in the cluster
 * then the cluster may be prepared for power-down too, if the expected
 * residency makes it worthwhile.
 *
 * This must be called with interrupts disabled.
 *
 * This does not return.  Re-entry in the kernel is expected via
 * mcpm_entry_point.
 */
void mcpm_cpu_suspend(u64 expected_residency);
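
/*
 * Sketch of a deep-idle entry (assumed usage; names other than the two
 * mcpm_* calls are illustrative).  cpu_resume is ARM's standard resume
 * entry point; the residency hint lets the backend pick between
 * CPU-only and whole-cluster shutdown.
 */
#if 0
static void example_enter_deep_idle(unsigned int cpu, unsigned int cluster,
				    u64 predicted_residency_us)
{
	extern void cpu_resume(void);

	local_irq_disable();
	mcpm_set_entry_vector(cpu, cluster, cpu_resume);
	mcpm_cpu_suspend(predicted_residency_us);	/* does not return */
}
#endif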

/**
 * mcpm_cpu_powered_up - housekeeping work after a CPU has been powered up
 *
 * This lets the platform specific backend code perform needed housekeeping
 * work.  This must be called by the newly activated CPU as soon as it is
 * fully operational in kernel space, before it enables interrupts.
 *
 * If the operation cannot be performed then an error code is returned.
 */
int mcpm_cpu_powered_up(void);
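
/*
 * Sketch of the wake-up counterpart (assumed usage; the function name is
 * invented): the freshly woken CPU reports in before enabling interrupts.
 */
#if 0
static void example_secondary_init(unsigned int cpu)
{
	if (mcpm_cpu_powered_up())
		pr_warn("CPU%u: MCPM housekeeping failed\n", cpu);
	/* Only after this point may the CPU enable interrupts. */
}
#endif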

/*
 * Platform specific methods used in the implementation of the above API.
 */
struct mcpm_platform_ops {
	int (*power_up)(unsigned int cpu, unsigned int cluster);
	void (*power_down)(void);
	void (*suspend)(u64);
	void (*powered_up)(void);
};

/**
 * mcpm_platform_register - register platform specific power methods
 *
 * @ops: mcpm_platform_ops structure to register
 *
 * An error is returned if the registration has been done previously.
 */
int __init mcpm_platform_register(const struct mcpm_platform_ops *ops);
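
/*
 * Sketch of a backend plugging its methods in (assumed; the my_pm_*
 * names are invented for the example).  Registration fails if another
 * backend registered first.
 */
#if 0
static int my_pm_power_up(unsigned int cpu, unsigned int cluster);
static void my_pm_power_down(void);
static void my_pm_suspend(u64 expected_residency);
static void my_pm_powered_up(void);

static const struct mcpm_platform_ops my_pm_power_ops = {
	.power_up	= my_pm_power_up,
	.power_down	= my_pm_power_down,
	.suspend	= my_pm_suspend,
	.powered_up	= my_pm_powered_up,
};
#endif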

/* Synchronisation structures for coordinating safe cluster setup/teardown: */

/*
 * When modifying this structure, make sure you update the MCPM_SYNC_ defines
 * to match.
 */
struct mcpm_sync_struct {
	/* individual CPU states */
	struct {
		s8 cpu __aligned(__CACHE_WRITEBACK_GRANULE);
	} cpus[MAX_CPUS_PER_CLUSTER];

	/* cluster state */
	s8 cluster __aligned(__CACHE_WRITEBACK_GRANULE);

	/* inbound-side state */
	s8 inbound __aligned(__CACHE_WRITEBACK_GRANULE);
};

struct sync_struct {
	struct mcpm_sync_struct clusters[MAX_NR_CLUSTERS];
};

extern unsigned long sync_phys;	/* physical address of *mcpm_sync */

void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster);
void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster);
void __mcpm_outbound_leave_critical(unsigned int cluster, int state);
bool __mcpm_outbound_enter_critical(unsigned int this_cpu, unsigned int cluster);
int __mcpm_cluster_state(unsigned int cluster);
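
/*
 * Condensed sketch (assumed flow; locking and error handling omitted) of
 * how a backend's power-down path might use these helpers for the
 * "last man" election, using the state values defined at the end of
 * this file.  The function name is invented for the example.
 */
#if 0
static void example_power_down_path(unsigned int cpu, unsigned int cluster)
{
	__mcpm_cpu_going_down(cpu, cluster);

	if (__mcpm_outbound_enter_critical(cpu, cluster)) {
		/* Last man: tear the cluster down, then publish the state. */
		__mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
	}

	__mcpm_cpu_down(cpu, cluster);
	/* Now enter the low-power state; wake via mcpm_entry_point. */
}
#endif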

int __init mcpm_sync_init(
	void (*power_up_setup)(unsigned int affinity_level));

void __init mcpm_smp_set_ops(void);
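
/*
 * Sketch of typical init ordering (assumed; my_power_up_setup stands in
 * for the platform's early, MMU-off setup routine, and my_pm_power_ops
 * continues the registration example above).
 */
#if 0
static void my_power_up_setup(unsigned int affinity_level);

static int __init my_mcpm_init(void)
{
	int ret;

	ret = mcpm_platform_register(&my_pm_power_ops);
	if (!ret)
		ret = mcpm_sync_init(my_power_up_setup);
	if (!ret)
		mcpm_smp_set_ops();	/* route SMP ops through MCPM */
	return ret;
}
early_initcall(my_mcpm_init);
#endif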

#else

/*
 * asm-offsets.h causes trouble when included in .c files, and cacheflush.h
 * cannot be included in asm files.  Let's work around the conflict like this.
 */
#include <asm/asm-offsets.h>
#define __CACHE_WRITEBACK_GRANULE CACHE_WRITEBACK_GRANULE

#endif /* ! __ASSEMBLY__ */

/* Definitions for mcpm_sync_struct */
#define CPU_DOWN		0x11
#define CPU_COMING_UP		0x12
#define CPU_UP			0x13
#define CPU_GOING_DOWN		0x14

#define CLUSTER_DOWN		0x21
#define CLUSTER_UP		0x22
#define CLUSTER_GOING_DOWN	0x23

#define INBOUND_NOT_COMING_UP	0x31
#define INBOUND_COMING_UP	0x32

/*
 * Offsets for the mcpm_sync_struct members, for use in asm.
 * We don't want to make them global to the kernel via asm-offsets.c.
 */
#define MCPM_SYNC_CLUSTER_CPUS	0
#define MCPM_SYNC_CPU_SIZE	__CACHE_WRITEBACK_GRANULE
#define MCPM_SYNC_CLUSTER_CLUSTER \
	(MCPM_SYNC_CLUSTER_CPUS + MCPM_SYNC_CPU_SIZE * MAX_CPUS_PER_CLUSTER)
#define MCPM_SYNC_CLUSTER_INBOUND \
	(MCPM_SYNC_CLUSTER_CLUSTER + __CACHE_WRITEBACK_GRANULE)
#define MCPM_SYNC_CLUSTER_SIZE \
	(MCPM_SYNC_CLUSTER_INBOUND + __CACHE_WRITEBACK_GRANULE)
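
/*
 * Illustrative build-time check (an addition, not in the original
 * header): the hand-maintained offsets above must match the C layout of
 * struct mcpm_sync_struct, since each s8 member is padded out to one
 * __CACHE_WRITEBACK_GRANULE by its alignment.
 */
#if 0
static void __maybe_unused mcpm_sync_offset_checks(void)
{
	BUILD_BUG_ON(offsetof(struct mcpm_sync_struct, cluster) !=
		     MCPM_SYNC_CLUSTER_CLUSTER);
	BUILD_BUG_ON(offsetof(struct mcpm_sync_struct, inbound) !=
		     MCPM_SYNC_CLUSTER_INBOUND);
	BUILD_BUG_ON(sizeof(struct mcpm_sync_struct) != MCPM_SYNC_CLUSTER_SIZE);
}
#endif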