// SPDX-License-Identifier: GPL-2.0
/*
 * SMP support for SoCs with APMU
 *
 * Copyright (C) 2014  Renesas Electronics Corporation
 * Copyright (C) 2013  Magnus Damm
 */
#include <linux/cpu_pm.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/smp.h>
#include <linux/suspend.h>
#include <linux/threads.h>
#include <asm/cacheflush.h>
#include <asm/cp15.h>
#include <asm/proc-fns.h>
#include <asm/smp_plat.h>
#include <asm/suspend.h>

#include "common.h"
#include "rcar-gen2.h"
31 #define WUPCR_OFFS 0x10 /* Wake Up Control Register */
32 #define PSTR_OFFS 0x40 /* Power Status Register */
33 #define CPUNCR_OFFS(n) (0x100 + (0x10 * (n)))
34 /* CPUn Power Status Control Register */
35 #define DBGRCR_OFFS 0x180 /* Debug Resource Reset Control Reg. */
37 /* Power Status Register */
38 #define CPUNST(r, n) (((r) >> (n * 4)) & 3) /* CPUn Status Bit */
39 #define CPUST_RUN 0 /* Run Mode */
40 #define CPUST_STANDBY 3 /* CoreStandby Mode */
42 /* Debug Resource Reset Control Register */
43 #define DBGCPUREN BIT(24) /* CPU Other Reset Request Enable */
44 #define DBGCPUNREN(n) BIT((n) + 20) /* CPUn Reset Request Enable */
45 #define DBGCPUPREN BIT(19) /* CPU Peripheral Reset Req. Enable */
47 static int __maybe_unused
apmu_power_on(void __iomem
*p
, int bit
)
49 /* request power on */
50 writel_relaxed(BIT(bit
), p
+ WUPCR_OFFS
);
52 /* wait for APMU to finish */
53 while (readl_relaxed(p
+ WUPCR_OFFS
) != 0)
59 static int __maybe_unused
apmu_power_off(void __iomem
*p
, int bit
)
61 /* request Core Standby for next WFI */
62 writel_relaxed(3, p
+ CPUNCR_OFFS(bit
));
66 static int __maybe_unused
apmu_power_off_poll(void __iomem
*p
, int bit
)
70 for (k
= 0; k
< 1000; k
++) {
71 if (CPUNST(readl_relaxed(p
+ PSTR_OFFS
), bit
) == CPUST_STANDBY
)
80 static int __maybe_unused
apmu_wrap(int cpu
, int (*fn
)(void __iomem
*p
, int cpu
))
82 void __iomem
*p
= apmu_cpus
[cpu
].iomem
;
84 return p
? fn(p
, apmu_cpus
[cpu
].bit
) : -EINVAL
;
87 #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_SUSPEND)
88 /* nicked from arch/arm/mach-exynos/hotplug.c */
89 static inline void cpu_enter_lowpower_a15(void)
94 " mrc p15, 0, %0, c1, c0, 0\n"
96 " mcr p15, 0, %0, c1, c0, 0\n"
107 " mrc p15, 0, %0, c1, c0, 1\n"
109 " mcr p15, 0, %0, c1, c0, 1\n"
118 static void shmobile_smp_apmu_cpu_shutdown(unsigned int cpu
)
121 /* Select next sleep mode using the APMU */
122 apmu_wrap(cpu
, apmu_power_off
);
124 /* Do ARM specific CPU shutdown */
125 cpu_enter_lowpower_a15();
129 #if defined(CONFIG_HOTPLUG_CPU)
/*
 * CPU hotplug "die" handler: runs on the dying CPU itself and never
 * returns — it ends in the shared mach-shmobile sleep/reset loop.
 */
static void shmobile_smp_apmu_cpu_die(unsigned int cpu)
{
	/* For this particular CPU deregister boot vector */
	shmobile_smp_hook(cpu, 0, 0);

	/* Shutdown CPU core */
	shmobile_smp_apmu_cpu_shutdown(cpu);

	/* jump to shared mach-shmobile sleep / reset code */
	shmobile_smp_sleep();
}
142 static int shmobile_smp_apmu_cpu_kill(unsigned int cpu
)
144 return apmu_wrap(cpu
, apmu_power_off_poll
);
148 #if defined(CONFIG_SUSPEND)
149 static int shmobile_smp_apmu_do_suspend(unsigned long cpu
)
151 shmobile_smp_hook(cpu
, __pa_symbol(cpu_resume
), 0);
152 shmobile_smp_apmu_cpu_shutdown(cpu
);
153 cpu_do_idle(); /* WFI selects Core Standby */
157 static inline void cpu_leave_lowpower(void)
161 asm volatile("mrc p15, 0, %0, c1, c0, 0\n"
163 " mcr p15, 0, %0, c1, c0, 0\n"
164 " mrc p15, 0, %0, c1, c0, 1\n"
166 " mcr p15, 0, %0, c1, c0, 1\n"
168 : "Ir" (CR_C
), "Ir" (0x40)
172 static int shmobile_smp_apmu_enter_suspend(suspend_state_t state
)
174 cpu_suspend(smp_processor_id(), shmobile_smp_apmu_do_suspend
);
175 cpu_leave_lowpower();
179 void __init
shmobile_smp_apmu_suspend_init(void)
181 shmobile_suspend_ops
.enter
= shmobile_smp_apmu_enter_suspend
;
186 static void apmu_init_cpu(struct resource
*res
, int cpu
, int bit
)
190 if ((cpu
>= ARRAY_SIZE(apmu_cpus
)) || apmu_cpus
[cpu
].iomem
)
193 apmu_cpus
[cpu
].iomem
= ioremap(res
->start
, resource_size(res
));
194 apmu_cpus
[cpu
].bit
= bit
;
196 pr_debug("apmu ioremap %d %d %pr\n", cpu
, bit
, res
);
198 /* Setup for debug mode */
199 x
= readl(apmu_cpus
[cpu
].iomem
+ DBGRCR_OFFS
);
200 x
|= DBGCPUREN
| DBGCPUNREN(bit
) | DBGCPUPREN
;
201 writel(x
, apmu_cpus
[cpu
].iomem
+ DBGRCR_OFFS
);
204 static const struct of_device_id apmu_ids
[] = {
205 { .compatible
= "renesas,apmu" },
209 static void apmu_parse_dt(void (*fn
)(struct resource
*res
, int cpu
, int bit
))
211 struct device_node
*np_apmu
, *np_cpu
;
215 for_each_matching_node(np_apmu
, apmu_ids
) {
216 /* only enable the cluster that includes the boot CPU */
217 bool is_allowed
= false;
219 for (bit
= 0; bit
< CONFIG_NR_CPUS
; bit
++) {
220 np_cpu
= of_parse_phandle(np_apmu
, "cpus", bit
);
223 if (of_cpu_node_to_id(np_cpu
) == 0) {
233 for (bit
= 0; bit
< CONFIG_NR_CPUS
; bit
++) {
234 np_cpu
= of_parse_phandle(np_apmu
, "cpus", bit
);
238 index
= of_cpu_node_to_id(np_cpu
);
240 !of_address_to_resource(np_apmu
, 0, &res
))
241 fn(&res
, index
, bit
);
248 static void __init
shmobile_smp_apmu_setup_boot(void)
250 /* install boot code shared by all CPUs */
251 shmobile_boot_fn
= __pa_symbol(shmobile_smp_boot
);
252 shmobile_boot_fn_gen2
= shmobile_boot_fn
;
255 static int shmobile_smp_apmu_boot_secondary(unsigned int cpu
,
256 struct task_struct
*idle
)
258 /* For this particular CPU register boot vector */
259 shmobile_smp_hook(cpu
, __pa_symbol(shmobile_boot_apmu
), 0);
261 return apmu_wrap(cpu
, apmu_power_on
);
264 static void __init
shmobile_smp_apmu_prepare_cpus_dt(unsigned int max_cpus
)
266 shmobile_smp_apmu_setup_boot();
267 apmu_parse_dt(apmu_init_cpu
);
271 static struct smp_operations apmu_smp_ops __initdata
= {
272 .smp_prepare_cpus
= shmobile_smp_apmu_prepare_cpus_dt
,
273 .smp_boot_secondary
= shmobile_smp_apmu_boot_secondary
,
274 #ifdef CONFIG_HOTPLUG_CPU
275 .cpu_can_disable
= shmobile_smp_cpu_can_disable
,
276 .cpu_die
= shmobile_smp_apmu_cpu_die
,
277 .cpu_kill
= shmobile_smp_apmu_cpu_kill
,
281 CPU_METHOD_OF_DECLARE(shmobile_smp_apmu
, "renesas,apmu", &apmu_smp_ops
);
282 #endif /* CONFIG_SMP */