// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2013 MundoReader S.L.
 * Author: Heiko Stuebner <heiko@sntech.de>
 */

#include <linux/delay.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>

#include <linux/reset.h>
#include <linux/cpu.h>
#include <asm/cacheflush.h>
#include <asm/cp15.h>
#include <asm/smp_scu.h>
#include <asm/smp_plat.h>
#include <asm/mach/map.h>

#include "core.h"

static void __iomem *scu_base_addr;
static void __iomem *sram_base_addr;
static int ncores;

#define PMU_PWRDN_CON           0x08
#define PMU_PWRDN_ST            0x0c

#define PMU_PWRDN_SCU           4
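
/*
 * A bit set in PMU_PWRDN_CON powers the corresponding domain down; a bit
 * cleared in PMU_PWRDN_ST means that domain is currently on (see
 * pmu_power_domain_is_on() and pmu_set_power_domain() below).
 */
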
static struct regmap *pmu;
static int has_pmu = true;

static int pmu_power_domain_is_on(int pd)
{
        u32 val;
        int ret;

        ret = regmap_read(pmu, PMU_PWRDN_ST, &val);
        if (ret < 0)
                return ret;

        return !(val & BIT(pd));
}

static struct reset_control *rockchip_get_core_reset(int cpu)
{
        struct device *dev = get_cpu_device(cpu);
        struct device_node *np;

        /* The cpu device is only available after the initial core bringup */
        if (dev)
                np = dev->of_node;
        else
                np = of_get_cpu_node(cpu, NULL);

        return of_reset_control_get_exclusive(np, NULL);
}
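
/*
 * Power-domain bit indices match core numbers on these SoCs, which is why
 * the callers below simply pass "0 + cpu". A core's reset line is asserted
 * around power-down so the remaining cores do not stall.
 */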
static int pmu_set_power_domain(int pd, bool on)
{
        u32 val = (on) ? 0 : BIT(pd);
        struct reset_control *rstc = rockchip_get_core_reset(pd);
        int ret;

        if (IS_ERR(rstc) && read_cpuid_part() != ARM_CPU_PART_CORTEX_A9) {
                pr_err("%s: could not get reset control for core %d\n",
                       __func__, pd);
                return PTR_ERR(rstc);
        }

        /*
         * We need to soft reset the cpu when we turn off the cpu power domain,
         * or else the active processors might be stalled when the individual
         * processor is powered down.
         */
        if (!IS_ERR(rstc) && !on)
                reset_control_assert(rstc);

        if (has_pmu) {
                ret = regmap_update_bits(pmu, PMU_PWRDN_CON, BIT(pd), val);
                if (ret < 0) {
                        pr_err("%s: could not update power domain\n",
                               __func__);
                        return ret;
                }

                ret = -1;
                while (ret != on) {
                        ret = pmu_power_domain_is_on(pd);
                        if (ret < 0) {
                                pr_err("%s: could not read power domain state\n",
                                       __func__);
                                return ret;
                        }
                }
        }

        if (!IS_ERR(rstc)) {
                if (on)
                        reset_control_deassert(rstc);
                reset_control_put(rstc);
        }

        return 0;
}

/*
 * Handling of CPU cores
 */
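
/*
 * Boot flow for a secondary core: power up its pmu domain, then (on
 * non-Cortex-A9 parts) hand the bootrom the entry address and the magic
 * value through the sram mailbox and wake the core with dsb_sev().
 * Cortex-A9 parts instead start from the trampoline that
 * rockchip_smp_prepare_sram() copied to the start of the sram.
 */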
static int rockchip_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
        int ret;

        if (!sram_base_addr || (has_pmu && !pmu)) {
                pr_err("%s: sram or pmu missing for cpu boot\n", __func__);
                return -ENXIO;
        }

        if (cpu >= ncores) {
                pr_err("%s: cpu %d outside maximum number of cpus %d\n",
                       __func__, cpu, ncores);
                return -ENXIO;
        }

        /* start the core */
        ret = pmu_set_power_domain(0 + cpu, true);
        if (ret < 0)
                return ret;

        if (read_cpuid_part() != ARM_CPU_PART_CORTEX_A9) {
                /*
                 * We communicate with the bootrom to activate the cpus other
                 * than cpu0. After a blob of initialization code, they will
                 * stay in wfe state; once activated, they will check
                 * the mailbox:
                 * sram_base_addr + 4: 0xdeadbeaf
                 * sram_base_addr + 8: start address for pc
                 * cpu0 needs to wait for the other cpus to enter the wfe
                 * state. The wait time is affected by many aspects
                 * (e.g. cpu frequency, bootrom frequency, sram frequency, ...).
                 */
                mdelay(1); /* ensure the cpus other than cpu0 to startup */

                writel(__pa_symbol(secondary_startup), sram_base_addr + 8);
                writel(0xDEADBEAF, sram_base_addr + 4);
                dsb_sev();
        }

        return 0;
}

/**
 * rockchip_smp_prepare_sram - populate necessary sram block
 * Starting cores execute the code residing at the start of the on-chip sram
 * after power-on. Therefore make sure this sram region is reserved and
 * big enough. After this check, copy the trampoline code that directs the
 * core to the real startup code in ram into the sram region.
 * @node: mmio-sram device node
 */
static int __init rockchip_smp_prepare_sram(struct device_node *node)
{
        unsigned int trampoline_sz = &rockchip_secondary_trampoline_end -
                                            &rockchip_secondary_trampoline;
        struct resource res;
        unsigned int rsize;
        int ret;

        ret = of_address_to_resource(node, 0, &res);
        if (ret < 0) {
                pr_err("%s: could not get address for node %pOF\n",
                       __func__, node);
                return ret;
        }

        rsize = resource_size(&res);
        if (rsize < trampoline_sz) {
                pr_err("%s: reserved block with size 0x%x is too small for trampoline size 0x%x\n",
                       __func__, rsize, trampoline_sz);
                return -EINVAL;
        }

        /* set the boot function for the sram code */
        rockchip_boot_fn = __pa_symbol(secondary_startup);

        /* copy the trampoline to sram, that runs during startup of the core */
        memcpy(sram_base_addr, &rockchip_secondary_trampoline, trampoline_sz);
        flush_cache_all();
        outer_clean_range(0, trampoline_sz);

        dsb_sev();

        return 0;
}

static const struct regmap_config rockchip_pmu_regmap_config = {
        .name = "rockchip-pmu",
        .reg_bits = 32,
        .val_bits = 32,
        .reg_stride = 4,
};
static int __init rockchip_smp_prepare_pmu(void)
{
        struct device_node *node;
        void __iomem *pmu_base;

        /*
         * This function is only called via smp_ops->smp_prepare_cpus().
         * That only happens if a "/cpus" device tree node exists
         * and has an "enable-method" property that selects the SMP
         * operations defined herein.
         */
        node = of_find_node_by_path("/cpus");

        pmu = syscon_regmap_lookup_by_phandle(node, "rockchip,pmu");
        of_node_put(node);
        if (!IS_ERR(pmu))
                return 0;

        pmu = syscon_regmap_lookup_by_compatible("rockchip,rk3066-pmu");
        if (!IS_ERR(pmu))
                return 0;

        /* fallback, create our own regmap for the pmu area */
        pmu = NULL;
        node = of_find_compatible_node(NULL, NULL, "rockchip,rk3066-pmu");
        if (!node) {
                pr_err("%s: could not find pmu dt node\n", __func__);
                return -ENODEV;
        }

        pmu_base = of_iomap(node, 0);
        of_node_put(node);
        if (!pmu_base) {
                pr_err("%s: could not map pmu registers\n", __func__);
                return -ENOMEM;
        }

        pmu = regmap_init_mmio(NULL, pmu_base, &rockchip_pmu_regmap_config);
        if (IS_ERR(pmu)) {
                int ret = PTR_ERR(pmu);

                iounmap(pmu_base);
                pmu = NULL;
                pr_err("%s: regmap init failed\n", __func__);
                return ret;
        }

        return 0;
}
static void __init rockchip_smp_prepare_cpus(unsigned int max_cpus)
{
        struct device_node *node;
        unsigned int i;

        node = of_find_compatible_node(NULL, NULL, "rockchip,rk3066-smp-sram");
        if (!node) {
                pr_err("%s: could not find sram dt node\n", __func__);
                return;
        }

        sram_base_addr = of_iomap(node, 0);
        if (!sram_base_addr) {
                pr_err("%s: could not map sram registers\n", __func__);
                of_node_put(node);
                return;
        }

        if (has_pmu && rockchip_smp_prepare_pmu()) {
                of_node_put(node);
                return;
        }

        if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9) {
                if (rockchip_smp_prepare_sram(node)) {
                        of_node_put(node);
                        return;
                }

                /* enable the SCU power domain */
                pmu_set_power_domain(PMU_PWRDN_SCU, true);

                of_node_put(node);
                node = of_find_compatible_node(NULL, NULL, "arm,cortex-a9-scu");
                if (!node) {
                        pr_err("%s: missing scu\n", __func__);
                        return;
                }

                scu_base_addr = of_iomap(node, 0);
                if (!scu_base_addr) {
                        pr_err("%s: could not map scu registers\n", __func__);
                        of_node_put(node);
                        return;
                }

                /*
                 * While the number of cpus is gathered from dt, also get the
                 * number of cores from the scu to verify this value when
                 * booting the cores.
                 */
                ncores = scu_get_core_count(scu_base_addr);
                pr_err("%s: ncores %d\n", __func__, ncores);

                scu_enable(scu_base_addr);
        } else {
                unsigned int l2ctlr;

                asm ("mrc p15, 1, %0, c9, c0, 2\n" : "=r" (l2ctlr));
                ncores = ((l2ctlr >> 24) & 0x3) + 1;
        }
        of_node_put(node);

        /* Make sure that all cores except the first are really off */
        for (i = 1; i < ncores; i++)
                pmu_set_power_domain(0 + i, false);
}
static void __init rk3036_smp_prepare_cpus(unsigned int max_cpus)
{
        has_pmu = false;

        rockchip_smp_prepare_cpus(max_cpus);
}
#ifdef CONFIG_HOTPLUG_CPU
static int rockchip_cpu_kill(unsigned int cpu)
{
        /*
         * We need a delay here to ensure that the dying CPU can finish
         * executing v7_exit_coherency_flush() and reach the WFI/WFE state
         * prior to having the power domain disabled.
         */
        mdelay(1);

        pmu_set_power_domain(0 + cpu, false);
        return 1;
}

static void rockchip_cpu_die(unsigned int cpu)
{
        v7_exit_coherency_flush(louis);
        while (1)
                cpu_do_idle();
}
#endif

static const struct smp_operations rk3036_smp_ops __initconst = {
        .smp_prepare_cpus       = rk3036_smp_prepare_cpus,
        .smp_boot_secondary     = rockchip_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
        .cpu_kill               = rockchip_cpu_kill,
        .cpu_die                = rockchip_cpu_die,
#endif
};

static const struct smp_operations rockchip_smp_ops __initconst = {
        .smp_prepare_cpus       = rockchip_smp_prepare_cpus,
        .smp_boot_secondary     = rockchip_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
        .cpu_kill               = rockchip_cpu_kill,
        .cpu_die                = rockchip_cpu_die,
#endif
};
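
/*
 * These entries match the "enable-method" property of the /cpus device
 * tree node and select which of the smp_operations above is used.
 */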
CPU_METHOD_OF_DECLARE(rk3036_smp, "rockchip,rk3036-smp", &rk3036_smp_ops);
CPU_METHOD_OF_DECLARE(rk3066_smp, "rockchip,rk3066-smp", &rockchip_smp_ops);