arch/arm/mach-zynq/platsmp.c
/*
 * This file contains Xilinx specific SMP code, used to start up
 * the second processor.
 *
 * Copyright (C) 2011-2013 Xilinx
 *
 * based on linux/arch/arm/mach-realview/platsmp.c
 *
 * Copyright (C) 2002 ARM Ltd.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/export.h>
#include <linux/jiffies.h>
#include <linux/init.h>
#include <linux/io.h>
#include <asm/cacheflush.h>
#include <asm/smp_scu.h>
#include <linux/irqchip/arm-gic.h>
#include "common.h"
/*
 * Store the number of cores in the system.
 * scu_get_core_count() must be called from the __init section and therefore
 * can't be called from zynq_cpun_start(), which is not in the __init section.
 */
static int ncores;
int zynq_cpun_start(u32 address, int cpu)
{
	u32 trampoline_code_size = &zynq_secondary_trampoline_end -
						&zynq_secondary_trampoline;

	/* MS: Expectation that SLCR is directly mapped and accessible */
	/* Not possible to jump to a non-aligned address */
	if (!(address & 3) && (!address || (address >= trampoline_code_size))) {
		/* Store pointer to ioremap area which points to address 0x0 */
		static u8 __iomem *zero;
		u32 trampoline_size = &zynq_secondary_trampoline_jump -
						&zynq_secondary_trampoline;

		zynq_slcr_cpu_stop(cpu);
		if (address) {
			if (__pa(PAGE_OFFSET)) {
				zero = ioremap(0, trampoline_code_size);
				if (!zero) {
					pr_warn("BOOTUP jump vectors not accessible\n");
					return -1;
				}
			} else {
				zero = (__force u8 __iomem *)PAGE_OFFSET;
			}

			/*
			 * This is an elegant way to jump to any address:
			 * 0x0: Load address at 0x8 to r0
			 * 0x4: Jump by mov instruction
			 * 0x8: Jumping address
			 */
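			/*
			 * Illustrative sketch only: the real trampoline is
			 * zynq_secondary_trampoline in headsmp.S and its exact
			 * instructions may differ. Roughly:
			 *
			 *	ldr	r0, [pc]	@ 0x0: PC reads as .+8, so this
			 *				@      loads the word at 0x8
			 *	mov	pc, r0		@ 0x4: jump to it
			 *	.word	0		@ 0x8: boot address, patched by
			 *				@      the writel() below
			 */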
			memcpy((__force void *)zero, &zynq_secondary_trampoline,
							trampoline_size);
			writel(address, zero + trampoline_size);

			flush_cache_all();
			outer_flush_range(0, trampoline_code_size);
			smp_wmb();

			if (__pa(PAGE_OFFSET))
				iounmap(zero);
		}
		zynq_slcr_cpu_start(cpu);

		return 0;
	}

	pr_warn("Can't start CPU%d: Wrong starting address %x\n", cpu, address);

	return -1;
}
EXPORT_SYMBOL(zynq_cpun_start);
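
/*
 * zynq_cpun_start() is exported above so that code outside this file (for
 * example, a driver that loads firmware for CPU1 and then releases that core)
 * can restart a CPU at an arbitrary physical address; such callers are
 * assumed here rather than shown.
 */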
static int zynq_boot_secondary(unsigned int cpu,
						struct task_struct *idle)
{
	return zynq_cpun_start(virt_to_phys(zynq_secondary_startup), cpu);
}

/*
 * Initialise the CPU possible map early - this describes the CPUs
 * which may be present or become present in the system.
 */
static void __init zynq_smp_init_cpus(void)
{
	int i;

	ncores = scu_get_core_count(zynq_scu_base);

	for (i = 0; i < ncores && i < CONFIG_NR_CPUS; i++)
		set_cpu_possible(i, true);
}

static void __init zynq_smp_prepare_cpus(unsigned int max_cpus)
{
	scu_enable(zynq_scu_base);
}

#ifdef CONFIG_HOTPLUG_CPU
static int zynq_cpu_kill(unsigned cpu)
{
	zynq_slcr_cpu_stop(cpu);
	return 1;
}
#endif

struct smp_operations zynq_smp_ops __initdata = {
	.smp_init_cpus		= zynq_smp_init_cpus,
	.smp_prepare_cpus	= zynq_smp_prepare_cpus,
	.smp_boot_secondary	= zynq_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_die		= zynq_platform_cpu_die,
	.cpu_kill		= zynq_cpu_kill,
#endif
};
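
/*
 * For context, a minimal sketch of how a machine descriptor typically picks
 * up this ops table (the real Zynq descriptor lives in common.c and carries
 * more fields; the name and field list here are illustrative assumptions):
 *
 *	DT_MACHINE_START(XILINX_EP107, "Xilinx Zynq Platform")
 *		.smp		= smp_ops(zynq_smp_ops),
 *		...
 *	MACHINE_END
 */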