arch/arm64/kernel/smp_spin_table.c
/*
 * Spin Table SMP initialisation
 *
 * Copyright (C) 2013 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/delay.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/smp.h>
#include <linux/types.h>
#include <linux/mm.h>

#include <asm/cacheflush.h>
#include <asm/cpu_ops.h>
#include <asm/cputype.h>
#include <asm/io.h>
#include <asm/smp_plat.h>

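/*
 * Overview: with the "spin-table" enable method, the bootloader parks each
 * secondary CPU in a loop polling a per-CPU 64-bit mailbox whose physical
 * address is given by the devicetree "cpu-release-addr" property. To bring
 * a CPU online, the kernel writes the physical address of
 * secondary_holding_pen into that mailbox (smp_spin_table_cpu_prepare) and
 * later releases the CPU from the pen by storing its MPIDR in
 * secondary_holding_pen_release (smp_spin_table_cpu_boot). Both the mailbox
 * and the release flag are read by CPUs still running with their MMU and
 * caches off, hence the .mmuoff.data.read section annotation and the
 * explicit cache maintenance below.
 */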
extern void secondary_holding_pen(void);
volatile unsigned long __section(".mmuoff.data.read")
secondary_holding_pen_release = INVALID_HWID;
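
/*
 * Per-CPU release (mailbox) address, read from the "cpu-release-addr"
 * devicetree property in smp_spin_table_cpu_init() and written in
 * smp_spin_table_cpu_prepare().
 */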
static phys_addr_t cpu_release_addr[NR_CPUS];

/*
 * Write secondary_holding_pen_release in a way that is guaranteed to be
 * visible to all observers, irrespective of whether they're taking part
 * in coherency or not.  This is necessary for the hotplug code to work
 * reliably.
 */
static void write_pen_release(u64 val)
{
	void *start = (void *)&secondary_holding_pen_release;
	unsigned long size = sizeof(secondary_holding_pen_release);

	secondary_holding_pen_release = val;
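	/*
	 * Clean the updated value out to the point of coherency so that a
	 * secondary still executing with its MMU and caches off observes
	 * the store.
	 */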
	__flush_dcache_area(start, size);
}

static int smp_spin_table_cpu_init(unsigned int cpu)
{
	struct device_node *dn;
	int ret;

	dn = of_get_cpu_node(cpu, NULL);
	if (!dn)
		return -ENODEV;

	/*
	 * Determine the address from which the CPU is polling.
	 */
	ret = of_property_read_u64(dn, "cpu-release-addr",
				   &cpu_release_addr[cpu]);
	if (ret)
		pr_err("CPU %d: missing or invalid cpu-release-addr property\n",
		       cpu);

	of_node_put(dn);

	return ret;
}

static int smp_spin_table_cpu_prepare(unsigned int cpu)
{
	__le64 __iomem *release_addr;

	if (!cpu_release_addr[cpu])
		return -ENODEV;

	/*
	 * The cpu-release-addr may or may not be inside the linear mapping.
	 * As ioremap_cache will either give us a new mapping or reuse the
	 * existing linear mapping, we can use it to cover both cases. In
	 * either case the memory will be MT_NORMAL.
	 */
	release_addr = ioremap_cache(cpu_release_addr[cpu],
				     sizeof(*release_addr));
	if (!release_addr)
		return -ENOMEM;

	/*
	 * We write the release address as LE regardless of the native
	 * endianness of the kernel. Therefore, any boot-loaders that
	 * read this address need to convert this address to the
	 * boot-loader's endianness before jumping. This is mandated by
	 * the boot protocol.
	 */
	writeq_relaxed(__pa_symbol(secondary_holding_pen), release_addr);
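	/*
	 * Push the newly written pen address out to the point of
	 * coherency; the parked CPU polls it with its caches off.
	 */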
	__flush_dcache_area((__force void *)release_addr,
			    sizeof(*release_addr));

	/*
	 * Send an event to wake up the secondary CPU.
	 */
	sev();

	iounmap(release_addr);

	return 0;
}

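/*
 * For reference (illustrative sketch only, not kernel code): the boot
 * protocol expects the bootloader to park each secondary roughly as
 *
 *	while ((addr = *cpu_release_addr) == 0)
 *		wfe();
 *	jump_to(addr);
 *
 * which is why both smp_spin_table_cpu_prepare() above and
 * smp_spin_table_cpu_boot() below issue sev() after publishing their
 * updates.
 */
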
static int smp_spin_table_cpu_boot(unsigned int cpu)
{
	/*
	 * Update the pen release flag with the physical ID (MPIDR) of the
	 * CPU being booted; secondary_holding_pen spins until it sees its
	 * own ID here.
	 */
	write_pen_release(cpu_logical_map(cpu));

	/*
	 * Send an event, causing the secondaries to read pen_release.
	 */
	sev();

	return 0;
}

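/*
 * Selected when a CPU node's "enable-method" devicetree property is
 * "spin-table"; the cpu_ops framework matches that string against .name.
 */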
const struct cpu_operations smp_spin_table_ops = {
	.name		= "spin-table",
	.cpu_init	= smp_spin_table_cpu_init,
	.cpu_prepare	= smp_spin_table_cpu_prepare,
	.cpu_boot	= smp_spin_table_cpu_boot,
};