arch/x86/kernel/apic/x2apic_cluster.c
// SPDX-License-Identifier: GPL-2.0

#include <linux/cpuhotplug.h>
#include <linux/cpumask.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/apic.h>

#include "local.h"
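/*
 * In x2APIC cluster mode the 32-bit logical destination ID is split into a
 * 16-bit cluster ID (bits 31:16) and a 16-bit position bitmask within the
 * cluster (bits 15:0), so up to 16 CPUs of one cluster can be targeted by a
 * single logical-mode IPI.  Each cluster is tracked by one cluster_mask.
 */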
struct cluster_mask {
	unsigned int	clusterid;
	int		node;
	struct cpumask	mask;
};

static DEFINE_PER_CPU(u32, x86_cpu_to_logical_apicid);
static DEFINE_PER_CPU(cpumask_var_t, ipi_mask);
static DEFINE_PER_CPU(struct cluster_mask *, cluster_masks);
static struct cluster_mask *cluster_hotplug_mask;
static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
	return x2apic_enabled();
}
static void x2apic_send_IPI(int cpu, int vector)
{
	u32 dest = per_cpu(x86_cpu_to_logical_apicid, cpu);

	x2apic_wrmsr_fence();
	__x2apic_send_IPI_dest(dest, vector, APIC_DEST_LOGICAL);
}
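/*
 * Send @vector to all CPUs in @mask with at most one IPI per cluster: the
 * logical APIC IDs of the targeted CPUs within a cluster share the cluster
 * ID, so OR-ing them yields a single destination covering the whole group.
 */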
static void
__x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest)
{
	unsigned int cpu, clustercpu;
	struct cpumask *tmpmsk;
	unsigned long flags;
	u32 dest;

	x2apic_wrmsr_fence();
	local_irq_save(flags);

	tmpmsk = this_cpu_cpumask_var_ptr(ipi_mask);
	cpumask_copy(tmpmsk, mask);
	/* If IPI should not be sent to self, clear current CPU */
	if (apic_dest != APIC_DEST_ALLINC)
		__cpumask_clear_cpu(smp_processor_id(), tmpmsk);

	/* Collapse cpus in a cluster so a single IPI per cluster is sent */
	for_each_cpu(cpu, tmpmsk) {
		struct cluster_mask *cmsk = per_cpu(cluster_masks, cpu);

		dest = 0;
		for_each_cpu_and(clustercpu, tmpmsk, &cmsk->mask)
			dest |= per_cpu(x86_cpu_to_logical_apicid, clustercpu);

		if (!dest)
			continue;

		__x2apic_send_IPI_dest(dest, vector, apic->dest_logical);
		/* Remove cluster CPUs from tmpmask */
		cpumask_andnot(tmpmsk, tmpmsk, &cmsk->mask);
	}

	local_irq_restore(flags);
}
static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
{
	__x2apic_send_IPI_mask(mask, vector, APIC_DEST_ALLINC);
}

static void
x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
{
	__x2apic_send_IPI_mask(mask, vector, APIC_DEST_ALLBUT);
}

static void x2apic_send_IPI_allbutself(int vector)
{
	__x2apic_send_IPI_shorthand(vector, APIC_DEST_ALLBUT);
}

static void x2apic_send_IPI_all(int vector)
{
	__x2apic_send_IPI_shorthand(vector, APIC_DEST_ALLINC);
}

static u32 x2apic_calc_apicid(unsigned int cpu)
{
	return per_cpu(x86_cpu_to_logical_apicid, cpu);
}
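/*
 * Runs on the CPU that is coming online: record its logical APIC ID (LDR)
 * and link it to the cluster_mask of its cluster, either by finding an
 * online CPU that already carries the same cluster ID or by consuming the
 * preallocated hotplug spare.
 */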
static void init_x2apic_ldr(void)
{
	struct cluster_mask *cmsk = this_cpu_read(cluster_masks);
	u32 cluster, apicid = apic_read(APIC_LDR);
	unsigned int cpu;

	this_cpu_write(x86_cpu_to_logical_apicid, apicid);

	if (cmsk)
		goto update;

	cluster = apicid >> 16;
	for_each_online_cpu(cpu) {
		cmsk = per_cpu(cluster_masks, cpu);
		/* Matching cluster found. Link and update it. */
		if (cmsk && cmsk->clusterid == cluster)
			goto update;
	}
	cmsk = cluster_hotplug_mask;
	cmsk->clusterid = cluster;
	cluster_hotplug_mask = NULL;
update:
	this_cpu_write(cluster_masks, cmsk);
	cpumask_set_cpu(smp_processor_id(), &cmsk->mask);
}
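/*
 * Called from the CPUHP_X2APIC_PREPARE stage where sleeping allocations are
 * still allowed: set aside a node-local spare cluster_mask for the upcoming
 * CPU.  init_x2apic_ldr() runs later on that CPU with interrupts disabled
 * and therefore must not allocate memory itself.
 */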
static int alloc_clustermask(unsigned int cpu, int node)
{
	if (per_cpu(cluster_masks, cpu))
		return 0;
	/*
	 * If a hotplug spare mask exists, check whether it's on the right
	 * node. If not, free it and allocate a new one.
	 */
	if (cluster_hotplug_mask) {
		if (cluster_hotplug_mask->node == node)
			return 0;
		kfree(cluster_hotplug_mask);
	}

	cluster_hotplug_mask = kzalloc_node(sizeof(*cluster_hotplug_mask),
					    GFP_KERNEL, node);
	if (!cluster_hotplug_mask)
		return -ENOMEM;
	cluster_hotplug_mask->node = node;
	return 0;
}
static int x2apic_prepare_cpu(unsigned int cpu)
{
	if (alloc_clustermask(cpu, cpu_to_node(cpu)) < 0)
		return -ENOMEM;
	if (!zalloc_cpumask_var(&per_cpu(ipi_mask, cpu), GFP_KERNEL))
		return -ENOMEM;
	return 0;
}
static int x2apic_dead_cpu(unsigned int dead_cpu)
{
	struct cluster_mask *cmsk = per_cpu(cluster_masks, dead_cpu);

	if (cmsk)
		cpumask_clear_cpu(dead_cpu, &cmsk->mask);
	free_cpumask_var(per_cpu(ipi_mask, dead_cpu));
	return 0;
}
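/*
 * Selecting the driver also registers the hotplug prepare/dead callbacks
 * above and sets up the logical destination of the boot CPU, which is
 * already online at probe time.
 */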
static int x2apic_cluster_probe(void)
{
	if (!x2apic_mode)
		return 0;

	if (cpuhp_setup_state(CPUHP_X2APIC_PREPARE, "x86/x2apic:prepare",
			      x2apic_prepare_cpu, x2apic_dead_cpu) < 0) {
		pr_err("Failed to register X2APIC_PREPARE\n");
		return 0;
	}
	init_x2apic_ldr();
	return 1;
}
static struct apic apic_x2apic_cluster __ro_after_init = {

	.name				= "cluster x2apic",
	.probe				= x2apic_cluster_probe,
	.acpi_madt_oem_check		= x2apic_acpi_madt_oem_check,
	.apic_id_valid			= x2apic_apic_id_valid,
	.apic_id_registered		= x2apic_apic_id_registered,

	.irq_delivery_mode		= dest_Fixed,
	.irq_dest_mode			= 1, /* logical */

	.disable_esr			= 0,
	.dest_logical			= APIC_DEST_LOGICAL,
	.check_apicid_used		= NULL,

	.init_apic_ldr			= init_x2apic_ldr,

	.ioapic_phys_id_map		= NULL,
	.setup_apic_routing		= NULL,
	.cpu_present_to_apicid		= default_cpu_present_to_apicid,
	.apicid_to_cpu_present		= NULL,
	.check_phys_apicid_present	= default_check_phys_apicid_present,
	.phys_pkg_id			= x2apic_phys_pkg_id,

	.get_apic_id			= x2apic_get_apic_id,
	.set_apic_id			= x2apic_set_apic_id,

	.calc_dest_apicid		= x2apic_calc_apicid,

	.send_IPI			= x2apic_send_IPI,
	.send_IPI_mask			= x2apic_send_IPI_mask,
	.send_IPI_mask_allbutself	= x2apic_send_IPI_mask_allbutself,
	.send_IPI_allbutself		= x2apic_send_IPI_allbutself,
	.send_IPI_all			= x2apic_send_IPI_all,
	.send_IPI_self			= x2apic_send_IPI_self,

	.inquire_remote_apic		= NULL,

	.read				= native_apic_msr_read,
	.write				= native_apic_msr_write,
	.eoi_write			= native_apic_msr_eoi_write,
	.icr_read			= native_x2apic_icr_read,
	.icr_write			= native_x2apic_icr_write,
	.wait_icr_idle			= native_x2apic_wait_icr_idle,
	.safe_wait_icr_idle		= native_safe_x2apic_wait_icr_idle,
};

apic_driver(apic_x2apic_cluster);