/*
 * APIC driver for "bigsmp" xAPIC machines with more than 8 virtual CPUs.
 *
 * Drives the local APIC in "clustered mode".
 */
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/dmi.h>
#include <linux/smp.h>

#include <asm/apicdef.h>
#include <asm/fixmap.h>
#include <asm/mpspec.h>
#include <asm/apic.h>
#include <asm/ipi.h>
static unsigned bigsmp_get_apic_id(unsigned long x)
{
        return (x >> 24) & 0xFF;
}

static int bigsmp_apic_id_registered(void)
{
        return 1;
}

static const struct cpumask *bigsmp_target_cpus(void)
{
#ifdef CONFIG_SMP
        return cpu_online_mask;
#else
        return cpumask_of(0);
#endif
}

static unsigned long bigsmp_check_apicid_used(physid_mask_t *map, int apicid)
{
        return 0;
}

static unsigned long bigsmp_check_apicid_present(int bit)
{
        return 1;
}

static int bigsmp_early_logical_apicid(int cpu)
{
        /* on bigsmp, logical apicid is the same as physical */
        return early_per_cpu(x86_cpu_to_apicid, cpu);
}
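
/*
 * Compute the LDR value for @cpu: keep the bits outside the logical ID
 * field of the current LDR and place the CPU's BIOS APIC ID into the
 * logical destination field.
 */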
static inline unsigned long calculate_ldr(int cpu)
{
        unsigned long val, id;

        val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
        id = per_cpu(x86_bios_cpu_apicid, cpu);
        val |= SET_APIC_LOGICAL_ID(id);

        return val;
}

/*
 * Set up the logical destination ID.
 *
 * Intel recommends to set DFR, LDR and TPR before enabling
 * an APIC.  See e.g. "AP-388 82489DX User's Manual" (Intel
 * document number 292116).  So here it goes...
 */
static void bigsmp_init_apic_ldr(void)
{
        unsigned long val;
        int cpu = smp_processor_id();

        apic_write(APIC_DFR, APIC_DFR_FLAT);
        val = calculate_ldr(cpu);
        apic_write(APIC_LDR, val);
}
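
/*
 * Nothing to reconfigure here beyond reporting the mode: bigsmp uses
 * physical destination mode with fixed interrupt delivery.
 */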
static void bigsmp_setup_apic_routing(void)
{
        printk(KERN_INFO
                "Enabling APIC mode: Physflat. Using %d I/O APICs\n",
                nr_ioapics);
}

static int bigsmp_cpu_present_to_apicid(int mps_cpu)
{
        if (mps_cpu < nr_cpu_ids)
                return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);

        return BAD_APICID;
}

static void bigsmp_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap)
{
        /* For clustered we don't have a good way to do this yet - hack */
        physids_promote(0xFFL, retmap);
}

static int bigsmp_check_phys_apicid_present(int phys_apicid)
{
        return 1;
}
/* As we are using single CPU as destination, pick only one CPU here */
static unsigned int bigsmp_cpu_mask_to_apicid(const struct cpumask *cpumask)
{
        int cpu = cpumask_first(cpumask);

        if (cpu < nr_cpu_ids)
                return cpu_physical_id(cpu);
        return BAD_APICID;
}

static unsigned int bigsmp_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
                                                  const struct cpumask *andmask)
{
        int cpu;

        /*
         * We're using fixed IRQ delivery, can only return one phys APIC ID.
         * May as well be the first.
         */
        for_each_cpu_and(cpu, cpumask, andmask) {
                if (cpumask_test_cpu(cpu, cpu_online_mask))
                        return cpu_physical_id(cpu);
        }
        return BAD_APICID;
}

static int bigsmp_phys_pkg_id(int cpuid_apic, int index_msb)
{
        return cpuid_apic >> index_msb;
}

static inline void bigsmp_send_IPI_mask(const struct cpumask *mask, int vector)
{
        default_send_IPI_mask_sequence_phys(mask, vector);
}

static void bigsmp_send_IPI_allbutself(int vector)
{
        default_send_IPI_mask_allbutself_phys(cpu_online_mask, vector);
}

static void bigsmp_send_IPI_all(int vector)
{
        bigsmp_send_IPI_mask(cpu_online_mask, vector);
}
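
/*
 * DMI quirks: known systems on which apic=bigsmp must be forced.
 */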
static int dmi_bigsmp; /* can be set by dmi scanners */

static int hp_ht_bigsmp(const struct dmi_system_id *d)
{
        printk(KERN_NOTICE "%s detected: force use of apic=bigsmp\n", d->ident);
        dmi_bigsmp = 1;

        return 0;
}

static const struct dmi_system_id bigsmp_dmi_table[] = {
        { hp_ht_bigsmp, "HP ProLiant DL760 G2",
                { DMI_MATCH(DMI_BIOS_VENDOR, "HP"),
                  DMI_MATCH(DMI_BIOS_VERSION, "P44-"),
                }
        },
        { hp_ht_bigsmp, "HP ProLiant DL740",
                { DMI_MATCH(DMI_BIOS_VENDOR, "HP"),
                  DMI_MATCH(DMI_BIOS_VERSION, "P47-"),
                }
        },
        { } /* NULL entry stops DMI scanning */
};
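
/*
 * Each vector is allocated for exactly one CPU: the allocation domain
 * for @cpu contains only that CPU.
 */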
static void bigsmp_vector_allocation_domain(int cpu, struct cpumask *retmask)
{
        cpumask_clear(retmask);
        cpumask_set_cpu(cpu, retmask);
}
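
/*
 * Select bigsmp either because earlier setup code already decided to
 * default to it (def_to_bigsmp) or because one of the DMI quirks above
 * matches this system.
 */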
static int probe_bigsmp(void)
{
        if (def_to_bigsmp)
                dmi_bigsmp = 1;
        else
                dmi_check_system(bigsmp_dmi_table);

        return dmi_bigsmp;
}
static struct apic apic_bigsmp = {

        .name                           = "bigsmp",
        .probe                          = probe_bigsmp,
        .acpi_madt_oem_check            = NULL,
        .apic_id_registered             = bigsmp_apic_id_registered,

        .irq_delivery_mode              = dest_Fixed,
        /* phys delivery to target CPU: */
        .irq_dest_mode                  = 0,

        .target_cpus                    = bigsmp_target_cpus,
        .disable_esr                    = 1,
        .dest_logical                   = 0,
        .check_apicid_used              = bigsmp_check_apicid_used,
        .check_apicid_present           = bigsmp_check_apicid_present,

        .vector_allocation_domain       = bigsmp_vector_allocation_domain,
        .init_apic_ldr                  = bigsmp_init_apic_ldr,

        .ioapic_phys_id_map             = bigsmp_ioapic_phys_id_map,
        .setup_apic_routing             = bigsmp_setup_apic_routing,
        .multi_timer_check              = NULL,
        .cpu_present_to_apicid          = bigsmp_cpu_present_to_apicid,
        .apicid_to_cpu_present          = physid_set_mask_of_physid,
        .setup_portio_remap             = NULL,
        .check_phys_apicid_present      = bigsmp_check_phys_apicid_present,
        .enable_apic_mode               = NULL,
        .phys_pkg_id                    = bigsmp_phys_pkg_id,
        .mps_oem_check                  = NULL,

        .get_apic_id                    = bigsmp_get_apic_id,
        .set_apic_id                    = NULL,
        .apic_id_mask                   = 0xFF << 24,

        .cpu_mask_to_apicid             = bigsmp_cpu_mask_to_apicid,
        .cpu_mask_to_apicid_and         = bigsmp_cpu_mask_to_apicid_and,

        .send_IPI_mask                  = bigsmp_send_IPI_mask,
        .send_IPI_mask_allbutself       = NULL,
        .send_IPI_allbutself            = bigsmp_send_IPI_allbutself,
        .send_IPI_all                   = bigsmp_send_IPI_all,
        .send_IPI_self                  = default_send_IPI_self,

        .trampoline_phys_low            = DEFAULT_TRAMPOLINE_PHYS_LOW,
        .trampoline_phys_high           = DEFAULT_TRAMPOLINE_PHYS_HIGH,

        .wait_for_init_deassert         = default_wait_for_init_deassert,

        .smp_callin_clear_local_apic    = NULL,
        .inquire_remote_apic            = default_inquire_remote_apic,

        .read                           = native_apic_mem_read,
        .write                          = native_apic_mem_write,
        .icr_read                       = native_apic_icr_read,
        .icr_write                      = native_apic_icr_write,
        .wait_icr_idle                  = native_apic_wait_icr_idle,
        .safe_wait_icr_idle             = native_safe_apic_wait_icr_idle,

        .x86_32_early_logical_apicid    = bigsmp_early_logical_apicid,
};
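
/*
 * Late probe: if bigsmp is selected, switch the global APIC driver and
 * rewrite each valid per-CPU logical APIC ID to its bigsmp (physical)
 * value.
 */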
void __init generic_bigsmp_probe(void)
{
        unsigned int cpu;

        if (!probe_bigsmp())
                return;

        apic = &apic_bigsmp;

        for_each_possible_cpu(cpu) {
                if (early_per_cpu(x86_cpu_to_logical_apicid,
                                  cpu) == BAD_APICID)
                        continue;
                early_per_cpu(x86_cpu_to_logical_apicid, cpu) =
                        bigsmp_early_logical_apicid(cpu);
        }

        pr_info("Overriding APIC driver with %s\n", apic_bigsmp.name);
}

/* Make this driver visible to the generic x86 APIC probing code. */
apic_driver(apic_bigsmp);