/*
 * APIC driver for "bigsmp" xAPIC machines with more than 8 virtual CPUs.
 *
 * Drives the local APIC in "clustered mode".
 */
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/dmi.h>
#include <linux/smp.h>

#include <asm/apicdef.h>
#include <asm/fixmap.h>
#include <asm/mpspec.h>
#include <asm/apic.h>
#include <asm/ipi.h>
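
/*
 * In xAPIC mode the physical APIC ID lives in bits 31:24 of the APIC ID
 * register, hence the shift-and-mask below (and the 0xFF << 24
 * apic_id_mask further down).
 */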
static unsigned bigsmp_get_apic_id(unsigned long x)
{
	return (x >> 24) & 0xFF;
}

static int bigsmp_apic_id_registered(void)
{
	return 1;
}

static const struct cpumask *bigsmp_target_cpus(void)
{
	return cpu_online_mask;
}

static unsigned long bigsmp_check_apicid_used(physid_mask_t *map, int apicid)
{
	return 0;
}

static unsigned long bigsmp_check_apicid_present(int bit)
{
	return 1;
}
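
/*
 * With the flat destination format programmed in bigsmp_init_apic_ldr()
 * below, the logical APIC ID occupies bits 31:24 of the LDR;
 * SET_APIC_LOGICAL_ID() places this CPU's BIOS APIC ID there.
 */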
static inline unsigned long calculate_ldr(int cpu)
{
	unsigned long val, id;

	val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
	id = per_cpu(x86_bios_cpu_apicid, cpu);
	val |= SET_APIC_LOGICAL_ID(id);

	return val;
}

/*
 * Set up the logical destination ID.
 *
 * Intel recommends to set DFR, LDR and TPR before enabling
 * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
 * document number 292116). So here it goes...
 */
static void bigsmp_init_apic_ldr(void)
{
	unsigned long val;
	int cpu = smp_processor_id();

	apic_write(APIC_DFR, APIC_DFR_FLAT);
	val = calculate_ldr(cpu);
	apic_write(APIC_LDR, val);
}

static void bigsmp_setup_apic_routing(void)
{
	printk(KERN_INFO
		"Enabling APIC mode: Physflat. Using %d I/O APICs\n",
		nr_ioapics);
}

static int bigsmp_apicid_to_node(int logical_apicid)
{
	return apicid_2_node[hard_smp_processor_id()];
}

static int bigsmp_cpu_present_to_apicid(int mps_cpu)
{
	if (mps_cpu < nr_cpu_ids)
		return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);

	return BAD_APICID;
}

/* Mapping from cpu number to logical apicid */
static inline int bigsmp_cpu_to_logical_apicid(int cpu)
{
	if (cpu >= nr_cpu_ids)
		return BAD_APICID;
	return cpu_physical_id(cpu);
}

static void bigsmp_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap)
{
	/* For clustered we don't have a good way to do this yet - hack */
	physids_promote(0xFFL, retmap);
}

static int bigsmp_check_phys_apicid_present(int phys_apicid)
{
	return 1;
}

/* As we are using single CPU as destination, pick only one CPU here */
static unsigned int bigsmp_cpu_mask_to_apicid(const struct cpumask *cpumask)
{
	return bigsmp_cpu_to_logical_apicid(cpumask_first(cpumask));
}

static unsigned int bigsmp_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
						  const struct cpumask *andmask)
{
	int cpu;

	/*
	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
	 * May as well be the first.
	 */
	for_each_cpu_and(cpu, cpumask, andmask) {
		if (cpumask_test_cpu(cpu, cpu_online_mask))
			break;
	}
	return bigsmp_cpu_to_logical_apicid(cpu);
}

static int bigsmp_phys_pkg_id(int cpuid_apic, int index_msb)
{
	return cpuid_apic >> index_msb;
}
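
/*
 * IPIs are sent in physical destination mode, one target CPU at a time,
 * so more than eight CPUs can be reached (flat logical mode tops out at
 * eight destinations).
 */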
static inline void bigsmp_send_IPI_mask(const struct cpumask *mask, int vector)
{
	default_send_IPI_mask_sequence_phys(mask, vector);
}

static void bigsmp_send_IPI_allbutself(int vector)
{
	default_send_IPI_mask_allbutself_phys(cpu_online_mask, vector);
}

static void bigsmp_send_IPI_all(int vector)
{
	bigsmp_send_IPI_mask(cpu_online_mask, vector);
}
static int dmi_bigsmp; /* can be set by dmi scanners */

static int hp_ht_bigsmp(const struct dmi_system_id *d)
{
	printk(KERN_NOTICE "%s detected: force use of apic=bigsmp\n", d->ident);
	dmi_bigsmp = 1;

	return 0;
}

static const struct dmi_system_id bigsmp_dmi_table[] = {
	{ hp_ht_bigsmp, "HP ProLiant DL760 G2",
		{	DMI_MATCH(DMI_BIOS_VENDOR, "HP"),
			DMI_MATCH(DMI_BIOS_VERSION, "P44-"),
		}
	},

	{ hp_ht_bigsmp, "HP ProLiant DL740",
		{	DMI_MATCH(DMI_BIOS_VENDOR, "HP"),
			DMI_MATCH(DMI_BIOS_VERSION, "P47-"),
		}
	},
	{ } /* NULL entry stops DMI scanning */
};
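
/*
 * With fixed, physical delivery each vector targets exactly one CPU, so
 * the allocation domain for a vector is just that CPU.
 */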
static void bigsmp_vector_allocation_domain(int cpu, struct cpumask *retmask)
{
	cpumask_clear(retmask);
	cpumask_set_cpu(cpu, retmask);
}

static int probe_bigsmp(void)
{
	dmi_check_system(bigsmp_dmi_table);

	return dmi_bigsmp;
}
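
/*
 * The apic_bigsmp ops table below wires the routines above into the
 * generic 32-bit APIC driver framework; hooks this driver does not need
 * are left NULL.
 */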
struct apic apic_bigsmp = {

	.name				= "bigsmp",
	.probe				= probe_bigsmp,
	.acpi_madt_oem_check		= NULL,
	.apic_id_registered		= bigsmp_apic_id_registered,

	.irq_delivery_mode		= dest_Fixed,
	/* phys delivery to target CPU: */
	.irq_dest_mode			= 0,

	.target_cpus			= bigsmp_target_cpus,
	.check_apicid_used		= bigsmp_check_apicid_used,
	.check_apicid_present		= bigsmp_check_apicid_present,

	.vector_allocation_domain	= bigsmp_vector_allocation_domain,
	.init_apic_ldr			= bigsmp_init_apic_ldr,

	.ioapic_phys_id_map		= bigsmp_ioapic_phys_id_map,
	.setup_apic_routing		= bigsmp_setup_apic_routing,
	.multi_timer_check		= NULL,
	.apicid_to_node			= bigsmp_apicid_to_node,
	.cpu_to_logical_apicid		= bigsmp_cpu_to_logical_apicid,
	.cpu_present_to_apicid		= bigsmp_cpu_present_to_apicid,
	.apicid_to_cpu_present		= physid_set_mask_of_physid,
	.setup_portio_remap		= NULL,
	.check_phys_apicid_present	= bigsmp_check_phys_apicid_present,
	.enable_apic_mode		= NULL,
	.phys_pkg_id			= bigsmp_phys_pkg_id,
	.mps_oem_check			= NULL,

	.get_apic_id			= bigsmp_get_apic_id,
	.apic_id_mask			= 0xFF << 24,

	.cpu_mask_to_apicid		= bigsmp_cpu_mask_to_apicid,
	.cpu_mask_to_apicid_and		= bigsmp_cpu_mask_to_apicid_and,

	.send_IPI_mask			= bigsmp_send_IPI_mask,
	.send_IPI_mask_allbutself	= NULL,
	.send_IPI_allbutself		= bigsmp_send_IPI_allbutself,
	.send_IPI_all			= bigsmp_send_IPI_all,
	.send_IPI_self			= default_send_IPI_self,

	.trampoline_phys_low		= DEFAULT_TRAMPOLINE_PHYS_LOW,
	.trampoline_phys_high		= DEFAULT_TRAMPOLINE_PHYS_HIGH,

	.wait_for_init_deassert		= default_wait_for_init_deassert,

	.smp_callin_clear_local_apic	= NULL,
	.inquire_remote_apic		= default_inquire_remote_apic,

	.read				= native_apic_mem_read,
	.write				= native_apic_mem_write,
	.icr_read			= native_apic_icr_read,
	.icr_write			= native_apic_icr_write,
	.wait_icr_idle			= native_apic_wait_icr_idle,
	.safe_wait_icr_idle		= native_safe_apic_wait_icr_idle,
};