// SPDX-License-Identifier: GPL-2.0

#include <linux/cpumask.h>
#include <linux/smp.h>

#include "local.h"

DEFINE_STATIC_KEY_FALSE(apic_use_ipi_shorthand);
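/*
 * When the static key above is enabled, the IPI fast paths below use the
 * hardware 'all'/'all but self' ICR shorthands instead of iterating over
 * cpu_online_mask, i.e. a single ICR write that the APIC fans out in
 * hardware.
 */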
#ifdef CONFIG_SMP
static int apic_ipi_shorthand_off __ro_after_init;
static __init int apic_ipi_shorthand(char *str)
{
	get_option(&str, &apic_ipi_shorthand_off);
	return 1;
}
__setup("no_ipi_broadcast=", apic_ipi_shorthand);
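/*
 * Booting with "no_ipi_broadcast=1" on the kernel command line sets
 * apic_ipi_shorthand_off and thereby pins the kernel to the mask based
 * IPI paths; apic_smt_update() below will then never enable the key.
 */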
static int __init print_ipi_mode(void)
{
	pr_info("IPI shorthand broadcast: %s\n",
		apic_ipi_shorthand_off ? "disabled" : "enabled");
	return 0;
}
late_initcall(print_ipi_mode);
void apic_smt_update(void)
{
	/*
	 * Do not switch to broadcast mode if:
	 * - Disabled on the command line
	 * - Only a single CPU is online
	 * - Not all present CPUs have been at least booted once
	 *
	 * The latter is important as the local APIC might be in some
	 * random state and a broadcast might cause havoc. That's
	 * especially true for NMI broadcasting.
	 */
	if (apic_ipi_shorthand_off || num_online_cpus() == 1 ||
	    !cpumask_equal(cpu_present_mask, &cpus_booted_once_mask)) {
		static_branch_disable(&apic_use_ipi_shorthand);
	} else {
		static_branch_enable(&apic_use_ipi_shorthand);
	}
}
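/*
 * static_branch_enable()/disable() patch the branch sites at runtime, so
 * the shorthand check in the send paths below costs a single jump or nop
 * rather than a load and a conditional branch.
 */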
void apic_send_IPI_allbutself(unsigned int vector)
{
	if (num_online_cpus() < 2)
		return;

	if (static_branch_likely(&apic_use_ipi_shorthand))
		apic->send_IPI_allbutself(vector);
	else
		apic->send_IPI_mask_allbutself(cpu_online_mask, vector);
}
/*
 * Send a 'reschedule' IPI to another CPU. It goes straight through and
 * wastes no time serializing anything. Worst case is that we lose a
 * reschedule ...
 */
void native_smp_send_reschedule(int cpu)
{
	if (unlikely(cpu_is_offline(cpu))) {
		WARN(1, "sched: Unexpected reschedule of offline CPU#%d!\n", cpu);
		return;
	}
	apic->send_IPI(cpu, RESCHEDULE_VECTOR);
}
void native_send_call_func_single_ipi(int cpu)
{
	apic->send_IPI(cpu, CALL_FUNCTION_SINGLE_VECTOR);
}
void native_send_call_func_ipi(const struct cpumask *mask)
{
	if (static_branch_likely(&apic_use_ipi_shorthand)) {
		unsigned int cpu = smp_processor_id();

		if (!cpumask_or_equal(mask, cpumask_of(cpu), cpu_online_mask))
			goto sendmask;

		if (cpumask_test_cpu(cpu, mask))
			apic->send_IPI_all(CALL_FUNCTION_VECTOR);
		else if (num_online_cpus() > 1)
			apic->send_IPI_allbutself(CALL_FUNCTION_VECTOR);
		return;
	}

sendmask:
	apic->send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
}
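/*
 * The shorthand is only correct when the request covers every other
 * online CPU: cpumask_or_equal() checks that mask | self equals
 * cpu_online_mask. If the sending CPU is itself a target, 'all' is used,
 * otherwise 'all but self'; anything narrower falls back to the mask
 * based send.
 */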
#endif /* CONFIG_SMP */
static inline int __prepare_ICR2(unsigned int mask)
{
	return SET_APIC_DEST_FIELD(mask);
}
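/*
 * SET_APIC_DEST_FIELD() shifts the destination into the top byte of
 * ICR2, i.e. bits 56-63 of the full 64-bit ICR.
 */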
static inline void __xapic_wait_icr_idle(void)
{
	while (native_apic_mem_read(APIC_ICR) & APIC_ICR_BUSY)
		cpu_relax();
}
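/*
 * APIC_ICR_BUSY is the delivery status bit; the APIC clears it once the
 * previous IPI has been dispatched, so spinning on it guarantees the ICR
 * is ready to accept a new command.
 */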
void __default_send_IPI_shortcut(unsigned int shortcut, int vector)
{
	/*
	 * Subtle. In the case of the 'never do double writes' workaround
	 * we have to lock out interrupts to be safe. As we don't care
	 * about the value read we use an atomic rmw access to avoid
	 * costly cli/sti. Otherwise we use an even cheaper single atomic
	 * write to the APIC.
	 */
	unsigned int cfg;

	/*
	 * Wait for idle.
	 */
	if (unlikely(vector == NMI_VECTOR))
		safe_apic_wait_icr_idle();
	else
		__xapic_wait_icr_idle();

	/*
	 * No need to touch the target chip field. Also the destination
	 * mode is ignored when a shorthand is used.
	 */
	cfg = __prepare_ICR(shortcut, vector, 0);

	/*
	 * Send the IPI. The write to APIC_ICR fires this off.
	 */
	native_apic_mem_write(APIC_ICR, cfg);
}
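/*
 * Note: safe_apic_wait_icr_idle() bounds the wait with a timeout instead
 * of spinning forever, which is the safer choice for NMI vectors that
 * may be sent from panic or backtrace paths where the ICR might never
 * drain.
 */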
/*
 * This is used to send an IPI with no shorthand notation (the destination is
 * specified in bits 56 to 63 of the ICR).
 */
void __default_send_IPI_dest_field(unsigned int mask, int vector, unsigned int dest)
{
	unsigned long cfg;

	/*
	 * Wait for idle.
	 */
	if (unlikely(vector == NMI_VECTOR))
		safe_apic_wait_icr_idle();
	else
		__xapic_wait_icr_idle();

	/*
	 * prepare target chip field
	 */
	cfg = __prepare_ICR2(mask);
	native_apic_mem_write(APIC_ICR2, cfg);

	/*
	 * program the ICR
	 */
	cfg = __prepare_ICR(0, vector, dest);

	/*
	 * Send the IPI. The write to APIC_ICR fires this off.
	 */
	native_apic_mem_write(APIC_ICR, cfg);
}
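/*
 * The ordering above matters: ICR2 (the destination) must be written
 * before ICR, because the write to the low word is what triggers the
 * send.
 */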
void default_send_IPI_single_phys(int cpu, int vector)
{
	unsigned long flags;

	local_irq_save(flags);
	__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, cpu),
				      vector, APIC_DEST_PHYSICAL);
	local_irq_restore(flags);
}
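/*
 * Interrupts are disabled around the ICR2/ICR write pair so that an IPI
 * sent from an interrupt handler cannot clobber ICR2 between the two
 * writes.
 */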
void default_send_IPI_mask_sequence_phys(const struct cpumask *mask, int vector)
{
	unsigned long query_cpu;
	unsigned long flags;

	/*
	 * Hack. The clustered APIC addressing mode doesn't allow us to send
	 * to an arbitrary mask, so I do a unicast to each CPU instead.
	 */
	local_irq_save(flags);
	for_each_cpu(query_cpu, mask) {
		__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
				query_cpu), vector, APIC_DEST_PHYSICAL);
	}
	local_irq_restore(flags);
}
void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
					   int vector)
{
	unsigned int this_cpu = smp_processor_id();
	unsigned int query_cpu;
	unsigned long flags;

	/* See Hack comment above */

	local_irq_save(flags);
	for_each_cpu(query_cpu, mask) {
		if (query_cpu == this_cpu)
			continue;
		__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
				query_cpu), vector, APIC_DEST_PHYSICAL);
	}
	local_irq_restore(flags);
}
/*
 * Helper function for APICs which insist on cpumasks
 */
void default_send_IPI_single(int cpu, int vector)
{
	apic->send_IPI_mask(cpumask_of(cpu), vector);
}
void default_send_IPI_allbutself(int vector)
{
	__default_send_IPI_shortcut(APIC_DEST_ALLBUT, vector);
}

void default_send_IPI_all(int vector)
{
	__default_send_IPI_shortcut(APIC_DEST_ALLINC, vector);
}

void default_send_IPI_self(int vector)
{
	__default_send_IPI_shortcut(APIC_DEST_SELF, vector);
}
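/*
 * APIC_DEST_ALLBUT, APIC_DEST_ALLINC and APIC_DEST_SELF program the
 * destination shorthand field of the ICR, so none of these helpers need
 * a target APIC ID.
 */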
#ifdef CONFIG_X86_32

void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
					    int vector)
{
	unsigned long flags;
	unsigned int query_cpu;

	/*
	 * Hack. The clustered APIC addressing mode doesn't allow us to send
	 * to an arbitrary mask, so I do unicasts to each CPU instead. This
	 * should be modified to do 1 message per cluster ID - mbligh
	 */
	local_irq_save(flags);
	for_each_cpu(query_cpu, mask)
		__default_send_IPI_dest_field(
			early_per_cpu(x86_cpu_to_logical_apicid, query_cpu),
			vector, apic->dest_logical);
	local_irq_restore(flags);
}
void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
					      int vector)
{
	unsigned long flags;
	unsigned int query_cpu;
	unsigned int this_cpu = smp_processor_id();

	/* See Hack comment above */

	local_irq_save(flags);
	for_each_cpu(query_cpu, mask) {
		if (query_cpu == this_cpu)
			continue;
		__default_send_IPI_dest_field(
			early_per_cpu(x86_cpu_to_logical_apicid, query_cpu),
			vector, apic->dest_logical);
	}
	local_irq_restore(flags);
}
/*
 * This is only used on smaller machines.
 */
void default_send_IPI_mask_logical(const struct cpumask *cpumask, int vector)
{
	unsigned long mask = cpumask_bits(cpumask)[0];
	unsigned long flags;

	if (!mask)
		return;

	local_irq_save(flags);
	WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]);
	__default_send_IPI_dest_field(mask, vector, apic->dest_logical);
	local_irq_restore(flags);
}
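/*
 * In flat logical mode each CPU owns one bit of the 8-bit logical
 * destination, so the first word of the cpumask can be programmed into
 * the ICR destination field directly; hence the "smaller machines"
 * restriction above.
 */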
/* must come after the send_IPI functions above for inlining */
static int convert_apicid_to_cpu(int apic_id)
{
	int i;

	for_each_possible_cpu(i) {
		if (per_cpu(x86_cpu_to_apicid, i) == apic_id)
			return i;
	}
	return -1;
}
int safe_smp_processor_id(void)
{
	int apicid, cpuid;

	if (!boot_cpu_has(X86_FEATURE_APIC))
		return 0;

	apicid = hard_smp_processor_id();
	if (apicid == BAD_APICID)
		return 0;

	cpuid = convert_apicid_to_cpu(apicid);

	return cpuid >= 0 ? cpuid : 0;
}
#endif /* CONFIG_X86_32 */
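/*
 * safe_smp_processor_id() resolves the CPU number from the hardware APIC
 * ID rather than per-CPU data, so it stays usable in contexts where the
 * per-CPU setup may not be trusted; it falls back to CPU 0 when the APIC
 * ID cannot be resolved.
 */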