// SPDX-License-Identifier: GPL-2.0
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/cache.h>
#include <linux/cpu.h>

#include <asm/smp.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/apic.h>
#include <asm/proto.h>
#include <asm/ipi.h>

void __default_send_IPI_shortcut(unsigned int shortcut, int vector,
				 unsigned int dest)
{
	/*
	 * Subtle. In the case of the 'never do double writes' workaround
	 * we have to lock out interrupts to be safe. As we don't care
	 * about the value read we use an atomic rmw access to avoid costly
	 * cli/sti. Otherwise we use an even cheaper single atomic write
	 * to the APIC.
	 */
	unsigned int cfg;

	/*
	 * Wait for idle.
	 */
	__xapic_wait_icr_idle();

	/*
	 * No need to touch the target chip field.
	 */
	cfg = __prepare_ICR(shortcut, vector, dest);

	/*
	 * Send the IPI. The write to APIC_ICR fires this off.
	 */
	native_apic_mem_write(APIC_ICR, cfg);
}

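/*
 * For reference, a sketch of what __prepare_ICR() produces (assuming the
 * usual xAPIC ICR layout: vector in bits 0-7, delivery mode in bits 8-10,
 * destination shorthand in bits 18-19): a fixed-delivery self-IPI is
 * roughly APIC_DEST_SELF | APIC_DM_FIXED | vector, while an NMI uses
 * APIC_DM_NMI and the vector bits are ignored by the hardware.
 */
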
/*
 * This is used to send an IPI with no shorthand notation (the destination is
 * specified in bits 56 to 63 of the ICR).
 */
void __default_send_IPI_dest_field(unsigned int mask, int vector,
				   unsigned int dest)
{
	unsigned long cfg;

	/*
	 * Wait for idle.
	 */
	if (unlikely(vector == NMI_VECTOR))
		safe_apic_wait_icr_idle();
	else
		__xapic_wait_icr_idle();

	/*
	 * Prepare target chip field.
	 */
	cfg = __prepare_ICR2(mask);
	native_apic_mem_write(APIC_ICR2, cfg);

	/*
	 * Program the ICR.
	 */
	cfg = __prepare_ICR(0, vector, dest);

	/*
	 * Send the IPI. The write to APIC_ICR fires this off.
	 */
	native_apic_mem_write(APIC_ICR, cfg);
}

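/*
 * Note for callers (a usage sketch, not new API): the ICR2 write and the
 * ICR write above are two separate MMIO accesses, so the sequence must not
 * be interleaved with another IPI send. The helpers below therefore wrap
 * each send in local_irq_save()/local_irq_restore(), e.g.:
 *
 *	local_irq_save(flags);
 *	__default_send_IPI_dest_field(apicid, vector, APIC_DEST_PHYSICAL);
 *	local_irq_restore(flags);
 */
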
void default_send_IPI_single_phys(int cpu, int vector)
{
	unsigned long flags;

	local_irq_save(flags);
	__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, cpu),
				      vector, APIC_DEST_PHYSICAL);
	local_irq_restore(flags);
}

void default_send_IPI_mask_sequence_phys(const struct cpumask *mask, int vector)
{
	unsigned long query_cpu;
	unsigned long flags;

	/*
	 * Hack. The clustered APIC addressing mode doesn't allow us to send
	 * to an arbitrary mask, so I do a unicast to each CPU instead.
	 */
	local_irq_save(flags);
	for_each_cpu(query_cpu, mask) {
		__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
				query_cpu), vector, APIC_DEST_PHYSICAL);
	}
	local_irq_restore(flags);
}

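/*
 * An informal cost note (mine): sending to N CPUs this way means N
 * ICR2/ICR write pairs, each preceded by a wait for the ICR to go idle,
 * all with interrupts off, so large masks keep the sender in irq-off
 * context for the whole walk.
 */
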
void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
					   int vector)
{
	unsigned int this_cpu = smp_processor_id();
	unsigned int query_cpu;
	unsigned long flags;

	/* See Hack comment above */

	local_irq_save(flags);
	for_each_cpu(query_cpu, mask) {
		if (query_cpu == this_cpu)
			continue;
		__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
				query_cpu), vector, APIC_DEST_PHYSICAL);
	}
	local_irq_restore(flags);
}

/*
 * Helper function for APICs which insist on cpumasks
 */
void default_send_IPI_single(int cpu, int vector)
{
	apic->send_IPI_mask(cpumask_of(cpu), vector);
}

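/*
 * Sketch of the intent (wording mine): an APIC driver that only implements
 * a mask-based send can plug this helper in as its single-CPU callback;
 * cpumask_of(cpu) turns the lone CPU into the one-bit cpumask the driver
 * insists on.
 */
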
void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
					    int vector)
{
	unsigned long flags;
	unsigned int query_cpu;

	/*
	 * Hack. The clustered APIC addressing mode doesn't allow us to send
	 * to an arbitrary mask, so I do a unicast to each CPU instead. This
	 * should be modified to do 1 message per cluster ID - mbligh
	 */
	local_irq_save(flags);
	for_each_cpu(query_cpu, mask)
		__default_send_IPI_dest_field(
			early_per_cpu(x86_cpu_to_logical_apicid, query_cpu),
			vector, apic->dest_logical);
	local_irq_restore(flags);
}

void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
					      int vector)
{
	unsigned long flags;
	unsigned int query_cpu;
	unsigned int this_cpu = smp_processor_id();

	/* See Hack comment above */

	local_irq_save(flags);
	for_each_cpu(query_cpu, mask) {
		if (query_cpu == this_cpu)
			continue;
		__default_send_IPI_dest_field(
			early_per_cpu(x86_cpu_to_logical_apicid, query_cpu),
			vector, apic->dest_logical);
	}
	local_irq_restore(flags);
}

/*
 * This is only used on smaller machines.
 */
void default_send_IPI_mask_logical(const struct cpumask *cpumask, int vector)
{
	unsigned long mask = cpumask_bits(cpumask)[0];
	unsigned long flags;

	if (!mask)
		return;

	local_irq_save(flags);
	WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]);
	__default_send_IPI_dest_field(mask, vector, apic->dest_logical);
	local_irq_restore(flags);
}

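/*
 * Why "smaller machines" (an informal note, assuming flat logical mode):
 * the low word of the cpumask is used directly as the logical destination
 * bitmap in ICR bits 56-63, and flat logical addressing only provides 8
 * such bits, so this path cannot address more than 8 CPUs.
 */
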
void default_send_IPI_allbutself(int vector)
{
	/*
	 * If there are no other CPUs in the system then we get an APIC send
	 * error if we try to broadcast, thus avoid sending IPIs in this case.
	 */
	if (!(num_online_cpus() > 1))
		return;

	__default_local_send_IPI_allbutself(vector);
}

void default_send_IPI_all(int vector)
{
	__default_local_send_IPI_all(vector);
}

void default_send_IPI_self(int vector)
{
	__default_send_IPI_shortcut(APIC_DEST_SELF, vector, apic->dest_logical);
}

/* must come after the send_IPI functions above for inlining */
static int convert_apicid_to_cpu(int apic_id)
{
	int i;

	for_each_possible_cpu(i) {
		if (per_cpu(x86_cpu_to_apicid, i) == apic_id)
			return i;
	}
	return -1;
}

int safe_smp_processor_id(void)
{
	int apicid, cpuid;

	if (!boot_cpu_has(X86_FEATURE_APIC))
		return 0;

	apicid = hard_smp_processor_id();
	if (apicid == BAD_APICID)
		return 0;

	cpuid = convert_apicid_to_cpu(apicid);

	return cpuid >= 0 ? cpuid : 0;
}
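
/*
 * A short usage note (informal): unlike smp_processor_id(), this variant
 * derives the CPU number from the hardware APIC ID rather than from
 * per-CPU state, falling back to CPU 0 whenever the lookup fails, which
 * makes it usable from contexts where per-CPU data may not be trustworthy.
 */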