/*
 * Generic helpers for smp ipi calls
 *
 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
 */
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/cpu.h>
static DEFINE_PER_CPU(struct call_single_queue, call_single_queue);
static struct {
	struct list_head	queue;
	raw_spinlock_t		lock;
} call_function __cacheline_aligned_in_smp =
	{
		.queue		= LIST_HEAD_INIT(call_function.queue),
		.lock		= RAW_SPIN_LOCK_UNLOCKED(call_function.lock),
	};
enum {
	CSD_FLAG_LOCK		= 0x01,
};

struct call_function_data {
	struct call_single_data	csd;
	raw_spinlock_t		lock;
	unsigned int		refs;
	cpumask_var_t		cpumask;
};
struct call_single_queue {
	struct list_head	list;
	spinlock_t		lock;
};
static DEFINE_PER_CPU(struct call_function_data, cfd_data) = {
	.lock			= RAW_SPIN_LOCK_UNLOCKED(cfd_data.lock),
};
static int __cpuinit
hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		if (!alloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
				cpu_to_node(cpu)))
			return NOTIFY_BAD;
		break;

#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:

	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		free_cpumask_var(cfd->cpumask);
		break;
#endif
	}

	return NOTIFY_OK;
}
static struct notifier_block __cpuinitdata hotplug_cfd_notifier = {
	.notifier_call		= hotplug_cfd,
};
static int __cpuinit init_call_single_data(void)
{
	void *cpu = (void *)(long)smp_processor_id();
	int i;

	for_each_possible_cpu(i) {
		struct call_single_queue *q = &per_cpu(call_single_queue, i);

		spin_lock_init(&q->lock);
		INIT_LIST_HEAD(&q->list);
	}

	hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu);
	register_cpu_notifier(&hotplug_cfd_notifier);

	return 0;
}
early_initcall(init_call_single_data);
/*
 * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
 *
 * For non-synchronous ipi calls the csd can still be in use by the
 * previous function call. For multi-cpu calls it's even more interesting
 * as we'll have to ensure no other cpu is observing our csd.
 */
static void csd_lock_wait(struct call_single_data *data)
{
	while (data->flags & CSD_FLAG_LOCK)
		cpu_relax();
}
static void csd_lock(struct call_single_data *data)
{
	csd_lock_wait(data);
	data->flags = CSD_FLAG_LOCK;

	/*
	 * prevent CPU from reordering the above assignment
	 * to ->flags with any subsequent assignments to other
	 * fields of the specified call_single_data structure:
	 */
	smp_mb();
}
static void csd_unlock(struct call_single_data *data)
{
	WARN_ON(!(data->flags & CSD_FLAG_LOCK));

	/*
	 * ensure we're all done before releasing data:
	 */
	smp_mb();

	data->flags &= ~CSD_FLAG_LOCK;
}
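/*
 * Lifecycle sketch (illustrative only; "csd", "func" and "info" are
 * placeholders, not code from this file): for an asynchronous single
 * call the *sender* takes the csd lock and the *receiving* cpu
 * releases it after invoking the function, so the sender may reuse
 * the csd only once csd_lock() succeeds again:
 *
 *	csd_lock(csd);			// sender: waits out previous user
 *	csd->func = func;
 *	csd->info = info;
 *	generic_exec_single(cpu, csd, 0);
 *
 *	// later, on the target cpu, in IPI context:
 *	csd->func(csd->info);		// run the queued function
 *	csd_unlock(csd);		// csd is now free for reuse
 */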
/*
 * Insert a previously allocated call_single_data element
 * for execution on the given CPU. data must already have
 * ->func, ->info, and ->flags set.
 */
static
void generic_exec_single(int cpu, struct call_single_data *data, int wait)
{
	struct call_single_queue *dst = &per_cpu(call_single_queue, cpu);
	unsigned long flags;
	int ipi;

	spin_lock_irqsave(&dst->lock, flags);
	ipi = list_empty(&dst->list);
	list_add_tail(&data->list, &dst->list);
	spin_unlock_irqrestore(&dst->lock, flags);

	/*
	 * The list addition should be visible before sending the IPI,
	 * because the handler locks the list to pull the entry off it;
	 * the normal cache coherency rules implied by spinlocks
	 * guarantee that ordering.
	 *
	 * If IPIs can go out of order with respect to the cache coherency
	 * protocol in an architecture, sufficient synchronisation should
	 * be added to arch code to make it appear to obey cache coherency
	 * WRT locking and barrier primitives. Generic code isn't really
	 * equipped to do the right thing...
	 *
	 * Only send the IPI if the queue was empty: the remote handler
	 * drains the whole list, so a non-empty queue means an IPI is
	 * already on its way.
	 */
	if (ipi)
		arch_send_call_function_single_ipi(cpu);

	if (wait)
		csd_lock_wait(data);
}
/*
 * Invoked by arch to handle an IPI for call function. Must be called with
 * interrupts disabled.
 */
void generic_smp_call_function_interrupt(void)
{
	struct call_function_data *data;
	int cpu = smp_processor_id();

	/*
	 * Ensure entry is visible on call_function_queue after we have
	 * entered the IPI. See comment in smp_call_function_many.
	 * If we don't have this, then we may miss an entry on the list
	 * and never get another IPI to process it.
	 */
	smp_mb();

	/*
	 * It's ok to use list_for_each_rcu() here even though we may
	 * delete 'pos', since list_del_rcu() doesn't clear ->next
	 */
	list_for_each_entry_rcu(data, &call_function.queue, csd.list) {
		int refs;

		spin_lock(&data->lock);
		if (!cpumask_test_cpu(cpu, data->cpumask)) {
			spin_unlock(&data->lock);
			continue;
		}
		cpumask_clear_cpu(cpu, data->cpumask);
		spin_unlock(&data->lock);

		data->csd.func(data->csd.info);

		spin_lock(&data->lock);
		WARN_ON(data->refs == 0);
		refs = --data->refs;
		if (!refs) {
			spin_lock(&call_function.lock);
			list_del_rcu(&data->csd.list);
			spin_unlock(&call_function.lock);
		}
		spin_unlock(&data->lock);

		if (refs)
			continue;

		csd_unlock(&data->csd);
	}
}
/*
 * Invoked by arch to handle an IPI for call function single. Must be
 * called from the arch with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
	struct call_single_queue *q = &__get_cpu_var(call_single_queue);
	unsigned int data_flags;
	LIST_HEAD(list);

	spin_lock(&q->lock);
	list_replace_init(&q->list, &list);
	spin_unlock(&q->lock);

	while (!list_empty(&list)) {
		struct call_single_data *data;

		data = list_entry(list.next, struct call_single_data, list);
		list_del(&data->list);

		/*
		 * 'data' can be invalid after this call if flags == 0
		 * (when called through generic_exec_single()),
		 * so save them away before making the call:
		 */
		data_flags = data->flags;

		data->func(data->info);

		/*
		 * Unlocked CSDs are valid through generic_exec_single():
		 */
		if (data_flags & CSD_FLAG_LOCK)
			csd_unlock(data);
	}
}
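/*
 * Arch wiring sketch (illustrative only; the my_arch_* names are
 * hypothetical): an architecture implements
 * arch_send_call_function_single_ipi()/arch_send_call_function_ipi_mask()
 * and, from the corresponding interrupt vectors, calls back into the
 * two generic handlers above with interrupts disabled:
 *
 *	void my_arch_call_function_interrupt(void)
 *	{
 *		my_arch_ack_ipi();
 *		irq_enter();
 *		generic_smp_call_function_interrupt();
 *		irq_exit();
 *	}
 *
 *	void my_arch_call_function_single_interrupt(void)
 *	{
 *		my_arch_ack_ipi();
 *		irq_enter();
 *		generic_smp_call_function_single_interrupt();
 *		irq_exit();
 *	}
 */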
static DEFINE_PER_CPU(struct call_single_data, csd_data);
/*
 * smp_call_function_single - Run a function on a specific CPU
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code. Note that @wait
 * will be implicitly turned on in case of allocation failures, since
 * we fall back to on-stack allocation.
 */
int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
			     int wait)
{
	struct call_single_data d = {
		.flags = 0,
	};
	unsigned long flags;
	int this_cpu;
	int err = 0;

	/*
	 * prevent preemption and reschedule on another processor,
	 * as well as CPU removal
	 */
	this_cpu = get_cpu();

	/* Can deadlock when called with interrupts disabled */
	WARN_ON_ONCE(irqs_disabled() && !oops_in_progress);

	if (cpu == this_cpu) {
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	} else {
		if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) {
			struct call_single_data *data = &d;

			if (!wait)
				data = &__get_cpu_var(csd_data);

			csd_lock(data);

			data->func = func;
			data->info = info;
			generic_exec_single(cpu, data, wait);
		} else {
			err = -ENXIO;	/* CPU not online */
		}
	}

	put_cpu();

	return err;
}
EXPORT_SYMBOL(smp_call_function_single);
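/*
 * Usage sketch (illustrative only; remote_tick() and "ticks" are made
 * up): run a fast, non-blocking function on cpu 1 and wait for it:
 *
 *	static void remote_tick(void *info)
 *	{
 *		atomic_inc((atomic_t *)info);	// runs on cpu 1, irqs off
 *	}
 *
 *	static atomic_t ticks = ATOMIC_INIT(0);
 *
 *	int err = smp_call_function_single(1, remote_tick, &ticks, 1);
 *	if (err)
 *		printk(KERN_WARNING "cpu 1 not online\n");
 */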
/*
 * __smp_call_function_single(): Run a function on another CPU
 * @cpu: The CPU to run on.
 * @data: Pre-allocated and setup data structure
 *
 * Like smp_call_function_single(), but allow caller to pass in a
 * pre-allocated data structure. Useful for embedding @data inside
 * other structures, for instance.
 */
void __smp_call_function_single(int cpu, struct call_single_data *data,
				int wait)
{
	csd_lock(data);

	/* Can deadlock when called with interrupts disabled */
	WARN_ON_ONCE(wait && irqs_disabled() && !oops_in_progress);

	generic_exec_single(cpu, data, wait);
}
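/*
 * Usage sketch (illustrative only; struct my_work and its members are
 * hypothetical): embedding the call_single_data inside a larger
 * structure avoids any allocation on the send path:
 *
 *	struct my_work {
 *		struct call_single_data	csd;
 *		int			result;
 *	};
 *
 *	static void my_work_func(void *info)
 *	{
 *		struct my_work *w = info;
 *
 *		w->result = raw_smp_processor_id();
 *	}
 *
 *	static struct my_work w;	// zeroed, so csd.flags == 0
 *
 *	w.csd.func = my_work_func;
 *	w.csd.info = &w;
 *	__smp_call_function_single(cpu, &w.csd, 0);
 */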
/* Deprecated: shim for archs using old arch_send_call_function_ipi API. */
#ifndef arch_send_call_function_ipi_mask
# define arch_send_call_function_ipi_mask(maskp) \
	 arch_send_call_function_ipi(*(maskp))
#endif
/**
 * smp_call_function_many(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned. Note that @wait
 * will be implicitly turned on in case of allocation failures, since
 * we fall back to on-stack allocation.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
void smp_call_function_many(const struct cpumask *mask,
			    void (*func)(void *), void *info, bool wait)
{
	struct call_function_data *data;
	unsigned long flags;
	int cpu, next_cpu, this_cpu = smp_processor_id();

	/* Can deadlock when called with interrupts disabled */
	WARN_ON_ONCE(irqs_disabled() && !oops_in_progress);

	/* So, what's a CPU they want? Ignoring this one. */
	cpu = cpumask_first_and(mask, cpu_online_mask);
	if (cpu == this_cpu)
		cpu = cpumask_next_and(cpu, mask, cpu_online_mask);

	/* No online cpus? We're done. */
	if (cpu >= nr_cpu_ids)
		return;

	/* Do we have another CPU which isn't us? */
	next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
	if (next_cpu == this_cpu)
		next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);

	/* Fastpath: do that cpu by itself. */
	if (next_cpu >= nr_cpu_ids) {
		smp_call_function_single(cpu, func, info, wait);
		return;
	}

	data = &__get_cpu_var(cfd_data);
	csd_lock(&data->csd);

	spin_lock_irqsave(&data->lock, flags);
	data->csd.func = func;
	data->csd.info = info;
	cpumask_and(data->cpumask, mask, cpu_online_mask);
	cpumask_clear_cpu(this_cpu, data->cpumask);
	data->refs = cpumask_weight(data->cpumask);

	spin_lock(&call_function.lock);
	/*
	 * Place entry at the _HEAD_ of the list, so that any cpu still
	 * observing the entry in generic_smp_call_function_interrupt()
	 * will not miss any other list entries:
	 */
	list_add_rcu(&data->csd.list, &call_function.queue);
	spin_unlock(&call_function.lock);

	spin_unlock_irqrestore(&data->lock, flags);

	/*
	 * Make the list addition visible before sending the ipi.
	 * (IPIs must obey or appear to obey normal Linux cache
	 * coherency rules -- see comment in generic_exec_single).
	 */
	smp_mb();

	/* Send a message to all CPUs in the map */
	arch_send_call_function_ipi_mask(data->cpumask);

	/* Optionally wait for the CPUs to complete */
	if (wait)
		csd_lock_wait(&data->csd);
}
EXPORT_SYMBOL(smp_call_function_many);
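/*
 * Usage sketch (illustrative only; drain_local_cache() and "pending"
 * are hypothetical): invoke a function on a caller-built mask of cpus,
 * with preemption disabled as the comment above requires:
 *
 *	static void drain_local_cache(void *unused)
 *	{
 *		// per-cpu work; fast, non-blocking, runs with irqs off
 *	}
 *
 *	cpumask_var_t pending;
 *
 *	if (!zalloc_cpumask_var(&pending, GFP_KERNEL))
 *		return -ENOMEM;
 *	cpumask_set_cpu(2, pending);
 *	cpumask_set_cpu(3, pending);
 *
 *	preempt_disable();
 *	smp_call_function_many(pending, drain_local_cache, NULL, true);
 *	preempt_enable();
 *	free_cpumask_var(pending);
 */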
/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * Returns 0.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func. In case of allocation
 * failure, @wait will be implicitly turned on.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function(void (*func)(void *), void *info, int wait)
{
	preempt_disable();
	smp_call_function_many(cpu_online_mask, func, info, wait);
	preempt_enable();

	return 0;
}
EXPORT_SYMBOL(smp_call_function);
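/*
 * Usage sketch (illustrative only; flush_local_state() is hypothetical):
 * run a function on all *other* online cpus and wait for completion;
 * unlike smp_call_function_many(), this helper disables preemption
 * itself, and the calling cpu must run the function on its own:
 *
 *	static void flush_local_state(void *unused)
 *	{
 *		// fast, non-blocking, runs with irqs disabled
 *	}
 *
 *	smp_call_function(flush_local_state, NULL, 1);
 *	flush_local_state(NULL);	// calling cpu is not included
 */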
void ipi_call_lock(void)
{
	spin_lock(&call_function.lock);
}

void ipi_call_unlock(void)
{
	spin_unlock(&call_function.lock);
}

void ipi_call_lock_irq(void)
{
	spin_lock_irq(&call_function.lock);
}

void ipi_call_unlock_irq(void)
{
	spin_unlock_irq(&call_function.lock);
}