/*
 * Generic helpers for smp ipi calls
 *
 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
 */
#include <linux/irq_work.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/hypervisor.h>

#include "smpboot.h"
enum {
        CSD_FLAG_LOCK           = 0x01,
        CSD_FLAG_SYNCHRONOUS    = 0x02,
};
struct call_function_data {
        struct call_single_data __percpu *csd;
        cpumask_var_t           cpumask;
};
static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);

static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);

static void flush_smp_call_function_queue(bool warn_cpu_offline);
int smpcfd_prepare_cpu(unsigned int cpu)
{
        struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

        if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
                                     cpu_to_node(cpu)))
                return -ENOMEM;
        cfd->csd = alloc_percpu(struct call_single_data);
        if (!cfd->csd) {
                free_cpumask_var(cfd->cpumask);
                return -ENOMEM;
        }

        return 0;
}
int smpcfd_dead_cpu(unsigned int cpu)
{
        struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

        free_cpumask_var(cfd->cpumask);
        free_percpu(cfd->csd);
        return 0;
}
int smpcfd_dying_cpu(unsigned int cpu)
{
        /*
         * The IPIs for the smp-call-function callbacks queued by other
         * CPUs might arrive late, either due to hardware latencies or
         * because this CPU disabled interrupts (inside stop-machine)
         * before the IPIs were sent. So flush out any pending callbacks
         * explicitly (without waiting for the IPIs to arrive), to
         * ensure that the outgoing CPU doesn't go offline with work
         * still pending.
         */
        flush_smp_call_function_queue(false);
        return 0;
}
void __init call_function_init(void)
{
        int i;

        for_each_possible_cpu(i)
                init_llist_head(&per_cpu(call_single_queue, i));

        smpcfd_prepare_cpu(smp_processor_id());
}
/*
 * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
 *
 * For non-synchronous ipi calls the csd can still be in use by the
 * previous function call. For multi-cpu calls it's even more interesting
 * as we'll have to ensure no other cpu is observing our csd.
 */
static __always_inline void csd_lock_wait(struct call_single_data *csd)
{
        smp_cond_load_acquire(&csd->flags, !(VAL & CSD_FLAG_LOCK));
}
static __always_inline void csd_lock(struct call_single_data *csd)
{
        csd_lock_wait(csd);
        csd->flags |= CSD_FLAG_LOCK;

        /*
         * prevent CPU from reordering the above assignment
         * to ->flags with any subsequent assignments to other
         * fields of the specified call_single_data structure:
         */
        smp_wmb();
}
static __always_inline void csd_unlock(struct call_single_data *csd)
{
        WARN_ON(!(csd->flags & CSD_FLAG_LOCK));

        /*
         * ensure we're all done before releasing data:
         */
        smp_store_release(&csd->flags, 0);
}
static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data);
/*
 * Insert a previously allocated call_single_data element
 * for execution on the given CPU. data must already have
 * ->func, ->info, and ->flags set.
 */
static int generic_exec_single(int cpu, struct call_single_data *csd,
                               smp_call_func_t func, void *info)
{
        if (cpu == smp_processor_id()) {
                unsigned long flags;

                /*
                 * We can unlock early even for the synchronous on-stack case,
                 * since we're doing this from the same CPU..
                 */
                csd_unlock(csd);
                local_irq_save(flags);
                func(info);
                local_irq_restore(flags);
                return 0;
        }

        if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) {
                csd_unlock(csd);
                return -ENXIO;
        }

        csd->func = func;
        csd->info = info;

        /*
         * The list addition should be visible before sending the IPI
         * handler locks the list to pull the entry off it because of
         * normal cache coherency rules implied by spinlocks.
         *
         * If IPIs can go out of order to the cache coherency protocol
         * in an architecture, sufficient synchronisation should be added
         * to arch code to make it appear to obey cache coherency WRT
         * locking and barrier primitives. Generic code isn't really
         * equipped to do the right thing...
         */
        if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
                arch_send_call_function_single_ipi(cpu);

        return 0;
}
/**
 * generic_smp_call_function_single_interrupt - Execute SMP IPI callbacks
 *
 * Invoked by arch to handle an IPI for call function single.
 * Must be called with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
        flush_smp_call_function_queue(true);
}
/**
 * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
 *
 * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an
 *		      offline CPU. Skip this check if set to 'false'.
 *
 * Flush any pending smp-call-function callbacks queued on this CPU. This is
 * invoked by the generic IPI handler, as well as by a CPU about to go offline,
 * to ensure that all pending IPI callbacks are run before it goes completely
 * offline.
 *
 * Loop through the call_single_queue and run all the queued callbacks.
 * Must be called with interrupts disabled.
 */
static void flush_smp_call_function_queue(bool warn_cpu_offline)
{
        struct llist_head *head;
        struct llist_node *entry;
        struct call_single_data *csd, *csd_next;
        static bool warned;

        WARN_ON(!irqs_disabled());

        head = this_cpu_ptr(&call_single_queue);
        entry = llist_del_all(head);
        entry = llist_reverse_order(entry);

        /* There shouldn't be any pending callbacks on an offline CPU. */
        if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) &&
                     !warned && !llist_empty(head))) {
                warned = true;
                WARN(1, "IPI on offline CPU %d\n", smp_processor_id());

                /*
                 * We don't have to use the _safe() variant here
                 * because we are not invoking the IPI handlers yet.
                 */
                llist_for_each_entry(csd, entry, llist)
                        pr_warn("IPI callback %pS sent to offline CPU\n",
                                csd->func);
        }

        llist_for_each_entry_safe(csd, csd_next, entry, llist) {
                smp_call_func_t func = csd->func;
                void *info = csd->info;

                /* Do we wait until *after* callback? */
                if (csd->flags & CSD_FLAG_SYNCHRONOUS) {
                        func(info);
                        csd_unlock(csd);
                } else {
                        csd_unlock(csd);
                        func(info);
                }
        }

        /*
         * Handle irq works queued remotely by irq_work_queue_on().
         * Smp functions above are typically synchronous so they
         * better run first since some other CPUs may be busy waiting
         * for them.
         */
        irq_work_run();
}
/*
 * smp_call_function_single - Run a function on a specific CPU
 * @cpu: The CPU to run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until the function has completed on the target CPU.
 *
 * Returns 0 on success, else a negative status code.
 */
int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
                             int wait)
{
        struct call_single_data *csd;
        struct call_single_data csd_stack = { .flags = CSD_FLAG_LOCK | CSD_FLAG_SYNCHRONOUS };
        int this_cpu;
        int err;

        /*
         * prevent preemption and reschedule on another processor,
         * as well as CPU removal
         */
        this_cpu = get_cpu();

        /*
         * Can deadlock when called with interrupts disabled.
         * We allow cpu's that are not yet online though, as no one else can
         * send smp call function interrupt to this cpu and as such deadlocks
         * can't happen.
         */
        WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
                     && !oops_in_progress);

        csd = &csd_stack;
        if (!wait) {
                csd = this_cpu_ptr(&csd_data);
                csd_lock(csd);
        }

        err = generic_exec_single(cpu, csd, func, info);

        if (wait)
                csd_lock_wait(csd);

        put_cpu();

        return err;
}
EXPORT_SYMBOL(smp_call_function_single);
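
/*
 * Example usage (an illustrative sketch, not part of the original file):
 * read a CPU-local value on a chosen CPU and wait for the result. The
 * example_* names below are hypothetical.
 */
static void example_read_local(void *info)
{
        unsigned int *val = info;

        /* Fast, non-blocking work; runs on the target CPU with IRQs off. */
        *val = smp_processor_id();
}

static int example_query_cpu(int cpu, unsigned int *val)
{
        /* Runs example_read_local() on @cpu and waits for it to finish. */
        return smp_call_function_single(cpu, example_read_local, val, 1);
}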
/**
 * smp_call_function_single_async(): Run an asynchronous function on a
 *				     specific CPU.
 * @cpu: The CPU to run on.
 * @csd: Pre-allocated and setup data structure
 *
 * Like smp_call_function_single(), but the call is asynchronous and
 * can thus be done from contexts with disabled interrupts.
 *
 * The caller passes his own pre-allocated data structure
 * (i.e. embedded in an object) and is responsible for synchronizing it
 * such that the IPIs performed on the @csd are strictly serialized.
 *
 * NOTE: Be careful, there is unfortunately no current debugging facility to
 * validate the correctness of this serialization.
 */
int smp_call_function_single_async(int cpu, struct call_single_data *csd)
{
        int err = 0;

        preempt_disable();

        /* We could deadlock if we have to wait here with interrupts disabled! */
        if (WARN_ON_ONCE(csd->flags & CSD_FLAG_LOCK))
                csd_lock_wait(csd);

        csd->flags = CSD_FLAG_LOCK;
        smp_wmb();

        err = generic_exec_single(cpu, csd, csd->func, csd->info);
        preempt_enable();

        return err;
}
EXPORT_SYMBOL_GPL(smp_call_function_single_async);
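
/*
 * Example usage (an illustrative sketch, not part of the original file):
 * an object embeds its own call_single_data so that work can be kicked to
 * another CPU from a context with interrupts disabled. The example_* names
 * are hypothetical; the caller must guarantee that a previous IPI on the
 * same csd has finished before queueing another one.
 */
struct example_dev {
        struct call_single_data csd;
        int                     target_cpu;
};

static void example_dev_poll(void *info)
{
        struct example_dev *dev = info;

        /* Runs on dev->target_cpu in IPI context. */
        (void)dev;
}

static void example_dev_kick(struct example_dev *dev)
{
        dev->csd.func = example_dev_poll;
        dev->csd.info = dev;
        smp_call_function_single_async(dev->target_cpu, &dev->csd);
}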
/*
 * smp_call_function_any - Run a function on any of the given cpus
 * @mask: The mask of cpus it can run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed.
 *
 * Returns 0 on success, else a negative status code (if no cpus were online).
 *
 * Selection preference:
 *	1) current cpu if in @mask
 *	2) any cpu of current node if in @mask
 *	3) any other online cpu in @mask
 */
int smp_call_function_any(const struct cpumask *mask,
                          smp_call_func_t func, void *info, int wait)
{
        unsigned int cpu;
        const struct cpumask *nodemask;
        int ret;

        /* Try for same CPU (cheapest) */
        cpu = get_cpu();
        if (cpumask_test_cpu(cpu, mask))
                goto call;

        /* Try for same node. */
        nodemask = cpumask_of_node(cpu_to_node(cpu));
        for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
             cpu = cpumask_next_and(cpu, nodemask, mask)) {
                if (cpu_online(cpu))
                        goto call;
        }

        /* Any online will do: smp_call_function_single handles nr_cpu_ids. */
        cpu = cpumask_any_and(mask, cpu_online_mask);
call:
        ret = smp_call_function_single(cpu, func, info, wait);
        put_cpu();
        return ret;
}
EXPORT_SYMBOL_GPL(smp_call_function_any);
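
/*
 * Example usage (an illustrative sketch, not part of the original file):
 * run a callback on whichever CPU of a NUMA node's cpumask is cheapest to
 * reach, preferring the current CPU. The example_* names are hypothetical.
 */
static void example_touch_node_data(void *info)
{
        /* Fast, non-blocking work. */
}

static int example_touch_node(int node)
{
        return smp_call_function_any(cpumask_of_node(node),
                                     example_touch_node_data, NULL, 1);
}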
/**
 * smp_call_function_many(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
void smp_call_function_many(const struct cpumask *mask,
                            smp_call_func_t func, void *info, bool wait)
{
        struct call_function_data *cfd;
        int cpu, next_cpu, this_cpu = smp_processor_id();

        /*
         * Can deadlock when called with interrupts disabled.
         * We allow cpu's that are not yet online though, as no one else can
         * send smp call function interrupt to this cpu and as such deadlocks
         * can't happen.
         */
        WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
                     && !oops_in_progress && !early_boot_irqs_disabled);

        /* Try to fastpath. So, what's a CPU they want? Ignoring this one. */
        cpu = cpumask_first_and(mask, cpu_online_mask);
        if (cpu == this_cpu)
                cpu = cpumask_next_and(cpu, mask, cpu_online_mask);

        /* No online cpus? We're done. */
        if (cpu >= nr_cpu_ids)
                return;

        /* Do we have another CPU which isn't us? */
        next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
        if (next_cpu == this_cpu)
                next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);

        /* Fastpath: do that cpu by itself. */
        if (next_cpu >= nr_cpu_ids) {
                smp_call_function_single(cpu, func, info, wait);
                return;
        }

        cfd = this_cpu_ptr(&cfd_data);

        cpumask_and(cfd->cpumask, mask, cpu_online_mask);
        cpumask_clear_cpu(this_cpu, cfd->cpumask);

        /* Some callers race with other cpus changing the passed mask */
        if (unlikely(!cpumask_weight(cfd->cpumask)))
                return;

        for_each_cpu(cpu, cfd->cpumask) {
                struct call_single_data *csd = per_cpu_ptr(cfd->csd, cpu);

                csd_lock(csd);
                if (wait)
                        csd->flags |= CSD_FLAG_SYNCHRONOUS;
                csd->func = func;
                csd->info = info;
                llist_add(&csd->llist, &per_cpu(call_single_queue, cpu));
        }

        /* Send a message to all CPUs in the map */
        arch_send_call_function_ipi_mask(cfd->cpumask);

        if (wait) {
                for_each_cpu(cpu, cfd->cpumask) {
                        struct call_single_data *csd;

                        csd = per_cpu_ptr(cfd->csd, cpu);
                        csd_lock_wait(csd);
                }
        }
}
EXPORT_SYMBOL(smp_call_function_many);
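
/*
 * Example usage (an illustrative sketch, not part of the original file):
 * run a callback on every other online CPU and then locally, with
 * preemption disabled around the call as required. The example_* names
 * are hypothetical.
 */
static void example_invalidate(void *info)
{
        /* Fast, non-blocking per-CPU work. */
}

static void example_invalidate_everywhere(void)
{
        preempt_disable();
        smp_call_function_many(cpu_online_mask, example_invalidate, NULL, 1);
        example_invalidate(NULL);       /* smp_call_function_many() skips this CPU */
        preempt_enable();
}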
/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * Returns 0.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function(smp_call_func_t func, void *info, int wait)
{
        preempt_disable();
        smp_call_function_many(cpu_online_mask, func, info, wait);
        preempt_enable();

        return 0;
}
EXPORT_SYMBOL(smp_call_function);
/* Setup configured maximum number of CPUs to activate */
unsigned int setup_max_cpus = NR_CPUS;
EXPORT_SYMBOL(setup_max_cpus);
/*
 * Setup routine for controlling SMP activation
 *
 * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
 * activation entirely (the MPS table probe still happens, though).
 *
 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
 * greater than 0, limits the maximum number of CPUs activated in
 * SMP mode to <NUM>.
 */
void __weak arch_disable_smp_support(void) { }
static int __init nosmp(char *str)
{
        setup_max_cpus = 0;
        arch_disable_smp_support();

        return 0;
}

early_param("nosmp", nosmp);
/* this is hard limit */
static int __init nrcpus(char *str)
{
        int nr_cpus;

        get_option(&str, &nr_cpus);
        if (nr_cpus > 0 && nr_cpus < nr_cpu_ids)
                nr_cpu_ids = nr_cpus;

        return 0;
}

early_param("nr_cpus", nrcpus);
static int __init maxcpus(char *str)
{
        get_option(&str, &setup_max_cpus);
        if (setup_max_cpus == 0)
                arch_disable_smp_support();

        return 0;
}

early_param("maxcpus", maxcpus);
/* Setup number of possible processor ids */
int nr_cpu_ids __read_mostly = NR_CPUS;
EXPORT_SYMBOL(nr_cpu_ids);
/* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
void __init setup_nr_cpu_ids(void)
{
        nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1;
}
void __weak smp_announce(void)
{
        printk(KERN_INFO "Brought up %d CPUs\n", num_online_cpus());
}
/* Called by boot processor to activate the rest. */
void __init smp_init(void)
{
        unsigned int cpu;

        idle_threads_init();
        cpuhp_threads_init();

        /* FIXME: This should be done in userspace --RR */
        for_each_present_cpu(cpu) {
                if (num_online_cpus() >= setup_max_cpus)
                        break;
                if (!cpu_online(cpu))
                        cpu_up(cpu);
        }

        /* Any cleanup work */
        smp_announce();
        smp_cpus_done(setup_max_cpus);
}
/*
 * Call a function on all processors. May be used during early boot while
 * early_boot_irqs_disabled is set. Use local_irq_save/restore() instead
 * of local_irq_disable/enable().
 */
int on_each_cpu(void (*func) (void *info), void *info, int wait)
{
        unsigned long flags;
        int ret = 0;

        preempt_disable();
        ret = smp_call_function(func, info, wait);
        local_irq_save(flags);
        func(info);
        local_irq_restore(flags);
        preempt_enable();
        return ret;
}
EXPORT_SYMBOL(on_each_cpu);
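
/*
 * Example usage (an illustrative sketch, not part of the original file):
 * on_each_cpu() handles the local CPU itself, so the pattern above can be
 * written as a single call. The example_* names are hypothetical.
 */
static void example_sync_counter(void *info)
{
        /* Fast, non-blocking work; runs on every online CPU. */
}

static void example_sync_all_counters(void)
{
        on_each_cpu(example_sync_counter, NULL, 1);
}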
/*
 * on_each_cpu_mask(): Run a function on processors specified by
 * cpumask, which may include the local processor.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. The
 * exception is that it may be used during early boot while
 * early_boot_irqs_disabled is set.
 */
void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
                        void *info, bool wait)
{
        int cpu = get_cpu();

        smp_call_function_many(mask, func, info, wait);
        if (cpumask_test_cpu(cpu, mask)) {
                unsigned long flags;

                local_irq_save(flags);
                func(info);
                local_irq_restore(flags);
        }
        put_cpu();
}
EXPORT_SYMBOL(on_each_cpu_mask);
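
/*
 * Example usage (an illustrative sketch, not part of the original file):
 * drain per-CPU state only on the CPUs recorded in a caller-owned mask,
 * which may include the local CPU. The example_* names are hypothetical.
 */
static void example_drain(void *info)
{
        /* Fast, non-blocking work. */
}

static void example_drain_cpus(const struct cpumask *marked)
{
        on_each_cpu_mask(marked, example_drain, NULL, 1);
}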
/*
 * on_each_cpu_cond(): Call a function on each processor for which
 * the supplied function cond_func returns true, optionally waiting
 * for all the required CPUs to finish. This may include the local
 * processor.
 * @cond_func:	A callback function that is passed a cpu id and
 *		the info parameter. The function is called
 *		with preemption disabled. The function should
 *		return a boolean value indicating whether to IPI
 *		the specified CPU.
 * @func:	The function to run on all applicable CPUs.
 *		This must be fast and non-blocking.
 * @info:	An arbitrary pointer to pass to both functions.
 * @wait:	If true, wait (atomically) until function has
 *		completed on other CPUs.
 * @gfp_flags:	GFP flags to use when allocating the cpumask
 *		used internally by the function.
 *
 * The function might sleep if the GFP flags indicate a non-atomic
 * allocation is allowed.
 *
 * Preemption is disabled to protect against CPUs going offline but not online.
 * CPUs going online during the call will not be seen or sent an IPI.
 *
 * You must not call this function with disabled interrupts or
 * from a hardware interrupt handler or from a bottom half handler.
 */
void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
                        smp_call_func_t func, void *info, bool wait,
                        gfp_t gfp_flags)
{
        cpumask_var_t cpus;
        int cpu, ret;

        might_sleep_if(gfpflags_allow_blocking(gfp_flags));

        if (likely(zalloc_cpumask_var(&cpus, (gfp_flags|__GFP_NOWARN)))) {
                preempt_disable();
                for_each_online_cpu(cpu)
                        if (cond_func(cpu, info))
                                cpumask_set_cpu(cpu, cpus);
                on_each_cpu_mask(cpus, func, info, wait);
                preempt_enable();
                free_cpumask_var(cpus);
        } else {
                /*
                 * No free cpumask, bother. No matter, we'll
                 * just have to IPI them one by one.
                 */
                preempt_disable();
                for_each_online_cpu(cpu)
                        if (cond_func(cpu, info)) {
                                ret = smp_call_function_single(cpu, func,
                                                               info, wait);
                                WARN_ON_ONCE(ret);
                        }
                preempt_enable();
        }
}
EXPORT_SYMBOL(on_each_cpu_cond);
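
/*
 * Example usage (an illustrative sketch, not part of the original file):
 * only send an IPI to CPUs whose per-CPU "pending" counter is non-zero.
 * The example_* names and the per-CPU variable are hypothetical.
 */
static DEFINE_PER_CPU(unsigned int, example_pending);

static bool example_cpu_has_pending(int cpu, void *info)
{
        return per_cpu(example_pending, cpu) != 0;
}

static void example_flush_pending(void *info)
{
        this_cpu_write(example_pending, 0);
}

static void example_flush_all_pending(void)
{
        on_each_cpu_cond(example_cpu_has_pending, example_flush_pending,
                         NULL, true, GFP_KERNEL);
}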
static void do_nothing(void *unused)
{
}
/**
 * kick_all_cpus_sync - Force all cpus out of idle
 *
 * Used to synchronize the update of pm_idle function pointer. It's
 * called after the pointer is updated and returns after the dummy
 * callback function has been executed on all cpus. The execution of
 * the function can only happen on the remote cpus after they have
 * left the idle function which had been called via pm_idle function
 * pointer. So it's guaranteed that nothing uses the previous pointer
 * anymore.
 */
void kick_all_cpus_sync(void)
{
        /* Make sure the change is visible before we kick the cpus */
        smp_mb();
        smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(kick_all_cpus_sync);
/**
 * wake_up_all_idle_cpus - break all cpus out of idle
 *
 * wake_up_all_idle_cpus tries to break out of idle every online cpu,
 * including cpus that are idle-polling; nothing is done for cpus that
 * are not idle.
 */
void wake_up_all_idle_cpus(void)
{
        int cpu;

        preempt_disable();
        for_each_online_cpu(cpu) {
                if (cpu == smp_processor_id())
                        continue;

                wake_up_if_idle(cpu);
        }
        preempt_enable();
}
EXPORT_SYMBOL_GPL(wake_up_all_idle_cpus);
/**
 * smp_call_on_cpu - Call a function on a specific cpu
 *
 * Used to call a function on a specific cpu and wait for it to return.
 * Optionally make sure the call is done on a specified physical cpu via vcpu
 * pinning in order to support virtualized environments.
 */
struct smp_call_on_cpu_struct {
        struct work_struct      work;
        struct completion       done;
        int                     (*func)(void *);
        void                    *data;
        int                     ret;
        int                     cpu;
};
static void smp_call_on_cpu_callback(struct work_struct *work)
{
        struct smp_call_on_cpu_struct *sscs;

        sscs = container_of(work, struct smp_call_on_cpu_struct, work);
        if (sscs->cpu >= 0)
                hypervisor_pin_vcpu(sscs->cpu);
        sscs->ret = sscs->func(sscs->data);
        if (sscs->cpu >= 0)
                hypervisor_pin_vcpu(-1);

        complete(&sscs->done);
}
int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
{
        struct smp_call_on_cpu_struct sscs = {
                .done = COMPLETION_INITIALIZER_ONSTACK(sscs.done),
                .func = func,
                .data = par,
                .cpu  = phys ? cpu : -1,
        };

        INIT_WORK_ONSTACK(&sscs.work, smp_call_on_cpu_callback);

        if (cpu >= nr_cpu_ids || !cpu_online(cpu))
                return -ENXIO;

        queue_work_on(cpu, system_wq, &sscs.work);
        wait_for_completion(&sscs.done);

        return sscs.ret;
}
EXPORT_SYMBOL_GPL(smp_call_on_cpu);
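
/*
 * Example usage (an illustrative sketch, not part of the original file):
 * run a function that may sleep on a specific CPU (in process context, via
 * the workqueue above), optionally pinned to the matching physical CPU.
 * The example_* names are hypothetical.
 */
static int example_read_platform_state(void *arg)
{
        /* May sleep; runs in a kworker bound to the requested CPU. */
        return 0;
}

static int example_query_platform(unsigned int cpu)
{
        return smp_call_on_cpu(cpu, example_read_platform_state, NULL, false);
}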