// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic helpers for smp ipi calls
 *
 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/irq_work.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/sched/idle.h>
#include <linux/hypervisor.h>

#include "smpboot.h"
#include "sched/smp.h"

#define CSD_TYPE(_csd)	((_csd)->flags & CSD_FLAG_TYPE_MASK)

struct call_function_data {
	call_single_data_t	__percpu *csd;
	cpumask_var_t		cpumask;
	cpumask_var_t		cpumask_ipi;
};

static DEFINE_PER_CPU_ALIGNED(struct call_function_data, cfd_data);

static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);

static void flush_smp_call_function_queue(bool warn_cpu_offline);
int smpcfd_prepare_cpu(unsigned int cpu)
{
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
				     cpu_to_node(cpu)))
		return -ENOMEM;
	if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
				     cpu_to_node(cpu))) {
		free_cpumask_var(cfd->cpumask);
		return -ENOMEM;
	}
	cfd->csd = alloc_percpu(call_single_data_t);
	if (!cfd->csd) {
		free_cpumask_var(cfd->cpumask);
		free_cpumask_var(cfd->cpumask_ipi);
		return -ENOMEM;
	}

	return 0;
}

int smpcfd_dead_cpu(unsigned int cpu)
{
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	free_cpumask_var(cfd->cpumask);
	free_cpumask_var(cfd->cpumask_ipi);
	free_percpu(cfd->csd);
	return 0;
}
int smpcfd_dying_cpu(unsigned int cpu)
{
	/*
	 * The IPIs for the smp-call-function callbacks queued by other
	 * CPUs might arrive late, either due to hardware latencies or
	 * because this CPU disabled interrupts (inside stop-machine)
	 * before the IPIs were sent. So flush out any pending callbacks
	 * explicitly (without waiting for the IPIs to arrive), to
	 * ensure that the outgoing CPU doesn't go offline with work
	 * still pending.
	 */
	flush_smp_call_function_queue(false);
	irq_work_run();
	return 0;
}

void __init call_function_init(void)
{
	int i;

	for_each_possible_cpu(i)
		init_llist_head(&per_cpu(call_single_queue, i));

	smpcfd_prepare_cpu(smp_processor_id());
}
/*
 * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
 *
 * For non-synchronous ipi calls the csd can still be in use by the
 * previous function call. For multi-cpu calls it's even more interesting
 * as we'll have to ensure no other cpu is observing our csd.
 */
static __always_inline void csd_lock_wait(call_single_data_t *csd)
{
	smp_cond_load_acquire(&csd->flags, !(VAL & CSD_FLAG_LOCK));
}

static __always_inline void csd_lock(call_single_data_t *csd)
{
	csd_lock_wait(csd);
	csd->flags |= CSD_FLAG_LOCK;

	/*
	 * prevent CPU from reordering the above assignment
	 * to ->flags with any subsequent assignments to other
	 * fields of the specified call_single_data_t structure:
	 */
	smp_wmb();
}

static __always_inline void csd_unlock(call_single_data_t *csd)
{
	WARN_ON(!(csd->flags & CSD_FLAG_LOCK));

	/*
	 * ensure we're all done before releasing data:
	 */
	smp_store_release(&csd->flags, 0);
}

static DEFINE_PER_CPU_SHARED_ALIGNED(call_single_data_t, csd_data);
void __smp_call_single_queue(int cpu, struct llist_node *node)
{
	/*
	 * The list addition should be visible before sending the IPI
	 * handler locks the list to pull the entry off it because of
	 * normal cache coherency rules implied by spinlocks.
	 *
	 * If IPIs can go out of order to the cache coherency protocol
	 * in an architecture, sufficient synchronisation should be added
	 * to arch code to make it appear to obey cache coherency WRT
	 * locking and barrier primitives. Generic code isn't really
	 * equipped to do the right thing...
	 */
	if (llist_add(node, &per_cpu(call_single_queue, cpu)))
		send_call_function_single_ipi(cpu);
}

/*
 * Insert a previously allocated call_single_data_t element
 * for execution on the given CPU. data must already have
 * ->func, ->info, and ->flags set.
 */
static int generic_exec_single(int cpu, call_single_data_t *csd)
{
	if (cpu == smp_processor_id()) {
		smp_call_func_t func = csd->func;
		void *info = csd->info;
		unsigned long flags;

		/*
		 * We can unlock early even for the synchronous on-stack case,
		 * since we're doing this from the same CPU..
		 */
		csd_unlock(csd);
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
		return 0;
	}

	if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) {
		csd_unlock(csd);
		return -ENXIO;
	}

	__smp_call_single_queue(cpu, &csd->llist);

	return 0;
}
/**
 * generic_smp_call_function_single_interrupt - Execute SMP IPI callbacks
 *
 * Invoked by arch to handle an IPI for call function single.
 * Must be called with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
	flush_smp_call_function_queue(true);
}

/**
 * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
 *
 * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an
 *		      offline CPU. Skip this check if set to 'false'.
 *
 * Flush any pending smp-call-function callbacks queued on this CPU. This is
 * invoked by the generic IPI handler, as well as by a CPU about to go offline,
 * to ensure that all pending IPI callbacks are run before it goes completely
 * offline.
 *
 * Loop through the call_single_queue and run all the queued callbacks.
 * Must be called with interrupts disabled.
 */
static void flush_smp_call_function_queue(bool warn_cpu_offline)
{
	call_single_data_t *csd, *csd_next;
	struct llist_node *entry, *prev;
	struct llist_head *head;
	static bool warned;

	lockdep_assert_irqs_disabled();

	head = this_cpu_ptr(&call_single_queue);
	entry = llist_del_all(head);
	entry = llist_reverse_order(entry);

	/* There shouldn't be any pending callbacks on an offline CPU. */
	if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) &&
		     !warned && !llist_empty(head))) {
		warned = true;
		WARN(1, "IPI on offline CPU %d\n", smp_processor_id());

		/*
		 * We don't have to use the _safe() variant here
		 * because we are not invoking the IPI handlers yet.
		 */
		llist_for_each_entry(csd, entry, llist) {
			switch (CSD_TYPE(csd)) {
			case CSD_TYPE_ASYNC:
			case CSD_TYPE_SYNC:
			case CSD_TYPE_IRQ_WORK:
				pr_warn("IPI callback %pS sent to offline CPU\n",
					csd->func);
				break;

			case CSD_TYPE_TTWU:
				pr_warn("IPI task-wakeup sent to offline CPU\n");
				break;

			default:
				pr_warn("IPI callback, unknown type %d, sent to offline CPU\n",
					CSD_TYPE(csd));
				break;
			}
		}
	}
	/*
	 * First; run all SYNC callbacks, people are waiting for us.
	 */
	prev = NULL;
	llist_for_each_entry_safe(csd, csd_next, entry, llist) {
		/* Do we wait until *after* callback? */
		if (CSD_TYPE(csd) == CSD_TYPE_SYNC) {
			smp_call_func_t func = csd->func;
			void *info = csd->info;

			if (prev) {
				prev->next = &csd_next->llist;
			} else {
				entry = &csd_next->llist;
			}

			func(info);
			csd_unlock(csd);
		} else {
			prev = &csd->llist;
		}
	}

	if (!entry)
		return;

	/*
	 * Second; run all !SYNC callbacks.
	 */
	prev = NULL;
	llist_for_each_entry_safe(csd, csd_next, entry, llist) {
		int type = CSD_TYPE(csd);

		if (type != CSD_TYPE_TTWU) {
			if (prev) {
				prev->next = &csd_next->llist;
			} else {
				entry = &csd_next->llist;
			}

			if (type == CSD_TYPE_ASYNC) {
				smp_call_func_t func = csd->func;
				void *info = csd->info;

				csd_unlock(csd);
				func(info);
			} else if (type == CSD_TYPE_IRQ_WORK) {
				irq_work_single(csd);
			}

		} else {
			prev = &csd->llist;
		}
	}

	/*
	 * Third; only CSD_TYPE_TTWU is left, issue those.
	 */
	if (entry)
		sched_ttwu_pending(entry);
}
void flush_smp_call_function_from_idle(void)
{
	unsigned long flags;

	if (llist_empty(this_cpu_ptr(&call_single_queue)))
		return;

	local_irq_save(flags);
	flush_smp_call_function_queue(true);
	local_irq_restore(flags);
}
/*
 * smp_call_function_single - Run a function on a specific CPU
 * @cpu: The CPU to run @func on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 */
int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
			     int wait)
{
	call_single_data_t *csd;
	call_single_data_t csd_stack = {
		.flags = CSD_FLAG_LOCK | CSD_TYPE_SYNC,
	};
	int this_cpu;
	int err;

	/*
	 * prevent preemption and reschedule on another processor,
	 * as well as CPU removal
	 */
	this_cpu = get_cpu();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow cpu's that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress);

	/*
	 * When @wait we can deadlock when we interrupt between llist_add() and
	 * arch_send_call_function_ipi*(); when !@wait we can deadlock due to
	 * csd_lock() because the interrupt context uses the same csd
	 * storage.
	 */
	WARN_ON_ONCE(!in_task());

	csd = &csd_stack;
	if (!wait) {
		csd = this_cpu_ptr(&csd_data);
		csd_lock(csd);
	}

	csd->func = func;
	csd->info = info;

	err = generic_exec_single(cpu, csd);

	if (wait)
		csd_lock_wait(csd);

	put_cpu();

	return err;
}
EXPORT_SYMBOL(smp_call_function_single);
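/*
 * Illustrative usage (editor's sketch, not part of the original file; the
 * callback and variable names below are assumptions). With @wait set, the
 * caller blocks until the callback has run on the target CPU:
 *
 *	static void read_cpu_id(void *info)
 *	{
 *		*(int *)info = smp_processor_id();
 *	}
 *
 *	int id, err;
 *
 *	err = smp_call_function_single(2, read_cpu_id, &id, 1);
 *	if (!err)
 *		pr_info("callback ran on CPU %d\n", id);
 *	else
 *		pr_warn("CPU 2 not online: %d\n", err);	// -ENXIO
 */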
/**
 * smp_call_function_single_async(): Run an asynchronous function on a
 *				     specific CPU.
 * @cpu: The CPU to run on.
 * @csd: Pre-allocated and setup data structure
 *
 * Like smp_call_function_single(), but the call is asynchronous and
 * can thus be done from contexts with disabled interrupts.
 *
 * The caller passes their own pre-allocated data structure
 * (ie: embedded in an object) and is responsible for synchronizing it
 * such that the IPIs performed on the @csd are strictly serialized.
 *
 * If the function is called with one csd which has not yet been
 * processed by a previous call to smp_call_function_single_async(), the
 * function will return immediately with -EBUSY showing that the csd
 * object is still in progress.
 *
 * NOTE: Be careful, there is unfortunately no current debugging facility to
 * validate the correctness of this serialization.
 */
int smp_call_function_single_async(int cpu, call_single_data_t *csd)
{
	int err = 0;

	preempt_disable();

	if (csd->flags & CSD_FLAG_LOCK) {
		err = -EBUSY;
		goto out;
	}

	csd->flags = CSD_FLAG_LOCK;
	smp_wmb();

	err = generic_exec_single(cpu, csd);

out:
	preempt_enable();

	return err;
}
EXPORT_SYMBOL_GPL(smp_call_function_single_async);
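/*
 * Illustrative usage (editor's sketch, not part of the original file; the
 * structure and names are assumptions). The csd is typically embedded in a
 * longer-lived object, and the owner must not reuse it until the previous
 * call has been processed; -EBUSY signals that it is still in flight:
 *
 *	struct my_dev {
 *		call_single_data_t csd;
 *		...
 *	};
 *
 *	static void my_dev_poke(void *info)
 *	{
 *		struct my_dev *dev = info;
 *		...
 *	}
 *
 *	dev->csd.func = my_dev_poke;
 *	dev->csd.info = dev;
 *	if (smp_call_function_single_async(target_cpu, &dev->csd) == -EBUSY)
 *		;	// previous request on this csd not yet processed
 */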
/*
 * smp_call_function_any - Run a function on any of the given cpus
 * @mask: The mask of cpus it can run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed.
 *
 * Returns 0 on success, else a negative status code (if no cpus were online).
 *
 * Selection preference:
 *	1) current cpu if in @mask
 *	2) any cpu of current node if in @mask
 *	3) any other online cpu in @mask
 */
int smp_call_function_any(const struct cpumask *mask,
			  smp_call_func_t func, void *info, int wait)
{
	unsigned int cpu;
	const struct cpumask *nodemask;
	int ret;

	/* Try for same CPU (cheapest) */
	cpu = get_cpu();
	if (cpumask_test_cpu(cpu, mask))
		goto call;

	/* Try for same node. */
	nodemask = cpumask_of_node(cpu_to_node(cpu));
	for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
	     cpu = cpumask_next_and(cpu, nodemask, mask)) {
		if (cpu_online(cpu))
			goto call;
	}

	/* Any online will do: smp_call_function_single handles nr_cpu_ids. */
	cpu = cpumask_any_and(mask, cpu_online_mask);
call:
	ret = smp_call_function_single(cpu, func, info, wait);
	put_cpu();
	return ret;
}
EXPORT_SYMBOL_GPL(smp_call_function_any);
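/*
 * Illustrative usage (editor's sketch, not part of the original file; the
 * callback, device and value names are assumptions). Useful when the work
 * only has to run somewhere within a given set, e.g. any CPU of a device's
 * home node, with the current CPU preferred if it qualifies:
 *
 *	static void read_node_counter(void *info) { ... }
 *
 *	err = smp_call_function_any(cpumask_of_node(dev_to_node(dev)),
 *				    read_node_counter, &val, 1);
 */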
static void smp_call_function_many_cond(const struct cpumask *mask,
					smp_call_func_t func, void *info,
					bool wait, smp_cond_func_t cond_func)
{
	struct call_function_data *cfd;
	int cpu, next_cpu, this_cpu = smp_processor_id();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow cpu's that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress && !early_boot_irqs_disabled);

	/*
	 * When @wait we can deadlock when we interrupt between llist_add() and
	 * arch_send_call_function_ipi*(); when !@wait we can deadlock due to
	 * csd_lock() because the interrupt context uses the same csd
	 * storage.
	 */
	WARN_ON_ONCE(!in_task());

	/* Try to fastpath.  So, what's a CPU they want? Ignoring this one. */
	cpu = cpumask_first_and(mask, cpu_online_mask);
	if (cpu == this_cpu)
		cpu = cpumask_next_and(cpu, mask, cpu_online_mask);

	/* No online cpus?  We're done. */
	if (cpu >= nr_cpu_ids)
		return;

	/* Do we have another CPU which isn't us? */
	next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
	if (next_cpu == this_cpu)
		next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);

	/* Fastpath: do that cpu by itself. */
	if (next_cpu >= nr_cpu_ids) {
		if (!cond_func || cond_func(cpu, info))
			smp_call_function_single(cpu, func, info, wait);
		return;
	}

	cfd = this_cpu_ptr(&cfd_data);

	cpumask_and(cfd->cpumask, mask, cpu_online_mask);
	__cpumask_clear_cpu(this_cpu, cfd->cpumask);

	/* Some callers race with other cpus changing the passed mask */
	if (unlikely(!cpumask_weight(cfd->cpumask)))
		return;

	cpumask_clear(cfd->cpumask_ipi);
	for_each_cpu(cpu, cfd->cpumask) {
		call_single_data_t *csd = per_cpu_ptr(cfd->csd, cpu);

		if (cond_func && !cond_func(cpu, info))
			continue;

		csd_lock(csd);
		if (wait)
			csd->flags |= CSD_TYPE_SYNC;
		csd->func = func;
		csd->info = info;
		if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
			__cpumask_set_cpu(cpu, cfd->cpumask_ipi);
	}

	/* Send a message to all CPUs in the map */
	arch_send_call_function_ipi_mask(cfd->cpumask_ipi);

	if (wait) {
		for_each_cpu(cpu, cfd->cpumask) {
			call_single_data_t *csd;

			csd = per_cpu_ptr(cfd->csd, cpu);
			csd_lock_wait(csd);
		}
	}
}
/**
 * smp_call_function_many(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
void smp_call_function_many(const struct cpumask *mask,
			    smp_call_func_t func, void *info, bool wait)
{
	smp_call_function_many_cond(mask, func, info, wait, NULL);
}
EXPORT_SYMBOL(smp_call_function_many);
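/*
 * Illustrative usage (editor's sketch, not part of the original file; the
 * mask and callback are assumptions). Note that the calling CPU is skipped
 * and preemption must already be disabled by the caller:
 *
 *	static void flush_local_state(void *info) { ... }
 *
 *	preempt_disable();
 *	smp_call_function_many(target_mask, flush_local_state, NULL, true);
 *	flush_local_state(NULL);	// current CPU is not included in the call
 *	preempt_enable();
 */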
/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
void smp_call_function(smp_call_func_t func, void *info, int wait)
{
	preempt_disable();
	smp_call_function_many(cpu_online_mask, func, info, wait);
	preempt_enable();
}
EXPORT_SYMBOL(smp_call_function);
/* Setup configured maximum number of CPUs to activate */
unsigned int setup_max_cpus = NR_CPUS;
EXPORT_SYMBOL(setup_max_cpus);


/*
 * Setup routine for controlling SMP activation
 *
 * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
 * activation entirely (the MPS table probe still happens, though).
 *
 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
 * greater than 0, limits the maximum number of CPUs activated in
 * SMP mode to <NUM>.
 */

void __weak arch_disable_smp_support(void) { }

static int __init nosmp(char *str)
{
	setup_max_cpus = 0;
	arch_disable_smp_support();

	return 0;
}

early_param("nosmp", nosmp);

/* this is hard limit */
static int __init nrcpus(char *str)
{
	int nr_cpus;

	get_option(&str, &nr_cpus);
	if (nr_cpus > 0 && nr_cpus < nr_cpu_ids)
		nr_cpu_ids = nr_cpus;

	return 0;
}

early_param("nr_cpus", nrcpus);

static int __init maxcpus(char *str)
{
	get_option(&str, &setup_max_cpus);
	if (setup_max_cpus == 0)
		arch_disable_smp_support();

	return 0;
}

early_param("maxcpus", maxcpus);
/* Setup number of possible processor ids */
unsigned int nr_cpu_ids __read_mostly = NR_CPUS;
EXPORT_SYMBOL(nr_cpu_ids);

/* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
void __init setup_nr_cpu_ids(void)
{
	nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1;
}

/* Called by boot processor to activate the rest. */
void __init smp_init(void)
{
	int num_nodes, num_cpus;

	idle_threads_init();
	cpuhp_threads_init();

	pr_info("Bringing up secondary CPUs ...\n");

	bringup_nonboot_cpus(setup_max_cpus);

	num_nodes = num_online_nodes();
	num_cpus  = num_online_cpus();
	pr_info("Brought up %d node%s, %d CPU%s\n",
		num_nodes, (num_nodes > 1 ? "s" : ""),
		num_cpus,  (num_cpus  > 1 ? "s" : ""));

	/* Any cleanup work */
	smp_cpus_done(setup_max_cpus);
}
/*
 * Call a function on all processors.  May be used during early boot while
 * early_boot_irqs_disabled is set.  Use local_irq_save/restore() instead
 * of local_irq_disable/enable().
 */
void on_each_cpu(smp_call_func_t func, void *info, int wait)
{
	unsigned long flags;

	preempt_disable();
	smp_call_function(func, info, wait);
	local_irq_save(flags);
	func(info);
	local_irq_restore(flags);
	preempt_enable();
}
EXPORT_SYMBOL(on_each_cpu);
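/*
 * Illustrative usage (editor's sketch, not part of the original file; the
 * callback name is an assumption). Unlike smp_call_function(), this also
 * runs @func on the calling CPU, with interrupts disabled locally:
 *
 *	static void sync_feature_state(void *info) { ... }
 *
 *	on_each_cpu(sync_feature_state, NULL, 1);
 *	// returns after every online CPU, including this one, has run it
 */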
/**
 * on_each_cpu_mask(): Run a function on processors specified by
 * cpumask, which may include the local processor.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.  The
 * exception is that it may be used during early boot while
 * early_boot_irqs_disabled is set.
 */
void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
			void *info, bool wait)
{
	int cpu = get_cpu();

	smp_call_function_many(mask, func, info, wait);
	if (cpumask_test_cpu(cpu, mask)) {
		unsigned long flags;
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	}
	put_cpu();
}
EXPORT_SYMBOL(on_each_cpu_mask);
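/*
 * Illustrative usage (editor's sketch, not part of the original file; the
 * mask and callback are assumptions). Handy when only a subset of CPUs,
 * possibly including the current one, needs the update:
 *
 *	static void apply_quirk(void *info) { ... }
 *
 *	on_each_cpu_mask(cpumask_of_node(0), apply_quirk, NULL, true);
 */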
/*
 * on_each_cpu_cond(): Call a function on each processor for which
 * the supplied function cond_func returns true, optionally waiting
 * for all the required CPUs to finish. This may include the local
 * processor.
 * @cond_func:	A callback function that is passed a cpu id and
 *		the info parameter. The function is called
 *		with preemption disabled. The function should
 *		return a boolean value indicating whether to IPI
 *		the specified CPU.
 * @func:	The function to run on all applicable CPUs.
 *		This must be fast and non-blocking.
 * @info:	An arbitrary pointer to pass to both functions.
 * @wait:	If true, wait (atomically) until function has
 *		completed on other CPUs.
 *
 * Preemption is disabled to protect against CPUs going offline but not online.
 * CPUs going online during the call will not be seen or sent an IPI.
 *
 * You must not call this function with disabled interrupts or
 * from a hardware interrupt handler or from a bottom half handler.
 */
void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
			   void *info, bool wait, const struct cpumask *mask)
{
	int cpu = get_cpu();

	smp_call_function_many_cond(mask, func, info, wait, cond_func);
	if (cpumask_test_cpu(cpu, mask) && cond_func(cpu, info)) {
		unsigned long flags;

		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	}
	put_cpu();
}
EXPORT_SYMBOL(on_each_cpu_cond_mask);

void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
		      void *info, bool wait)
{
	on_each_cpu_cond_mask(cond_func, func, info, wait, cpu_online_mask);
}
EXPORT_SYMBOL(on_each_cpu_cond);
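/*
 * Illustrative usage (editor's sketch, not part of the original file; the
 * per-cpu flag and callbacks are assumptions). The condition callback lets
 * the caller IPI only those CPUs that actually need the work:
 *
 *	static DEFINE_PER_CPU(bool, state_stale);
 *
 *	static bool state_is_stale(int cpu, void *info)
 *	{
 *		return per_cpu(state_stale, cpu);
 *	}
 *
 *	static void refresh_local_state(void *info) { ... }
 *
 *	on_each_cpu_cond(state_is_stale, refresh_local_state, NULL, true);
 */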
static void do_nothing(void *unused)
{
}

/**
 * kick_all_cpus_sync - Force all cpus out of idle
 *
 * Used to synchronize the update of pm_idle function pointer. It's
 * called after the pointer is updated and returns after the dummy
 * callback function has been executed on all cpus. The execution of
 * the function can only happen on the remote cpus after they have
 * left the idle function which had been called via pm_idle function
 * pointer. So it's guaranteed that nothing uses the previous pointer
 * anymore.
 */
void kick_all_cpus_sync(void)
{
	/* Make sure the change is visible before we kick the cpus */
	smp_mb();
	smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(kick_all_cpus_sync);
/**
 * wake_up_all_idle_cpus - break all cpus out of idle
 *
 * wake_up_all_idle_cpus tries to break all cpus which are in idle state,
 * including idle-polling cpus; for non-idle cpus, nothing is done.
 */
void wake_up_all_idle_cpus(void)
{
	int cpu;

	preempt_disable();
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;

		wake_up_if_idle(cpu);
	}
	preempt_enable();
}
EXPORT_SYMBOL_GPL(wake_up_all_idle_cpus);
/**
 * smp_call_on_cpu - Call a function on a specific cpu
 *
 * Used to call a function on a specific cpu and wait for it to return.
 * Optionally make sure the call is done on a specified physical cpu via vcpu
 * pinning in order to support virtualized environments.
 */
struct smp_call_on_cpu_struct {
	struct work_struct	work;
	struct completion	done;
	int			(*func)(void *);
	void			*data;
	int			ret;
	int			cpu;
};

static void smp_call_on_cpu_callback(struct work_struct *work)
{
	struct smp_call_on_cpu_struct *sscs;

	sscs = container_of(work, struct smp_call_on_cpu_struct, work);
	if (sscs->cpu >= 0)
		hypervisor_pin_vcpu(sscs->cpu);
	sscs->ret = sscs->func(sscs->data);
	if (sscs->cpu >= 0)
		hypervisor_pin_vcpu(-1);

	complete(&sscs->done);
}

int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
{
	struct smp_call_on_cpu_struct sscs = {
		.done = COMPLETION_INITIALIZER_ONSTACK(sscs.done),
		.func = func,
		.data = par,
		.cpu  = phys ? cpu : -1,
	};

	INIT_WORK_ONSTACK(&sscs.work, smp_call_on_cpu_callback);

	if (cpu >= nr_cpu_ids || !cpu_online(cpu))
		return -ENXIO;

	queue_work_on(cpu, system_wq, &sscs.work);
	wait_for_completion(&sscs.done);

	return sscs.ret;
}
EXPORT_SYMBOL_GPL(smp_call_on_cpu);
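/*
 * Illustrative usage (editor's sketch, not part of the original file; the
 * callback and value names are assumptions). Unlike the IPI-based helpers
 * above, @func runs from a workqueue worker on the target CPU and may
 * therefore sleep; the caller gets @func's own return value back:
 *
 *	static int read_counter_on_cpu(void *arg)
 *	{
 *		...
 *		return 0;
 *	}
 *
 *	ret = smp_call_on_cpu(3, read_counter_on_cpu, &counter_val, false);
 */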