// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * kernel/stop_machine.c
 *
 * Copyright (C) 2008, 2005	IBM Corporation.
 * Copyright (C) 2008, 2005	Rusty Russell rusty@rustcorp.com.au
 * Copyright (C) 2010		SUSE Linux Products GmbH
 * Copyright (C) 2010		Tejun Heo <tj@kernel.org>
 */
#include <linux/compiler.h>
#include <linux/completion.h>
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/stop_machine.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/smpboot.h>
#include <linux/atomic.h>
#include <linux/nmi.h>
#include <linux/sched/wake_q.h>

/*
 * Structure to determine completion condition and record errors.  May
 * be shared by works on different cpus.
 */
struct cpu_stop_done {
	atomic_t		nr_todo;	/* nr left to execute */
	int			ret;		/* collected return value */
	struct completion	completion;	/* fired if nr_todo reaches 0 */
};

/* the actual stopper, one per every possible cpu, enabled on online cpus */
struct cpu_stopper {
	struct task_struct	*thread;

	raw_spinlock_t		lock;
	bool			enabled;	/* is this stopper enabled? */
	struct list_head	works;		/* list of pending works */

	struct cpu_stop_work	stop_work;	/* for stop_cpus */
};

static DEFINE_PER_CPU(struct cpu_stopper, cpu_stopper);
static bool stop_machine_initialized = false;

/* static data for stop_cpus */
static DEFINE_MUTEX(stop_cpus_mutex);
static bool stop_cpus_in_progress;

static void cpu_stop_init_done(struct cpu_stop_done *done, unsigned int nr_todo)
{
	memset(done, 0, sizeof(*done));
	atomic_set(&done->nr_todo, nr_todo);
	init_completion(&done->completion);
}

/* signal completion unless @done is NULL */
static void cpu_stop_signal_done(struct cpu_stop_done *done)
{
	if (atomic_dec_and_test(&done->nr_todo))
		complete(&done->completion);
}

static void __cpu_stop_queue_work(struct cpu_stopper *stopper,
					struct cpu_stop_work *work,
					struct wake_q_head *wakeq)
{
	list_add_tail(&work->list, &stopper->works);
	wake_q_add(wakeq, stopper->thread);
}

/* queue @work to @stopper.  if offline, @work is completed immediately */
static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	DEFINE_WAKE_Q(wakeq);
	unsigned long flags;
	bool enabled;

	preempt_disable();
	raw_spin_lock_irqsave(&stopper->lock, flags);
	enabled = stopper->enabled;
	if (enabled)
		__cpu_stop_queue_work(stopper, work, &wakeq);
	else if (work->done)
		cpu_stop_signal_done(work->done);
	raw_spin_unlock_irqrestore(&stopper->lock, flags);

	wake_up_q(&wakeq);
	preempt_enable();

	return enabled;
}

/**
 * stop_one_cpu - stop a cpu
 * @cpu: cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Execute @fn(@arg) on @cpu.  @fn is run in a process context with
 * the highest priority preempting any task on the cpu and
 * monopolizing it.  This function returns after the execution is
 * complete.
 *
 * This function doesn't guarantee @cpu stays online till @fn
 * completes.  If @cpu goes down in the middle, execution may happen
 * partially or fully on different cpus.  @fn should either be ready
 * for that or the caller should ensure that @cpu stays online until
 * this function completes.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -ENOENT if @fn(@arg) was not executed because @cpu was offline;
 * otherwise, the return value of @fn.
 */
int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;
	struct cpu_stop_work work = { .fn = fn, .arg = arg, .done = &done };

	cpu_stop_init_done(&done, 1);
	if (!cpu_stop_queue_work(cpu, &work))
		return -ENOENT;
	/*
	 * In case @cpu == smp_processor_id() we can avoid a sleep+wakeup
	 * cycle by doing a preemption:
	 */
	cond_resched();
	wait_for_completion(&done.completion);
	return done.ret;
}

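/*
 * Usage sketch (illustrative only; bump_counter() and its argument are
 * invented, not part of this file).  The callback runs on the target cpu
 * with every other task preempted, and must not sleep:
 *
 *	static int bump_counter(void *arg)
 *	{
 *		unsigned int *counter = arg;
 *
 *		(*counter)++;	// runs with the cpu monopolized
 *		return 0;	// becomes stop_one_cpu()'s return value
 *	}
 *
 *	unsigned int counter = 0;
 *	int ret = stop_one_cpu(1, bump_counter, &counter);
 */
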
/* This controls the threads on each CPU. */
enum multi_stop_state {
	/* Dummy starting state for thread. */
	MULTI_STOP_NONE,
	/* Awaiting everyone to be scheduled. */
	MULTI_STOP_PREPARE,
	/* Disable interrupts. */
	MULTI_STOP_DISABLE_IRQ,
	/* Run the function */
	MULTI_STOP_RUN,
	/* Exit */
	MULTI_STOP_EXIT,
};

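/*
 * The stopper threads advance through the states above in lockstep:
 * NONE -> PREPARE -> DISABLE_IRQ -> RUN -> EXIT.  A state is only left
 * once every participating thread has acknowledged it; the last thread
 * to ack moves the machine forward (see ack_state() below).
 */
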
struct multi_stop_data {
	cpu_stop_fn_t		fn;
	void			*data;
	/* Like num_online_cpus(), but hotplug cpu uses us, so we need this. */
	unsigned int		num_threads;
	const struct cpumask	*active_cpus;

	enum multi_stop_state	state;
	atomic_t		thread_ack;
};

static void set_state(struct multi_stop_data *msdata,
		      enum multi_stop_state newstate)
{
	/* Reset ack counter. */
	atomic_set(&msdata->thread_ack, msdata->num_threads);
	smp_wmb();
	WRITE_ONCE(msdata->state, newstate);
}

/* Last one to ack a state moves to the next state. */
static void ack_state(struct multi_stop_data *msdata)
{
	if (atomic_dec_and_test(&msdata->thread_ack))
		set_state(msdata, msdata->state + 1);
}

void __weak stop_machine_yield(const struct cpumask *cpumask)
{
	cpu_relax();
}

/* This is the cpu_stop function which stops the CPU. */
static int multi_cpu_stop(void *data)
{
	struct multi_stop_data *msdata = data;
	enum multi_stop_state newstate, curstate = MULTI_STOP_NONE;
	int cpu = smp_processor_id(), err = 0;
	const struct cpumask *cpumask;
	unsigned long flags;
	bool is_active;

	/*
	 * When called from stop_machine_from_inactive_cpu(), irq might
	 * already be disabled.  Save the state and restore it on exit.
	 */
	local_save_flags(flags);

	if (!msdata->active_cpus) {
		cpumask = cpu_online_mask;
		is_active = cpu == cpumask_first(cpumask);
	} else {
		cpumask = msdata->active_cpus;
		is_active = cpumask_test_cpu(cpu, cpumask);
	}

	/* Simple state machine */
	do {
		/* Chill out and ensure we re-read multi_stop_state. */
		stop_machine_yield(cpumask);
		newstate = READ_ONCE(msdata->state);
		if (newstate != curstate) {
			curstate = newstate;
			switch (curstate) {
			case MULTI_STOP_DISABLE_IRQ:
				local_irq_disable();
				hard_irq_disable();
				break;
			case MULTI_STOP_RUN:
				if (is_active)
					err = msdata->fn(msdata->data);
				break;
			default:
				break;
			}
			ack_state(msdata);
		} else if (curstate > MULTI_STOP_PREPARE) {
			/*
			 * At this stage all other CPUs we depend on must spin
			 * in the same loop. Any reason for hard-lockup should
			 * be detected and reported on their side.
			 */
			touch_nmi_watchdog();
		}
	} while (curstate != MULTI_STOP_EXIT);

	local_irq_restore(flags);
	return err;
}

static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
				    int cpu2, struct cpu_stop_work *work2)
{
	struct cpu_stopper *stopper1 = per_cpu_ptr(&cpu_stopper, cpu1);
	struct cpu_stopper *stopper2 = per_cpu_ptr(&cpu_stopper, cpu2);
	DEFINE_WAKE_Q(wakeq);
	int err;

retry:
	/*
	 * The waking up of stopper threads has to happen in the same
	 * scheduling context as the queueing.  Otherwise, there is a
	 * possibility of one of the above stoppers being woken up by another
	 * CPU, and preempting us. This will cause us to not wake up the other
	 * stopper forever.
	 */
	preempt_disable();
	raw_spin_lock_irq(&stopper1->lock);
	raw_spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING);

	if (!stopper1->enabled || !stopper2->enabled) {
		err = -ENOENT;
		goto unlock;
	}

	/*
	 * Ensure that if we race with __stop_cpus() the stoppers won't get
	 * queued up in reverse order leading to system deadlock.
	 *
	 * We can't miss stop_cpus_in_progress if queue_stop_cpus_work() has
	 * queued a work on cpu1 but not on cpu2, we hold both locks.
	 *
	 * It can be falsely true but it is safe to spin until it is cleared,
	 * queue_stop_cpus_work() does everything under preempt_disable().
	 */
	if (unlikely(stop_cpus_in_progress)) {
		err = -EDEADLK;
		goto unlock;
	}

	err = 0;
	__cpu_stop_queue_work(stopper1, work1, &wakeq);
	__cpu_stop_queue_work(stopper2, work2, &wakeq);

unlock:
	raw_spin_unlock(&stopper2->lock);
	raw_spin_unlock_irq(&stopper1->lock);

	if (unlikely(err == -EDEADLK)) {
		preempt_enable();

		while (stop_cpus_in_progress)
			cpu_relax();

		goto retry;
	}

	wake_up_q(&wakeq);
	preempt_enable();

	return err;
}

/**
 * stop_two_cpus - stops two cpus
 * @cpu1: the cpu to stop
 * @cpu2: the other cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Stops both the current and specified CPU and runs @fn on one of them.
 *
 * returns when both are completed.
 */
int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;
	struct cpu_stop_work work1, work2;
	struct multi_stop_data msdata;

	msdata = (struct multi_stop_data){
		.fn = fn,
		.data = arg,
		.num_threads = 2,
		.active_cpus = cpumask_of(cpu1),
	};

	work1 = work2 = (struct cpu_stop_work){
		.fn = multi_cpu_stop,
		.arg = &msdata,
		.done = &done
	};

	cpu_stop_init_done(&done, 2);
	set_state(&msdata, MULTI_STOP_PREPARE);

	if (cpu1 > cpu2)
		swap(cpu1, cpu2);
	if (cpu_stop_queue_two_works(cpu1, &work1, cpu2, &work2))
		return -ENOENT;

	wait_for_completion(&done.completion);
	return done.ret;
}

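/*
 * Usage sketch (illustrative; swap_ctx() and struct my_pair are invented).
 * Both cpus spin in multi_cpu_stop() while @fn runs on exactly one of
 * them, so neither cpu can observe the shared state mid-update:
 *
 *	static int swap_ctx(void *arg)
 *	{
 *		struct my_pair *p = arg;
 *
 *		swap(p->a, p->b);	// atomic as far as both cpus can tell
 *		return 0;
 *	}
 *
 *	ret = stop_two_cpus(src_cpu, dst_cpu, swap_ctx, &pair);
 */
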
/**
 * stop_one_cpu_nowait - stop a cpu but don't wait for completion
 * @cpu: cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 * @work_buf: pointer to cpu_stop_work structure
 *
 * Similar to stop_one_cpu() but doesn't wait for completion.  The
 * caller is responsible for ensuring @work_buf is currently unused
 * and will remain untouched until stopper starts executing @fn.
 *
 * CONTEXT:
 * Don't care.
 *
 * RETURNS:
 * true if cpu_stop_work was queued successfully and @fn will be called,
 * false otherwise.
 */
bool stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
			struct cpu_stop_work *work_buf)
{
	*work_buf = (struct cpu_stop_work){ .fn = fn, .arg = arg, };
	return cpu_stop_queue_work(cpu, work_buf);
}

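/*
 * Usage sketch (illustrative; kick_work and kick_cpu() are invented).
 * Since nothing waits for completion, @work_buf must stay valid until
 * the stopper has picked the work up; callers therefore typically embed
 * it in a long-lived (e.g. per-cpu) structure:
 *
 *	static DEFINE_PER_CPU(struct cpu_stop_work, kick_work);
 *
 *	static int kick_cpu(void *arg)
 *	{
 *		return 0;	// must not sleep
 *	}
 *
 *	stop_one_cpu_nowait(cpu, kick_cpu, NULL, &per_cpu(kick_work, cpu));
 */
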
static bool queue_stop_cpus_work(const struct cpumask *cpumask,
				 cpu_stop_fn_t fn, void *arg,
				 struct cpu_stop_done *done)
{
	struct cpu_stop_work *work;
	unsigned int cpu;
	bool queued = false;

	/*
	 * Disable preemption while queueing to avoid getting
	 * preempted by a stopper which might wait for other stoppers
	 * to enter @fn which can lead to deadlock.
	 */
	preempt_disable();
	stop_cpus_in_progress = true;
	barrier();
	for_each_cpu(cpu, cpumask) {
		work = &per_cpu(cpu_stopper.stop_work, cpu);
		work->fn = fn;
		work->arg = arg;
		work->done = done;
		if (cpu_stop_queue_work(cpu, work))
			queued = true;
	}
	barrier();
	stop_cpus_in_progress = false;
	preempt_enable();

	return queued;
}

static int __stop_cpus(const struct cpumask *cpumask,
		       cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;

	cpu_stop_init_done(&done, cpumask_weight(cpumask));
	if (!queue_stop_cpus_work(cpumask, fn, arg, &done))
		return -ENOENT;
	wait_for_completion(&done.completion);
	return done.ret;
}

/**
 * stop_cpus - stop multiple cpus
 * @cpumask: cpus to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Execute @fn(@arg) on online cpus in @cpumask.  On each target cpu,
 * @fn is run in a process context with the highest priority
 * preempting any task on the cpu and monopolizing it.  This function
 * returns after all executions are complete.
 *
 * This function doesn't guarantee the cpus in @cpumask stay online
 * till @fn completes.  If some cpus go down in the middle, execution
 * on the cpu may happen partially or fully on different cpus.  @fn
 * should either be ready for that or the caller should ensure that
 * the cpus stay online until this function completes.
 *
 * All stop_cpus() calls are serialized making it safe for @fn to wait
 * for all cpus to start executing it.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -ENOENT if @fn(@arg) was not executed at all because all cpus in
 * @cpumask were offline; otherwise, 0 if all executions of @fn
 * returned 0, any non zero return value if any returned non zero.
 */
int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
{
	int ret;

	/* static works are used, process one request at a time */
	mutex_lock(&stop_cpus_mutex);
	ret = __stop_cpus(cpumask, fn, arg);
	mutex_unlock(&stop_cpus_mutex);
	return ret;
}

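/*
 * Usage sketch (illustrative; sync_clock() and the counter are invented).
 * Because stop_cpus() calls are serialized, @fn may safely rendezvous
 * with its siblings on the other cpus, e.g. via a shared atomic:
 *
 *	static atomic_t ready;
 *
 *	static int sync_clock(void *arg)
 *	{
 *		atomic_dec(&ready);
 *		while (atomic_read(&ready))	// spin until all cpus arrive
 *			cpu_relax();
 *		...				// do the synchronized work
 *		return 0;
 *	}
 *
 *	atomic_set(&ready, num_online_cpus());
 *	ret = stop_cpus(cpu_online_mask, sync_clock, NULL);
 */
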
/**
 * try_stop_cpus - try to stop multiple cpus
 * @cpumask: cpus to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Identical to stop_cpus() except that it fails with -EAGAIN if
 * someone else is already using the facility.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -EAGAIN if someone else is already stopping cpus, -ENOENT if
 * @fn(@arg) was not executed at all because all cpus in @cpumask were
 * offline; otherwise, 0 if all executions of @fn returned 0, any non
 * zero return value if any returned non zero.
 */
int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
{
	int ret;

	/* static works are used, process one request at a time */
	if (!mutex_trylock(&stop_cpus_mutex))
		return -EAGAIN;
	ret = __stop_cpus(cpumask, fn, arg);
	mutex_unlock(&stop_cpus_mutex);
	return ret;
}

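/*
 * Usage sketch (illustrative).  A caller that must not block behind other
 * users of the facility can poll instead, reusing the hypothetical
 * sync_clock() callback from above:
 *
 *	do {
 *		ret = try_stop_cpus(cpu_online_mask, sync_clock, NULL);
 *	} while (ret == -EAGAIN);
 */
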
static int cpu_stop_should_run(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	unsigned long flags;
	int run;

	raw_spin_lock_irqsave(&stopper->lock, flags);
	run = !list_empty(&stopper->works);
	raw_spin_unlock_irqrestore(&stopper->lock, flags);
	return run;
}

static void cpu_stopper_thread(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	struct cpu_stop_work *work;

repeat:
	work = NULL;
	raw_spin_lock_irq(&stopper->lock);
	if (!list_empty(&stopper->works)) {
		work = list_first_entry(&stopper->works,
					struct cpu_stop_work, list);
		list_del_init(&work->list);
	}
	raw_spin_unlock_irq(&stopper->lock);

	if (work) {
		cpu_stop_fn_t fn = work->fn;
		void *arg = work->arg;
		struct cpu_stop_done *done = work->done;
		int ret;

		/* cpu stop callbacks must not sleep, make in_atomic() == T */
		preempt_count_inc();
		ret = fn(arg);
		if (done) {
			if (ret)
				done->ret = ret;
			cpu_stop_signal_done(done);
		}
		preempt_count_dec();
		WARN_ONCE(preempt_count(),
			  "cpu_stop: %ps(%p) leaked preempt count\n", fn, arg);
		goto repeat;
	}
}

void stop_machine_park(int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

	/*
	 * Lockless. cpu_stopper_thread() will take stopper->lock and flush
	 * the pending works before it parks, until then it is fine to queue
	 * the new works.
	 */
	stopper->enabled = false;
	kthread_park(stopper->thread);
}

extern void sched_set_stop_task(int cpu, struct task_struct *stop);

static void cpu_stop_create(unsigned int cpu)
{
	sched_set_stop_task(cpu, per_cpu(cpu_stopper.thread, cpu));
}

static void cpu_stop_park(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

	WARN_ON(!list_empty(&stopper->works));
}

void stop_machine_unpark(int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

	stopper->enabled = true;
	kthread_unpark(stopper->thread);
}

static struct smp_hotplug_thread cpu_stop_threads = {
	.store			= &cpu_stopper.thread,
	.thread_should_run	= cpu_stop_should_run,
	.thread_fn		= cpu_stopper_thread,
	.thread_comm		= "migration/%u",
	.create			= cpu_stop_create,
	.park			= cpu_stop_park,
	.selfparking		= true,
};

static int __init cpu_stop_init(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

		raw_spin_lock_init(&stopper->lock);
		INIT_LIST_HEAD(&stopper->works);
	}

	BUG_ON(smpboot_register_percpu_thread(&cpu_stop_threads));
	stop_machine_unpark(raw_smp_processor_id());
	stop_machine_initialized = true;
	return 0;
}
early_initcall(cpu_stop_init);

int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data,
			    const struct cpumask *cpus)
{
	struct multi_stop_data msdata = {
		.fn = fn,
		.data = data,
		.num_threads = num_online_cpus(),
		.active_cpus = cpus,
	};

	lockdep_assert_cpus_held();

	if (!stop_machine_initialized) {
		/*
		 * Handle the case where stop_machine() is called
		 * early in boot before stop_machine() has been
		 * initialized.
		 */
		unsigned long flags;
		int ret;

		WARN_ON_ONCE(msdata.num_threads != 1);

		local_irq_save(flags);
		hard_irq_disable();
		ret = (*fn)(data);
		local_irq_restore(flags);

		return ret;
	}

	/* Set the initial state and stop all online cpus. */
	set_state(&msdata, MULTI_STOP_PREPARE);
	return stop_cpus(cpu_online_mask, multi_cpu_stop, &msdata);
}

int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus)
{
	int ret;

	/* No CPUs can come up or down during this. */
	cpus_read_lock();
	ret = stop_machine_cpuslocked(fn, data, cpus);
	cpus_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(stop_machine);

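/*
 * Usage sketch (illustrative; patch_text() and struct patch are invented).
 * stop_machine() is the heavyweight option for updating global state that
 * no cpu may observe mid-change, e.g. live code patching: every online
 * cpu spins with interrupts disabled while @fn runs on one of them:
 *
 *	static int patch_text(void *arg)
 *	{
 *		struct patch *p = arg;
 *
 *		...		// rewrite instructions; no cpu is executing them now
 *		return 0;
 *	}
 *
 *	ret = stop_machine(patch_text, &patch, NULL);	// NULL: any one cpu runs @fn
 */
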
/**
 * stop_machine_from_inactive_cpu - stop_machine() from inactive CPU
 * @fn: the function to run
 * @data: the data ptr for the @fn()
 * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
 *
 * This is identical to stop_machine() but can be called from a CPU which
 * is not active.  The local CPU is in the process of hotplug (so no other
 * CPU hotplug can start) and not marked active and doesn't have enough
 * context to sleep.
 *
 * This function provides stop_machine() functionality for such state by
 * using busy-wait for synchronization and executing @fn directly for local
 * CPU.
 *
 * CONTEXT:
 * Local CPU is inactive.  Temporarily stops all active CPUs.
 *
 * RETURNS:
 * 0 if all executions of @fn returned 0, any non zero return value if any
 * returned non zero.
 */
int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
				  const struct cpumask *cpus)
{
	struct multi_stop_data msdata = { .fn = fn, .data = data,
					  .active_cpus = cpus };
	struct cpu_stop_done done;
	int ret;

	/* Local CPU must be inactive and CPU hotplug in progress. */
	BUG_ON(cpu_active(raw_smp_processor_id()));
	msdata.num_threads = num_active_cpus() + 1;	/* +1 for local */

	/* No proper task established and can't sleep - busy wait for lock. */
	while (!mutex_trylock(&stop_cpus_mutex))
		cpu_relax();

	/* Schedule work on other CPUs and execute directly for local CPU */
	set_state(&msdata, MULTI_STOP_PREPARE);
	cpu_stop_init_done(&done, num_active_cpus());
	queue_stop_cpus_work(cpu_active_mask, multi_cpu_stop, &msdata,
			     &done);
	ret = multi_cpu_stop(&msdata);

	/* Busy wait for completion. */
	while (!completion_done(&done.completion))
		cpu_relax();

	mutex_unlock(&stop_cpus_mutex);
	return ret ?: done.ret;
}