/* Copyright 2008, 2005 Rusty Russell rusty@rustcorp.com.au IBM Corporation.
 * GPL v2 and any later version.
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/stop_machine.h>
#include <linux/syscalls.h>
#include <linux/interrupt.h>

#include <asm/atomic.h>
#include <asm/uaccess.h>
/* This controls the threads on each CPU. */
enum stopmachine_state {
	/* Dummy starting state for thread. */
	STOPMACHINE_NONE,
	/* Awaiting everyone to be scheduled. */
	STOPMACHINE_PREPARE,
	/* Disable interrupts. */
	STOPMACHINE_DISABLE_IRQ,
	/* Run the function */
	STOPMACHINE_RUN,
	/* Exit */
	STOPMACHINE_EXIT,
};

/* Current state; advanced by set_state()/ack_state(), polled by stop_cpu(). */
static enum stopmachine_state state;
/*
 * Descriptor for one stop_machine invocation: the function to run, its
 * argument, and a shared return slot (only the first/last error is kept —
 * see stop_cpu()).
 */
struct stop_machine_data {
	int (*fn)(void *);
	void *data;
	int fnret;
};
37 /* Like num_online_cpus(), but hotplug cpu uses us, so we need this. */
38 static unsigned int num_threads
;
39 static atomic_t thread_ack
;
40 static DEFINE_MUTEX(lock
);
41 /* setup_lock protects refcount, stop_machine_wq and stop_machine_work. */
42 static DEFINE_MUTEX(setup_lock
);
43 /* do not start up until all worklets have been placed: */
44 static DEFINE_MUTEX(startup_lock
);
45 /* Users of stop_machine. */
47 static struct workqueue_struct
*stop_machine_wq
;
48 static struct stop_machine_data active
, idle
;
49 static const cpumask_t
*active_cpus
;
50 static void *stop_machine_work
;
52 static void set_state(enum stopmachine_state newstate
)
54 /* Reset ack counter. */
55 atomic_set(&thread_ack
, num_threads
);
60 /* Last one to ack a state moves to the next state. */
61 static void ack_state(void)
63 if (atomic_dec_and_test(&thread_ack
))
67 /* This is the actual function which stops the CPU. It runs
68 * in the context of a dedicated stopmachine workqueue. */
69 static void stop_cpu(struct work_struct
*unused
)
71 enum stopmachine_state curstate
= STOPMACHINE_NONE
;
72 struct stop_machine_data
*smdata
= &idle
;
73 int cpu
= smp_processor_id();
77 * Wait for the startup loop to finish:
79 mutex_lock(&startup_lock
);
81 * Let other threads continue too:
83 mutex_unlock(&startup_lock
);
86 if (cpu
== cpumask_first(cpu_online_mask
))
89 if (cpumask_test_cpu(cpu
, active_cpus
))
92 /* Simple state machine */
94 /* Chill out and ensure we re-read stopmachine_state. */
96 if (state
!= curstate
) {
99 case STOPMACHINE_DISABLE_IRQ
:
103 case STOPMACHINE_RUN
:
104 /* On multiple CPUs only a single error code
105 * is needed to tell that something failed. */
106 err
= smdata
->fn(smdata
->data
);
115 } while (curstate
!= STOPMACHINE_EXIT
);
/* Callback for CPUs which aren't supposed to do anything. */
static int chill(void *unused)
{
	return 0;
}
126 int stop_machine_create(void)
128 mutex_lock(&setup_lock
);
131 stop_machine_wq
= create_rt_workqueue("kstop");
132 if (!stop_machine_wq
)
134 stop_machine_work
= alloc_percpu(struct work_struct
);
135 if (!stop_machine_work
)
139 mutex_unlock(&setup_lock
);
144 destroy_workqueue(stop_machine_wq
);
145 mutex_unlock(&setup_lock
);
148 EXPORT_SYMBOL_GPL(stop_machine_create
);
150 void stop_machine_destroy(void)
152 mutex_lock(&setup_lock
);
156 destroy_workqueue(stop_machine_wq
);
157 free_percpu(stop_machine_work
);
159 mutex_unlock(&setup_lock
);
161 EXPORT_SYMBOL_GPL(stop_machine_destroy
);
163 int __stop_machine(int (*fn
)(void *), void *data
, const struct cpumask
*cpus
)
165 struct work_struct
*sm_work
;
168 /* Set up initial state. */
170 num_threads
= num_online_cpus();
178 set_state(STOPMACHINE_PREPARE
);
181 * Schedule the stop_cpu work on all cpus before allowing any
182 * of the CPUs to execute it:
184 mutex_lock(&startup_lock
);
186 for_each_online_cpu(i
) {
187 sm_work
= per_cpu_ptr(stop_machine_work
, i
);
188 INIT_WORK(sm_work
, stop_cpu
);
189 queue_work_on(i
, stop_machine_wq
, sm_work
);
192 /* This will release the thread on all CPUs: */
193 mutex_unlock(&startup_lock
);
195 flush_workqueue(stop_machine_wq
);
/*
 * Convenience wrapper around __stop_machine(): sets up the infrastructure,
 * pins cpu hotplug for the duration, and tears everything down again.
 * Returns stop_machine_create()'s error, or @fn's return value.
 */
int stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
{
	int ret;

	ret = stop_machine_create();
	if (ret)
		return ret;
	/* No CPUs can come up or down during this. */
	get_online_cpus();
	ret = __stop_machine(fn, data, cpus);
	put_online_cpus();
	stop_machine_destroy();
	return ret;
}
EXPORT_SYMBOL_GPL(stop_machine);