/*
 * kernel/stop_machine.c
 *
 * Copyright (C) 2008, 2005 IBM Corporation.
 * Copyright (C) 2008, 2005 Rusty Russell rusty@rustcorp.com.au
 * Copyright (C) 2010 SUSE Linux Products GmbH
 * Copyright (C) 2010 Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2 and any later version.
 */
#include <linux/completion.h>
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/stop_machine.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/smpboot.h>
#include <linux/atomic.h>
#include <linux/nmi.h>
#include <linux/sched/wake_q.h>

/*
 * Structure to determine completion condition and record errors. May
 * be shared by works on different cpus.
 */
struct cpu_stop_done {
	atomic_t		nr_todo;	/* nr left to execute */
	int			ret;		/* collected return value */
	struct completion	completion;	/* fired if nr_todo reaches 0 */
};

/* the actual stopper, one per every possible cpu, enabled on online cpus */
struct cpu_stopper {
	struct task_struct	*thread;

	raw_spinlock_t		lock;
	bool			enabled;	/* is this stopper enabled? */
	struct list_head	works;		/* list of pending works */

	struct cpu_stop_work	stop_work;	/* for stop_cpus */
};

static DEFINE_PER_CPU(struct cpu_stopper, cpu_stopper);
static bool stop_machine_initialized = false;

/* static data for stop_cpus */
static DEFINE_MUTEX(stop_cpus_mutex);
static bool stop_cpus_in_progress;

static void cpu_stop_init_done(struct cpu_stop_done *done, unsigned int nr_todo)
{
	memset(done, 0, sizeof(*done));
	atomic_set(&done->nr_todo, nr_todo);
	init_completion(&done->completion);
}

/* signal completion unless @done is NULL */
static void cpu_stop_signal_done(struct cpu_stop_done *done)
{
	if (atomic_dec_and_test(&done->nr_todo))
		complete(&done->completion);
}

static void __cpu_stop_queue_work(struct cpu_stopper *stopper,
				  struct cpu_stop_work *work,
				  struct wake_q_head *wakeq)
{
	list_add_tail(&work->list, &stopper->works);
	wake_q_add(wakeq, stopper->thread);
}

/* queue @work to @stopper. if offline, @work is completed immediately */
static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	DEFINE_WAKE_Q(wakeq);
	unsigned long flags;
	bool enabled;

	raw_spin_lock_irqsave(&stopper->lock, flags);
	enabled = stopper->enabled;
	if (enabled)
		__cpu_stop_queue_work(stopper, work, &wakeq);
	else if (work->done)
		cpu_stop_signal_done(work->done);
	raw_spin_unlock_irqrestore(&stopper->lock, flags);

	wake_up_q(&wakeq);

	return enabled;
}

/**
 * stop_one_cpu - stop a cpu
 * @cpu: cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Execute @fn(@arg) on @cpu. @fn is run in a process context with
 * the highest priority preempting any task on the cpu and
 * monopolizing it. This function returns after the execution is
 * complete.
 *
 * This function doesn't guarantee @cpu stays online till @fn
 * completes. If @cpu goes down in the middle, execution may happen
 * partially or fully on different cpus. @fn should either be ready
 * for that or the caller should ensure that @cpu stays online until
 * this function completes.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -ENOENT if @fn(@arg) was not executed because @cpu was offline;
 * otherwise, the return value of @fn.
 */
int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;
	struct cpu_stop_work work = { .fn = fn, .arg = arg, .done = &done };

	cpu_stop_init_done(&done, 1);
	if (!cpu_stop_queue_work(cpu, &work))
		return -ENOENT;
	/*
	 * In case @cpu == smp_processor_id() we can avoid a sleep+wakeup
	 * cycle by doing a preemption:
	 */
	cond_resched();
	wait_for_completion(&done.completion);
	return done.ret;
}

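/*
 * Usage sketch (illustrative only, not part of the original file): run a
 * short, non-sleeping callback on one CPU and collect its return value.
 * read_cpu_id() and the variables below are hypothetical.
 *
 *	static int read_cpu_id(void *arg)
 *	{
 *		*(unsigned int *)arg = smp_processor_id();
 *		return 0;
 *	}
 *
 *	unsigned int id;
 *	int err = stop_one_cpu(3, read_cpu_id, &id);
 *	if (err == -ENOENT)
 *		pr_info("cpu 3 was offline\n");
 */
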
/* This controls the threads on each CPU. */
enum multi_stop_state {
	/* Dummy starting state for thread. */
	MULTI_STOP_NONE,
	/* Awaiting everyone to be scheduled. */
	MULTI_STOP_PREPARE,
	/* Disable interrupts. */
	MULTI_STOP_DISABLE_IRQ,
	/* Run the function */
	MULTI_STOP_RUN,
	/* Exit */
	MULTI_STOP_EXIT,
};

struct multi_stop_data {
	cpu_stop_fn_t		fn;
	void			*data;
	/* Like num_online_cpus(), but hotplug cpu uses us, so we need this. */
	unsigned int		num_threads;
	const struct cpumask	*active_cpus;

	enum multi_stop_state	state;
	atomic_t		thread_ack;
};

static void set_state(struct multi_stop_data *msdata,
		      enum multi_stop_state newstate)
{
	/* Reset ack counter. */
	atomic_set(&msdata->thread_ack, msdata->num_threads);
	smp_wmb();
	msdata->state = newstate;
}

/* Last one to ack a state moves to the next state. */
static void ack_state(struct multi_stop_data *msdata)
{
	if (atomic_dec_and_test(&msdata->thread_ack))
		set_state(msdata, msdata->state + 1);
}

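/*
 * Illustrative note (not in the original file): set_state() + ack_state()
 * make every state transition behave like a barrier. With num_threads == N,
 * each thread spinning in multi_cpu_stop() observes the new state, performs
 * the per-state action and acks it; the Nth ack advances the machine:
 *
 *	PREPARE -> DISABLE_IRQ -> RUN -> EXIT
 *
 * so, for example, no CPU can enter MULTI_STOP_RUN before all N CPUs have
 * disabled interrupts.
 */
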
/* This is the cpu_stop function which stops the CPU. */
static int multi_cpu_stop(void *data)
{
	struct multi_stop_data *msdata = data;
	enum multi_stop_state curstate = MULTI_STOP_NONE;
	int cpu = smp_processor_id(), err = 0;
	unsigned long flags;
	bool is_active;

	/*
	 * When called from stop_machine_from_inactive_cpu(), irq might
	 * already be disabled. Save the state and restore it on exit.
	 */
	local_save_flags(flags);

	if (!msdata->active_cpus)
		is_active = cpu == cpumask_first(cpu_online_mask);
	else
		is_active = cpumask_test_cpu(cpu, msdata->active_cpus);

	/* Simple state machine */
	do {
		/* Chill out and ensure we re-read multi_stop_state. */
		cpu_relax_yield();
		if (msdata->state != curstate) {
			curstate = msdata->state;
			switch (curstate) {
			case MULTI_STOP_DISABLE_IRQ:
				local_irq_disable();
				hard_irq_disable();
				break;
			case MULTI_STOP_RUN:
				if (is_active)
					err = msdata->fn(msdata->data);
				break;
			default:
				break;
			}
			ack_state(msdata);
		} else if (curstate > MULTI_STOP_PREPARE) {
			/*
			 * At this stage all other CPUs we depend on must spin
			 * in the same loop. Any reason for hard-lockup should
			 * be detected and reported on their side.
			 */
			touch_nmi_watchdog();
		}
	} while (curstate != MULTI_STOP_EXIT);

	local_irq_restore(flags);
	return err;
}

static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
				    int cpu2, struct cpu_stop_work *work2)
{
	struct cpu_stopper *stopper1 = per_cpu_ptr(&cpu_stopper, cpu1);
	struct cpu_stopper *stopper2 = per_cpu_ptr(&cpu_stopper, cpu2);
	DEFINE_WAKE_Q(wakeq);
	int err;
retry:
	raw_spin_lock_irq(&stopper1->lock);
	raw_spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING);

	err = -ENOENT;
	if (!stopper1->enabled || !stopper2->enabled)
		goto unlock;
	/*
	 * Ensure that if we race with __stop_cpus() the stoppers won't get
	 * queued up in reverse order leading to system deadlock.
	 *
	 * We can't miss stop_cpus_in_progress if queue_stop_cpus_work() has
	 * queued a work on cpu1 but not on cpu2, we hold both locks.
	 *
	 * It can be falsely true but it is safe to spin until it is cleared,
	 * queue_stop_cpus_work() does everything under preempt_disable().
	 */
	err = -EDEADLK;
	if (unlikely(stop_cpus_in_progress))
		goto unlock;

	err = 0;
	__cpu_stop_queue_work(stopper1, work1, &wakeq);
	__cpu_stop_queue_work(stopper2, work2, &wakeq);
unlock:
	raw_spin_unlock(&stopper2->lock);
	raw_spin_unlock_irq(&stopper1->lock);

	if (unlikely(err == -EDEADLK)) {
		while (stop_cpus_in_progress)
			cpu_relax();
		goto retry;
	}

	if (!err) {
		preempt_disable();
		wake_up_q(&wakeq);
		preempt_enable();
	}

	return err;
}

/**
 * stop_two_cpus - stops two cpus
 * @cpu1: the cpu to stop
 * @cpu2: the other cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Stops both @cpu1 and @cpu2 and runs @fn on one of them.
 *
 * Returns when both are completed.
 */
int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;
	struct cpu_stop_work work1, work2;
	struct multi_stop_data msdata;

	msdata = (struct multi_stop_data){
		.fn = fn,
		.data = arg,
		.num_threads = 2,
		.active_cpus = cpumask_of(cpu1),
	};

	work1 = work2 = (struct cpu_stop_work){
		.fn = multi_cpu_stop,
		.arg = &msdata,
		.done = &done
	};

	cpu_stop_init_done(&done, 2);
	set_state(&msdata, MULTI_STOP_PREPARE);

	if (cpu1 > cpu2)
		swap(cpu1, cpu2);
	if (cpu_stop_queue_two_works(cpu1, &work1, cpu2, &work2))
		return -ENOENT;

	wait_for_completion(&done.completion);
	return done.ret;
}

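/*
 * Usage sketch (illustrative only, not part of the original file): run a
 * callback while both CPUs are pinned in their stoppers, the pattern the
 * scheduler uses for task swaps. pair_ctx and swap_pair() are hypothetical;
 * the callback runs with both CPUs held in multi_cpu_stop().
 *
 *	static int swap_pair(void *arg)
 *	{
 *		struct pair_ctx *ctx = arg;
 *
 *		return do_swap(ctx->src_cpu, ctx->dst_cpu);
 *	}
 *
 *	int err = stop_two_cpus(ctx.src_cpu, ctx.dst_cpu, swap_pair, &ctx);
 */
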
/**
 * stop_one_cpu_nowait - stop a cpu but don't wait for completion
 * @cpu: cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 * @work_buf: pointer to cpu_stop_work structure
 *
 * Similar to stop_one_cpu() but doesn't wait for completion. The
 * caller is responsible for ensuring @work_buf is currently unused
 * and will remain untouched until stopper starts executing @fn.
 *
 * CONTEXT:
 * Don't care.
 *
 * RETURNS:
 * true if cpu_stop_work was queued successfully and @fn will be called,
 * false otherwise.
 */
bool stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
			 struct cpu_stop_work *work_buf)
{
	*work_buf = (struct cpu_stop_work){ .fn = fn, .arg = arg, };
	return cpu_stop_queue_work(cpu, work_buf);
}

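/*
 * Usage sketch (illustrative only, not part of the original file): since
 * the call returns before @fn runs, @work_buf must stay valid until the
 * stopper consumes it, so it typically lives in static or per-CPU storage.
 * push_work and push_cpu_stop() below are hypothetical names.
 *
 *	static DEFINE_PER_CPU(struct cpu_stop_work, push_work);
 *
 *	stop_one_cpu_nowait(cpu, push_cpu_stop, arg,
 *			    &per_cpu(push_work, cpu));
 */
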
static bool queue_stop_cpus_work(const struct cpumask *cpumask,
				 cpu_stop_fn_t fn, void *arg,
				 struct cpu_stop_done *done)
{
	struct cpu_stop_work *work;
	unsigned int cpu;
	bool queued = false;

	/*
	 * Disable preemption while queueing to avoid getting
	 * preempted by a stopper which might wait for other stoppers
	 * to enter @fn which can lead to deadlock.
	 */
	preempt_disable();
	stop_cpus_in_progress = true;
	for_each_cpu(cpu, cpumask) {
		work = &per_cpu(cpu_stopper.stop_work, cpu);
		work->fn = fn;
		work->arg = arg;
		work->done = done;
		if (cpu_stop_queue_work(cpu, work))
			queued = true;
	}
	stop_cpus_in_progress = false;
	preempt_enable();

	return queued;
}

static int __stop_cpus(const struct cpumask *cpumask,
		       cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;

	cpu_stop_init_done(&done, cpumask_weight(cpumask));
	if (!queue_stop_cpus_work(cpumask, fn, arg, &done))
		return -ENOENT;
	wait_for_completion(&done.completion);
	return done.ret;
}

/**
 * stop_cpus - stop multiple cpus
 * @cpumask: cpus to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Execute @fn(@arg) on online cpus in @cpumask. On each target cpu,
 * @fn is run in a process context with the highest priority
 * preempting any task on the cpu and monopolizing it. This function
 * returns after all executions are complete.
 *
 * This function doesn't guarantee the cpus in @cpumask stay online
 * till @fn completes. If some cpus go down in the middle, execution
 * on the cpu may happen partially or fully on different cpus. @fn
 * should either be ready for that or the caller should ensure that
 * the cpus stay online until this function completes.
 *
 * All stop_cpus() calls are serialized making it safe for @fn to wait
 * for all cpus to start executing it.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -ENOENT if @fn(@arg) was not executed at all because all cpus in
 * @cpumask were offline; otherwise, 0 if all executions of @fn
 * returned 0, any non zero return value if any returned non zero.
 */
int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
{
	int ret;

	/* static works are used, process one request at a time */
	mutex_lock(&stop_cpus_mutex);
	ret = __stop_cpus(cpumask, fn, arg);
	mutex_unlock(&stop_cpus_mutex);
	return ret;
}

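/*
 * Usage sketch (illustrative only, not part of the original file): run a
 * non-sleeping callback on every online CPU and check the collected
 * result. sync_state() and ctx are hypothetical.
 *
 *	static int sync_state(void *arg)
 *	{
 *		return do_sync(arg);
 *	}
 *
 *	int err = stop_cpus(cpu_online_mask, sync_state, &ctx);
 *	if (err)
 *		pr_warn("sync failed on at least one cpu: %d\n", err);
 */
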
/**
 * try_stop_cpus - try to stop multiple cpus
 * @cpumask: cpus to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Identical to stop_cpus() except that it fails with -EAGAIN if
 * someone else is already using the facility.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -EAGAIN if someone else is already stopping cpus, -ENOENT if
 * @fn(@arg) was not executed at all because all cpus in @cpumask were
 * offline; otherwise, 0 if all executions of @fn returned 0, any non
 * zero return value if any returned non zero.
 */
int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
{
	int ret;

	/* static works are used, process one request at a time */
	if (!mutex_trylock(&stop_cpus_mutex))
		return -EAGAIN;
	ret = __stop_cpus(cpumask, fn, arg);
	mutex_unlock(&stop_cpus_mutex);
	return ret;
}

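/*
 * Usage sketch (illustrative only, not part of the original file): a caller
 * that must not block on stop_cpus_mutex can back off on -EAGAIN and retry
 * later. sync_state(), ctx and retry_work are hypothetical, as in the
 * sketch above.
 *
 *	int err = try_stop_cpus(cpu_online_mask, sync_state, &ctx);
 *	if (err == -EAGAIN)
 *		schedule_delayed_work(&retry_work, HZ);
 */
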
static int cpu_stop_should_run(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	unsigned long flags;
	int run;

	raw_spin_lock_irqsave(&stopper->lock, flags);
	run = !list_empty(&stopper->works);
	raw_spin_unlock_irqrestore(&stopper->lock, flags);
	return run;
}

static void cpu_stopper_thread(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	struct cpu_stop_work *work;

repeat:
	work = NULL;
	raw_spin_lock_irq(&stopper->lock);
	if (!list_empty(&stopper->works)) {
		work = list_first_entry(&stopper->works,
					struct cpu_stop_work, list);
		list_del_init(&work->list);
	}
	raw_spin_unlock_irq(&stopper->lock);

	if (work) {
		cpu_stop_fn_t fn = work->fn;
		void *arg = work->arg;
		struct cpu_stop_done *done = work->done;
		int ret;

		/* cpu stop callbacks must not sleep, make in_atomic() == T */
		preempt_count_inc();
		ret = fn(arg);
		if (done) {
			if (ret)
				done->ret = ret;
			cpu_stop_signal_done(done);
		}
		preempt_count_dec();
		WARN_ONCE(preempt_count(),
			  "cpu_stop: %pf(%p) leaked preempt count\n", fn, arg);
		goto repeat;
	}
}

void stop_machine_park(int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	/*
	 * Lockless. cpu_stopper_thread() will take stopper->lock and flush
	 * the pending works before it parks, until then it is fine to queue
	 * the new works.
	 */
	stopper->enabled = false;
	kthread_park(stopper->thread);
}

extern void sched_set_stop_task(int cpu, struct task_struct *stop);

static void cpu_stop_create(unsigned int cpu)
{
	sched_set_stop_task(cpu, per_cpu(cpu_stopper.thread, cpu));
}

static void cpu_stop_park(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

	WARN_ON(!list_empty(&stopper->works));
}

void stop_machine_unpark(int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

	stopper->enabled = true;
	kthread_unpark(stopper->thread);
}

static struct smp_hotplug_thread cpu_stop_threads = {
	.store			= &cpu_stopper.thread,
	.thread_should_run	= cpu_stop_should_run,
	.thread_fn		= cpu_stopper_thread,
	.thread_comm		= "migration/%u",
	.create			= cpu_stop_create,
	.park			= cpu_stop_park,
	.selfparking		= true,
};

static int __init cpu_stop_init(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

		raw_spin_lock_init(&stopper->lock);
		INIT_LIST_HEAD(&stopper->works);
	}

	BUG_ON(smpboot_register_percpu_thread(&cpu_stop_threads));
	stop_machine_unpark(raw_smp_processor_id());
	stop_machine_initialized = true;
	return 0;
}
early_initcall(cpu_stop_init);

int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data,
			    const struct cpumask *cpus)
{
	struct multi_stop_data msdata = {
		.fn = fn,
		.data = data,
		.num_threads = num_online_cpus(),
		.active_cpus = cpus,
	};

	lockdep_assert_cpus_held();

	if (!stop_machine_initialized) {
		/*
		 * Handle the case where stop_machine() is called
		 * early in boot before stop_machine() has been
		 * initialized.
		 */
		unsigned long flags;
		int ret;

		WARN_ON_ONCE(msdata.num_threads != 1);

		local_irq_save(flags);
		hard_irq_disable();
		ret = (*fn)(data);
		local_irq_restore(flags);

		return ret;
	}

	/* Set the initial state and stop all online cpus. */
	set_state(&msdata, MULTI_STOP_PREPARE);
	return stop_cpus(cpu_online_mask, multi_cpu_stop, &msdata);
}

int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus)
{
	int ret;

	/* No CPUs can come up or down during this. */
	cpus_read_lock();
	ret = stop_machine_cpuslocked(fn, data, cpus);
	cpus_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(stop_machine);

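/*
 * Usage sketch (illustrative only, not part of the original file): mutate
 * global state that no other CPU may observe half-way. While the callback
 * runs, every online CPU spins in multi_cpu_stop() with interrupts
 * disabled, so the callback must not sleep. apply_update() and struct
 * update are hypothetical.
 *
 *	static int apply_update(void *arg)
 *	{
 *		struct update *u = arg;
 *
 *		return commit(u);
 *	}
 *
 *	int err = stop_machine(apply_update, &update, NULL);
 */
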
/**
 * stop_machine_from_inactive_cpu - stop_machine() from inactive CPU
 * @fn: the function to run
 * @data: the data ptr for the @fn()
 * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
 *
 * This is identical to stop_machine() but can be called from a CPU which
 * is not active. The local CPU is in the process of hotplug (so no other
 * CPU hotplug can start) and not marked active and doesn't have enough
 * context to sleep.
 *
 * This function provides stop_machine() functionality for such state by
 * using busy-wait for synchronization and executing @fn directly for local
 * CPU.
 *
 * CONTEXT:
 * Local CPU is inactive. Temporarily stops all active CPUs.
 *
 * RETURNS:
 * 0 if all executions of @fn returned 0, any non zero return value if any
 * returned non zero.
 */
int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
				   const struct cpumask *cpus)
{
	struct multi_stop_data msdata = { .fn = fn, .data = data,
					  .active_cpus = cpus };
	struct cpu_stop_done done;
	int ret;

	/* Local CPU must be inactive and CPU hotplug in progress. */
	BUG_ON(cpu_active(raw_smp_processor_id()));
	msdata.num_threads = num_active_cpus() + 1;	/* +1 for local */

	/* No proper task established and can't sleep - busy wait for lock. */
	while (!mutex_trylock(&stop_cpus_mutex))
		cpu_relax();

	/* Schedule work on other CPUs and execute directly for local CPU */
	set_state(&msdata, MULTI_STOP_PREPARE);
	cpu_stop_init_done(&done, num_active_cpus());
	queue_stop_cpus_work(cpu_active_mask, multi_cpu_stop, &msdata,
			     &done);
	ret = multi_cpu_stop(&msdata);

	/* Busy wait for completion. */
	while (!completion_done(&done.completion))
		cpu_relax();

	mutex_unlock(&stop_cpus_mutex);
	return ret ?: done.ret;
}
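
/*
 * Usage sketch (illustrative only, not part of the original file): invoked
 * from hotplug code on a CPU that is not yet active and cannot sleep.
 * resync_state() and ctx are hypothetical.
 *
 *	int ret = stop_machine_from_inactive_cpu(resync_state, &ctx, NULL);
 *	if (ret)
 *		pr_err("resync failed: %d\n", ret);
 */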