/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licenced under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <linux/tick.h>
#include <linux/irq.h>
#include <linux/smpboot.h>
#include <linux/relay.h>
#include <linux/slab.h>

#include <trace/events/power.h>
#define CREATE_TRACE_POINTS
#include <trace/events/cpuhp.h>

#include "smpboot.h"
/**
 * cpuhp_cpu_state - Per cpu hotplug state storage
 * @state:	The current cpu state
 * @target:	The target state
 * @thread:	Pointer to the hotplug thread
 * @should_run:	Thread should execute
 * @rollback:	Perform a rollback
 * @single:	Single callback invocation
 * @bringup:	Single callback bringup or teardown selector
 * @cb_state:	The state for a single callback (install/uninstall)
 * @result:	Result of the operation
 * @done:	Signal completion to the issuer of the task
 */
struct cpuhp_cpu_state {
	enum cpuhp_state	state;
	enum cpuhp_state	target;
#ifdef CONFIG_SMP
	struct task_struct	*thread;
	bool			should_run;
	bool			rollback;
	bool			single;
	bool			bringup;
	struct hlist_node	*node;
	enum cpuhp_state	cb_state;
	int			result;
	struct completion	done;
#endif
};

static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state);
/**
 * cpuhp_step - Hotplug state machine step
 * @name:	Name of the step
 * @startup:	Startup function of the step
 * @teardown:	Teardown function of the step
 * @skip_onerr:	Do not invoke the functions on error rollback
 *		Will go away once the notifiers are gone
 * @cant_stop:	Bringup/teardown can't be stopped at this step
 * @multi_instance: State has multiple instances which get added afterwards
 */
struct cpuhp_step {
	const char		*name;
	union {
		int		(*single)(unsigned int cpu);
		int		(*multi)(unsigned int cpu,
					 struct hlist_node *node);
	} startup;
	union {
		int		(*single)(unsigned int cpu);
		int		(*multi)(unsigned int cpu,
					 struct hlist_node *node);
	} teardown;
	struct hlist_head	list;
	bool			skip_onerr;
	bool			cant_stop;
	bool			multi_instance;
};

static DEFINE_MUTEX(cpuhp_state_mutex);
static struct cpuhp_step cpuhp_bp_states[];
static struct cpuhp_step cpuhp_ap_states[];
static bool cpuhp_is_ap_state(enum cpuhp_state state)
{
	/*
	 * The extra check for CPUHP_TEARDOWN_CPU is only for documentation
	 * purposes as that state is handled explicitly in cpu_down.
	 */
	return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU;
}
static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
{
	struct cpuhp_step *sp;

	sp = cpuhp_is_ap_state(state) ? cpuhp_ap_states : cpuhp_bp_states;
	return sp + state;
}
/**
 * cpuhp_invoke_callback - Invoke the callbacks for a given state
 * @cpu:	The cpu for which the callback should be invoked
 * @state:	The step in the state machine
 * @bringup:	True if the bringup callback should be invoked
 * @node:	For multi-instance states, invoke the callback for this instance only
 *
 * Called from cpu hotplug and from the state register machinery.
 */
static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
				 bool bringup, struct hlist_node *node)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	struct cpuhp_step *step = cpuhp_get_step(state);
	int (*cbm)(unsigned int cpu, struct hlist_node *node);
	int (*cb)(unsigned int cpu);
	int ret, cnt;

	if (!step->multi_instance) {
		cb = bringup ? step->startup.single : step->teardown.single;
		if (!cb)
			return 0;
		trace_cpuhp_enter(cpu, st->target, state, cb);
		ret = cb(cpu);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		return ret;
	}
	cbm = bringup ? step->startup.multi : step->teardown.multi;
	if (!cbm)
		return 0;

	/* Single invocation for instance add/remove */
	if (node) {
		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
		ret = cbm(cpu, node);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		return ret;
	}

	/* State transition. Invoke on all instances */
	cnt = 0;
	hlist_for_each(node, &step->list) {
		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
		ret = cbm(cpu, node);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		if (ret)
			goto err;
		cnt++;
	}
	return 0;
err:
	/* Rollback the instances if one failed */
	cbm = !bringup ? step->startup.multi : step->teardown.multi;
	if (!cbm)
		return ret;

	hlist_for_each(node, &step->list) {
		if (!cnt--)
			break;
		cbm(cpu, node);
	}
	return ret;
}
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);
bool cpuhp_tasks_frozen;
EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);
/*
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 * The APIs cpu_notifier_register_begin/done() must be used to protect CPU
 * hotplug callback (un)registration performed using __register_cpu_notifier()
 * or __unregister_cpu_notifier().
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_begin);

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_done);
static RAW_NOTIFIER_HEAD(cpu_chain);

/*
 * If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;
#ifdef CONFIG_HOTPLUG_CPU

static struct {
	struct task_struct *active_writer;
	/* wait queue to wake up the active_writer */
	wait_queue_head_t wq;
	/* verifies that no writer will get active while readers are active */
	struct mutex lock;
	/*
	 * Also blocks the new readers during
	 * an ongoing cpu hotplug operation.
	 */
	atomic_t refcount;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} cpu_hotplug = {
	.active_writer = NULL,
	.wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	.dep_map = STATIC_LOCKDEP_MAP_INIT("cpu_hotplug.dep_map", &cpu_hotplug.dep_map),
#endif
};
/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
#define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire_tryread() \
				  lock_map_acquire_tryread(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire()      lock_map_acquire(&cpu_hotplug.dep_map)
#define cpuhp_lock_release()      lock_map_release(&cpu_hotplug.dep_map)
void get_online_cpus(void)
{
	might_sleep();
	if (cpu_hotplug.active_writer == current)
		return;
	cpuhp_lock_acquire_read();
	mutex_lock(&cpu_hotplug.lock);
	atomic_inc(&cpu_hotplug.refcount);
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);
void put_online_cpus(void)
{
	int refcount;

	if (cpu_hotplug.active_writer == current)
		return;

	refcount = atomic_dec_return(&cpu_hotplug.refcount);
	if (WARN_ON(refcount < 0)) /* try to fix things up */
		atomic_inc(&cpu_hotplug.refcount);

	if (refcount <= 0 && waitqueue_active(&cpu_hotplug.wq))
		wake_up(&cpu_hotplug.wq);

	cpuhp_lock_release();
}
EXPORT_SYMBOL_GPL(put_online_cpus);
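
/*
 * Example (illustrative only, not part of this file): a typical read-side
 * user pins the set of online CPUs around a traversal. The function name
 * foo_count_online() is hypothetical.
 *
 *	static unsigned int foo_count_online(void)
 *	{
 *		unsigned int cpu, cnt = 0;
 *
 *		get_online_cpus();
 *		for_each_online_cpu(cpu)
 *			cnt++;
 *		put_online_cpus();
 *		return cnt;
 *	}
 */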
/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an API which is called all that often.
 */
void cpu_hotplug_begin(void)
{
	DEFINE_WAIT(wait);

	cpu_hotplug.active_writer = current;
	cpuhp_lock_acquire();

	for (;;) {
		mutex_lock(&cpu_hotplug.lock);
		prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE);
		if (likely(!atomic_read(&cpu_hotplug.refcount)))
			break;
		mutex_unlock(&cpu_hotplug.lock);
		schedule();
	}
	finish_wait(&cpu_hotplug.wq, &wait);
}
void cpu_hotplug_done(void)
{
	cpu_hotplug.active_writer = NULL;
	mutex_unlock(&cpu_hotplug.lock);
	cpuhp_lock_release();
}
/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled++;
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_disable);

static void __cpu_hotplug_enable(void)
{
	if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n"))
		return;
	cpu_hotplug_disabled--;
}

void cpu_hotplug_enable(void)
{
	cpu_maps_update_begin();
	__cpu_hotplug_enable();
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
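
/*
 * Example (illustrative only): callers pair cpu_hotplug_disable() with
 * cpu_hotplug_enable() around a section that must not race with CPU
 * hotplug, e.g. from a PM or firmware update path:
 *
 *	cpu_hotplug_disable();
 *	... operation that must not see CPUs coming or going ...
 *	cpu_hotplug_enable();
 */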
#endif	/* CONFIG_HOTPLUG_CPU */
/* Need to know about CPUs going up/down? */
int register_cpu_notifier(struct notifier_block *nb)
{
	int ret;

	cpu_maps_update_begin();
	ret = raw_notifier_chain_register(&cpu_chain, nb);
	cpu_maps_update_done();
	return ret;
}

int __register_cpu_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&cpu_chain, nb);
}
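
/*
 * Example (illustrative only): a minimal notifier for the legacy API
 * above. The foo_* callbacks are hypothetical.
 *
 *	static int foo_cpu_callback(struct notifier_block *nb,
 *				    unsigned long action, void *hcpu)
 *	{
 *		unsigned int cpu = (unsigned long)hcpu;
 *
 *		switch (action & ~CPU_TASKS_FROZEN) {
 *		case CPU_ONLINE:
 *			foo_init_cpu(cpu);
 *			break;
 *		case CPU_DEAD:
 *			foo_cleanup_cpu(cpu);
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_cpu_notifier = {
 *		.notifier_call = foo_cpu_callback,
 *	};
 *
 *	register_cpu_notifier(&foo_cpu_notifier);
 */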
static int __cpu_notify(unsigned long val, unsigned int cpu, int nr_to_call,
			int *nr_calls)
{
	unsigned long mod = cpuhp_tasks_frozen ? CPU_TASKS_FROZEN : 0;
	void *hcpu = (void *)(long)cpu;
	int ret;

	ret = __raw_notifier_call_chain(&cpu_chain, val | mod, hcpu, nr_to_call,
					nr_calls);

	return notifier_to_errno(ret);
}
static int cpu_notify(unsigned long val, unsigned int cpu)
{
	return __cpu_notify(val, cpu, -1, NULL);
}

static void cpu_notify_nofail(unsigned long val, unsigned int cpu)
{
	BUG_ON(cpu_notify(val, cpu));
}
/* Notifier wrappers for transitioning to state machine */
static int notify_prepare(unsigned int cpu)
{
	int nr_calls = 0;
	int ret;

	ret = __cpu_notify(CPU_UP_PREPARE, cpu, -1, &nr_calls);
	if (ret) {
		nr_calls--;
		printk(KERN_WARNING "%s: attempt to bring up CPU %u failed\n",
				__func__, cpu);
		__cpu_notify(CPU_UP_CANCELED, cpu, nr_calls, NULL);
	}
	return ret;
}
static int notify_online(unsigned int cpu)
{
	cpu_notify(CPU_ONLINE, cpu);
	return 0;
}

static void __cpuhp_kick_ap_work(struct cpuhp_cpu_state *st);
static int bringup_wait_for_ap(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

	/* Wait for the CPU to reach CPUHP_AP_ONLINE_IDLE */
	wait_for_completion(&st->done);
	if (WARN_ON_ONCE((!cpu_online(cpu))))
		return -ECANCELED;

	/* Unpark the stopper thread and the hotplug thread of the target cpu */
	stop_machine_unpark(cpu);
	kthread_unpark(st->thread);

	/* Should we go further up ? */
	if (st->target > CPUHP_AP_ONLINE_IDLE) {
		__cpuhp_kick_ap_work(st);
		wait_for_completion(&st->done);
	}
	return st->result;
}
static int bringup_cpu(unsigned int cpu)
{
	struct task_struct *idle = idle_thread_get(cpu);
	int ret;

	/*
	 * Some architectures have to walk the irq descriptors to
	 * setup the vector space for the cpu which comes online.
	 * Prevent irq alloc/free across the bringup.
	 */
	irq_lock_sparse();

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu, idle);
	irq_unlock_sparse();
	if (ret) {
		cpu_notify(CPU_UP_CANCELED, cpu);
		return ret;
	}
	return bringup_wait_for_ap(cpu);
}
/*
 * Hotplug state machine related functions
 */
static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st)
{
	for (st->state++; st->state < st->target; st->state++) {
		struct cpuhp_step *step = cpuhp_get_step(st->state);

		if (!step->skip_onerr)
			cpuhp_invoke_callback(cpu, st->state, true, NULL);
	}
}
static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
				enum cpuhp_state target)
{
	enum cpuhp_state prev_state = st->state;
	int ret = 0;

	for (; st->state > target; st->state--) {
		ret = cpuhp_invoke_callback(cpu, st->state, false, NULL);
		if (ret) {
			st->target = prev_state;
			undo_cpu_down(cpu, st);
			break;
		}
	}
	return ret;
}
static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
{
	for (st->state--; st->state > st->target; st->state--) {
		struct cpuhp_step *step = cpuhp_get_step(st->state);

		if (!step->skip_onerr)
			cpuhp_invoke_callback(cpu, st->state, false, NULL);
	}
}
static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
			      enum cpuhp_state target)
{
	enum cpuhp_state prev_state = st->state;
	int ret = 0;

	while (st->state < target) {
		st->state++;
		ret = cpuhp_invoke_callback(cpu, st->state, true, NULL);
		if (ret) {
			st->target = prev_state;
			undo_cpu_up(cpu, st);
			break;
		}
	}
	return ret;
}
/*
 * The cpu hotplug threads manage the bringup and teardown of the cpus
 */
static void cpuhp_create(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

	init_completion(&st->done);
}
static int cpuhp_should_run(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	return st->should_run;
}
/* Execute the teardown callbacks. Used to be CPU_DOWN_PREPARE */
static int cpuhp_ap_offline(unsigned int cpu, struct cpuhp_cpu_state *st)
{
	enum cpuhp_state target = max((int)st->target, CPUHP_TEARDOWN_CPU);

	return cpuhp_down_callbacks(cpu, st, target);
}
/* Execute the online startup callbacks. Used to be CPU_ONLINE */
static int cpuhp_ap_online(unsigned int cpu, struct cpuhp_cpu_state *st)
{
	return cpuhp_up_callbacks(cpu, st, st->target);
}
/*
 * Execute teardown/startup callbacks on the plugged cpu. Also used to invoke
 * callbacks when a state gets [un]installed at runtime.
 */
static void cpuhp_thread_fun(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
	int ret = 0;

	/*
	 * Paired with the mb() in cpuhp_kick_ap_work and
	 * cpuhp_invoke_ap_callback, so the work set is consistent visible.
	 */
	smp_mb();
	if (!st->should_run)
		return;

	st->should_run = false;

	/* Single callback invocation for [un]install ? */
	if (st->single) {
		if (st->cb_state < CPUHP_AP_ONLINE) {
			local_irq_disable();
			ret = cpuhp_invoke_callback(cpu, st->cb_state,
						    st->bringup, st->node);
			local_irq_enable();
		} else {
			ret = cpuhp_invoke_callback(cpu, st->cb_state,
						    st->bringup, st->node);
		}
	} else if (st->rollback) {
		BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE);

		undo_cpu_down(cpu, st);
		/*
		 * This is a momentary workaround to keep the notifier users
		 * happy. Will go away once we got rid of the notifiers.
		 */
		cpu_notify_nofail(CPU_DOWN_FAILED, cpu);
		st->rollback = false;
	} else {
		/* Cannot happen .... */
		BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE);

		/* Regular hotplug work */
		if (st->state < st->target)
			ret = cpuhp_ap_online(cpu, st);
		else if (st->state > st->target)
			ret = cpuhp_ap_offline(cpu, st);
	}
	st->result = ret;
	complete(&st->done);
}
/* Invoke a single callback on a remote cpu */
static int
cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
			 struct hlist_node *node)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

	if (!cpu_online(cpu))
		return 0;

	/*
	 * If we are up and running, use the hotplug thread. For early calls
	 * we invoke the thread function directly.
	 */
	if (!st->thread)
		return cpuhp_invoke_callback(cpu, state, bringup, node);

	st->cb_state = state;
	st->single = true;
	st->bringup = bringup;
	st->node = node;

	/*
	 * Make sure the above stores are visible before should_run becomes
	 * true. Paired with the mb() above in cpuhp_thread_fun()
	 */
	smp_mb();
	st->should_run = true;
	wake_up_process(st->thread);
	wait_for_completion(&st->done);
	return st->result;
}
/* Regular hotplug invocation of the AP hotplug thread */
static void __cpuhp_kick_ap_work(struct cpuhp_cpu_state *st)
{
	st->result = 0;
	st->single = false;
	/*
	 * Make sure the above stores are visible before should_run becomes
	 * true. Paired with the mb() above in cpuhp_thread_fun()
	 */
	smp_mb();
	st->should_run = true;
	wake_up_process(st->thread);
}
static int cpuhp_kick_ap_work(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	enum cpuhp_state state = st->state;

	trace_cpuhp_enter(cpu, st->target, state, cpuhp_kick_ap_work);
	__cpuhp_kick_ap_work(st);
	wait_for_completion(&st->done);
	trace_cpuhp_exit(cpu, st->state, state, st->result);
	return st->result;
}
static struct smp_hotplug_thread cpuhp_threads = {
	.store			= &cpuhp_state.thread,
	.create			= &cpuhp_create,
	.thread_should_run	= cpuhp_should_run,
	.thread_fn		= cpuhp_thread_fun,
	.thread_comm		= "cpuhp/%u",
	.selfparking		= true,
};
void __init cpuhp_threads_init(void)
{
	BUG_ON(smpboot_register_percpu_thread(&cpuhp_threads));
	kthread_unpark(this_cpu_read(cpuhp_state.thread));
}
EXPORT_SYMBOL(register_cpu_notifier);
EXPORT_SYMBOL(__register_cpu_notifier);

void unregister_cpu_notifier(struct notifier_block *nb)
{
	cpu_maps_update_begin();
	raw_notifier_chain_unregister(&cpu_chain, nb);
	cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);

void __unregister_cpu_notifier(struct notifier_block *nb)
{
	raw_notifier_chain_unregister(&cpu_chain, nb);
}
EXPORT_SYMBOL(__unregister_cpu_notifier);
#ifdef CONFIG_HOTPLUG_CPU
/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask. While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
	struct task_struct *p;

	/*
	 * This function is called after the cpu is taken down and marked
	 * offline, so it's not like new tasks will ever get this cpu set in
	 * their mm mask. -- Peter Zijlstra
	 * Thus, we may use rcu_read_lock() here, instead of grabbing
	 * full-fledged tasklist_lock.
	 */
	WARN_ON(cpu_online(cpu));
	rcu_read_lock();
	for_each_process(p) {
		struct task_struct *t;

		/*
		 * Main thread might exit, but other threads may still have
		 * a valid mm. Find one.
		 */
		t = find_lock_task_mm(p);
		if (!t)
			continue;
		cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
		task_unlock(t);
	}
	rcu_read_unlock();
}
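
/*
 * Example (illustrative sketch, arch code simplified and elided): an
 * architecture's __cpu_disable() typically calls this after removing the
 * CPU from the online mask, roughly:
 *
 *	int __cpu_disable(void)
 *	{
 *		unsigned int cpu = smp_processor_id();
 *		...
 *		set_cpu_online(cpu, false);
 *		...
 *		clear_tasks_mm_cpumask(cpu);
 *		...
 *		return 0;
 *	}
 */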
static inline void check_for_tasks(int dead_cpu)
{
	struct task_struct *g, *p;

	read_lock(&tasklist_lock);
	for_each_process_thread(g, p) {
		if (!p->on_rq)
			continue;
		/*
		 * We do the check with unlocked task_rq(p)->lock.
		 * Order the reading so that we do not warn about a task
		 * which was running on this cpu in the past and has just
		 * been woken on another cpu.
		 */
		rmb();
		if (task_cpu(p) != dead_cpu)
			continue;

		pr_warn("Task %s (pid=%d) is on cpu %d (state=%ld, flags=%x)\n",
			p->comm, task_pid_nr(p), dead_cpu, p->state, p->flags);
	}
	read_unlock(&tasklist_lock);
}
static int notify_down_prepare(unsigned int cpu)
{
	int err, nr_calls = 0;

	err = __cpu_notify(CPU_DOWN_PREPARE, cpu, -1, &nr_calls);
	if (err) {
		nr_calls--;
		__cpu_notify(CPU_DOWN_FAILED, cpu, nr_calls, NULL);
		pr_warn("%s: attempt to take down CPU %u failed\n",
			__func__, cpu);
	}
	return err;
}
/* Take this CPU down. */
static int take_cpu_down(void *_param)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
	enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
	int err, cpu = smp_processor_id();

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	/*
	 * We get here while we are in CPUHP_TEARDOWN_CPU state and we must not
	 * do this step again.
	 */
	WARN_ON(st->state != CPUHP_TEARDOWN_CPU);
	st->state--;
	/* Invoke the former CPU_DYING callbacks */
	for (; st->state > target; st->state--)
		cpuhp_invoke_callback(cpu, st->state, false, NULL);

	/* Give up timekeeping duties */
	tick_handover_do_timer();
	/* Park the stopper thread */
	stop_machine_park(cpu);
	return 0;
}
static int takedown_cpu(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int err;

	/* Park the smpboot threads */
	kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);
	smpboot_park_threads(cpu);

	/*
	 * Prevent irq alloc/free while the dying cpu reorganizes the
	 * interrupt affinities.
	 */
	irq_lock_sparse();

	/*
	 * So now all preempt/rcu users must observe !cpu_active().
	 */
	err = stop_machine(take_cpu_down, NULL, cpumask_of(cpu));
	if (err) {
		/* CPU refused to die */
		irq_unlock_sparse();
		/* Unpark the hotplug thread so we can rollback there */
		kthread_unpark(per_cpu_ptr(&cpuhp_state, cpu)->thread);
		return err;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The CPUHP_AP_SCHED_MIGRATE_DYING callback will have removed all
	 * runnable tasks from the cpu, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	wait_for_completion(&st->done);
	BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);

	/* Interrupts are moved away from the dying cpu, reenable alloc/free */
	irq_unlock_sparse();

	hotplug_cpu__broadcast_tick_pull(cpu);
	/* This actually kills the CPU. */
	__cpu_die(cpu);

	tick_cleanup_dead_cpu(cpu);
	return 0;
}
static int notify_dead(unsigned int cpu)
{
	cpu_notify_nofail(CPU_DEAD, cpu);
	check_for_tasks(cpu);
	return 0;
}
static void cpuhp_complete_idle_dead(void *arg)
{
	struct cpuhp_cpu_state *st = arg;

	complete(&st->done);
}
void cpuhp_report_idle_dead(void)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	BUG_ON(st->state != CPUHP_AP_OFFLINE);
	rcu_report_dead(smp_processor_id());
	st->state = CPUHP_AP_IDLE_DEAD;
	/*
	 * We cannot call complete after rcu_report_dead() so we delegate it
	 * to an online cpu.
	 */
	smp_call_function_single(cpumask_first(cpu_online_mask),
				 cpuhp_complete_idle_dead, st, 0);
}
#else
#define notify_down_prepare	NULL
#define takedown_cpu		NULL
#define notify_dead		NULL
#endif

#ifdef CONFIG_HOTPLUG_CPU
/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
			   enum cpuhp_state target)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int prev_state, ret = 0;
	bool hasdied = false;

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_present(cpu))
		return -EINVAL;

	cpu_hotplug_begin();

	cpuhp_tasks_frozen = tasks_frozen;

	prev_state = st->state;
	st->target = target;
	/*
	 * If the current CPU state is in the range of the AP hotplug thread,
	 * then we need to kick the thread.
	 */
	if (st->state > CPUHP_TEARDOWN_CPU) {
		ret = cpuhp_kick_ap_work(cpu);
		/*
		 * The AP side has done the error rollback already. Just
		 * return the error code..
		 */
		if (ret)
			goto out;

		/*
		 * We might have stopped still in the range of the AP hotplug
		 * thread. Nothing to do anymore.
		 */
		if (st->state > CPUHP_TEARDOWN_CPU)
			goto out;
	}
	/*
	 * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need
	 * to do the further cleanups.
	 */
	ret = cpuhp_down_callbacks(cpu, st, target);
	if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state) {
		st->target = prev_state;
		st->rollback = true;
		cpuhp_kick_ap_work(cpu);
	}

	hasdied = prev_state != st->state && st->state == CPUHP_OFFLINE;
out:
	cpu_hotplug_done();
	/* This post dead nonsense must die */
	if (!ret && hasdied)
		cpu_notify_nofail(CPU_POST_DEAD, cpu);
	return ret;
}
static int do_cpu_down(unsigned int cpu, enum cpuhp_state target)
{
	int err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_down(cpu, 0, target);

out:
	cpu_maps_update_done();
	return err;
}
int cpu_down(unsigned int cpu)
{
	return do_cpu_down(cpu, CPUHP_OFFLINE);
}
EXPORT_SYMBOL(cpu_down);
#endif /*CONFIG_HOTPLUG_CPU*/
/**
 * notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU
 * @cpu: cpu that just started
 *
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);

	rcu_cpu_starting(cpu);	/* Enables RCU usage on this CPU. */
	while (st->state < target) {
		st->state++;
		cpuhp_invoke_callback(cpu, st->state, true, NULL);
	}
}
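
/*
 * Example (illustrative sketch, arch code simplified and elided): the
 * arch secondary startup path calls this before enabling interrupts,
 * roughly:
 *
 *	void secondary_start_kernel(void)
 *	{
 *		...
 *		notify_cpu_starting(cpu);
 *		set_cpu_online(cpu, true);
 *		local_irq_enable();
 *		cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
 *	}
 */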
/*
 * Called from the idle task. Wake up the controlling task which brings the
 * stopper and the hotplug thread of the upcoming CPU up and then delegates
 * the rest of the online bringup to the hotplug thread.
 */
void cpuhp_online_idle(enum cpuhp_state state)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	/* Happens for the boot cpu */
	if (state != CPUHP_AP_ONLINE_IDLE)
		return;

	st->state = CPUHP_AP_ONLINE_IDLE;
	complete(&st->done);
}
/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	struct task_struct *idle;
	int ret = 0;

	cpu_hotplug_begin();

	if (!cpu_present(cpu)) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * The caller of do_cpu_up might have raced with another
	 * caller. Ignore it for now.
	 */
	if (st->state >= target)
		goto out;

	if (st->state == CPUHP_OFFLINE) {
		/* Let it fail before we try to bring the cpu up */
		idle = idle_thread_get(cpu);
		if (IS_ERR(idle)) {
			ret = PTR_ERR(idle);
			goto out;
		}
	}

	cpuhp_tasks_frozen = tasks_frozen;

	st->target = target;
	/*
	 * If the current CPU state is in the range of the AP hotplug thread,
	 * then we need to kick the thread once more.
	 */
	if (st->state > CPUHP_BRINGUP_CPU) {
		ret = cpuhp_kick_ap_work(cpu);
		/*
		 * The AP side has done the error rollback already. Just
		 * return the error code..
		 */
		if (ret)
			goto out;
	}

	/*
	 * Try to reach the target state. We max out on the BP at
	 * CPUHP_BRINGUP_CPU. After that the AP hotplug thread is
	 * responsible for bringing it up to the target state.
	 */
	target = min((int)target, CPUHP_BRINGUP_CPU);
	ret = cpuhp_up_callbacks(cpu, st, target);
out:
	cpu_hotplug_done();
	return ret;
}
static int do_cpu_up(unsigned int cpu, enum cpuhp_state target)
{
	int err = 0;

	if (!cpu_possible(cpu)) {
		pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
		       cpu);
#if defined(CONFIG_IA64)
		pr_err("please check additional_cpus= boot parameter\n");
#endif
		return -EINVAL;
	}

	err = try_online_node(cpu_to_node(cpu));
	if (err)
		return err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_up(cpu, 0, target);
out:
	cpu_maps_update_done();
	return err;
}
int cpu_up(unsigned int cpu)
{
	return do_cpu_up(cpu, CPUHP_ONLINE);
}
EXPORT_SYMBOL_GPL(cpu_up);
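
/*
 * Example (illustrative only): offlining and re-onlining a CPU from
 * kernel code; both calls serialize against concurrent hotplug:
 *
 *	ret = cpu_down(3);
 *	if (!ret)
 *		ret = cpu_up(3);
 */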
#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int freeze_secondary_cpus(int primary)
{
	int cpu, error = 0;

	cpu_maps_update_begin();
	if (!cpu_online(primary))
		primary = cpumask_first(cpu_online_mask);
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with the userspace trying to use the CPU hotplug at the same time
	 */
	cpumask_clear(frozen_cpus);

	pr_info("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == primary)
			continue;
		trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
		error = _cpu_down(cpu, 1, CPUHP_OFFLINE);
		trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			pr_err("Error taking CPU%d down: %d\n", cpu, error);
			break;
		}
	}

	if (!error)
		BUG_ON(num_online_cpus() > 1);
	else
		pr_err("Non-boot CPUs are not disabled\n");

	/*
	 * Make sure the CPUs won't be enabled by someone else. We need to do
	 * this even in case of failure as all disable_nonboot_cpus() users are
	 * supposed to do enable_nonboot_cpus() on the failure path.
	 */
	cpu_hotplug_disabled++;

	cpu_maps_update_done();
	return error;
}
void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}
void enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	__cpu_hotplug_enable();
	if (cpumask_empty(frozen_cpus))
		goto out;

	pr_info("Enabling non-boot CPUs ...\n");

	arch_enable_nonboot_cpus_begin();

	for_each_cpu(cpu, frozen_cpus) {
		trace_suspend_resume(TPS("CPU_ON"), cpu, true);
		error = _cpu_up(cpu, 1, CPUHP_ONLINE);
		trace_suspend_resume(TPS("CPU_ON"), cpu, false);
		if (!error) {
			pr_info("CPU%d is up\n", cpu);
			continue;
		}
		pr_warn("Error taking CPU%d up: %d\n", cpu, error);
	}

	arch_enable_nonboot_cpus_end();

	cpumask_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}
static int __init alloc_frozen_cpus(void)
{
	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
		return -ENOMEM;
	return 0;
}
core_initcall(alloc_frozen_cpus);
/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
			unsigned long action, void *ptr)
{
	switch (action) {

	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		cpu_hotplug_disable();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		cpu_hotplug_enable();
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}
static int __init cpu_hotplug_pm_sync_init(void)
{
	/*
	 * cpu_hotplug_pm_callback has higher priority than x86
	 * bsp_pm_callback which depends on cpu_hotplug_pm_callback
	 * to disable cpu hotplug to avoid cpu hotplug race.
	 */
	pm_notifier(cpu_hotplug_pm_callback, 0);
	return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */

#endif /* CONFIG_SMP */
/* Boot processor state steps */
static struct cpuhp_step cpuhp_bp_states[] = {
	[CPUHP_OFFLINE] = {
		.name			= "offline",
		.startup.single		= NULL,
		.teardown.single	= NULL,
	},
#ifdef CONFIG_SMP
	[CPUHP_CREATE_THREADS]= {
		.name			= "threads:prepare",
		.startup.single		= smpboot_create_threads,
		.teardown.single	= NULL,
		.cant_stop		= true,
	},
	[CPUHP_PERF_PREPARE] = {
		.name			= "perf:prepare",
		.startup.single		= perf_event_init_cpu,
		.teardown.single	= perf_event_exit_cpu,
	},
	[CPUHP_WORKQUEUE_PREP] = {
		.name			= "workqueue:prepare",
		.startup.single		= workqueue_prepare_cpu,
		.teardown.single	= NULL,
	},
	[CPUHP_HRTIMERS_PREPARE] = {
		.name			= "hrtimers:prepare",
		.startup.single		= hrtimers_prepare_cpu,
		.teardown.single	= hrtimers_dead_cpu,
	},
	[CPUHP_SMPCFD_PREPARE] = {
		.name			= "smpcfd:prepare",
		.startup.single		= smpcfd_prepare_cpu,
		.teardown.single	= smpcfd_dead_cpu,
	},
	[CPUHP_RELAY_PREPARE] = {
		.name			= "relay:prepare",
		.startup.single		= relay_prepare_cpu,
		.teardown.single	= NULL,
	},
	[CPUHP_SLAB_PREPARE] = {
		.name			= "slab:prepare",
		.startup.single		= slab_prepare_cpu,
		.teardown.single	= slab_dead_cpu,
	},
	[CPUHP_RCUTREE_PREP] = {
		.name			= "RCU/tree:prepare",
		.startup.single		= rcutree_prepare_cpu,
		.teardown.single	= rcutree_dead_cpu,
	},
	/*
	 * Preparatory and dead notifiers. Will be replaced once the notifiers
	 * are converted to states.
	 */
	[CPUHP_NOTIFY_PREPARE] = {
		.name			= "notify:prepare",
		.startup.single		= notify_prepare,
		.teardown.single	= notify_dead,
		.skip_onerr		= true,
		.cant_stop		= true,
	},
	/*
	 * On the tear-down path, timers_dead_cpu() must be invoked
	 * before blk_mq_queue_reinit_notify() from notify_dead(),
	 * otherwise a RCU stall occurs.
	 */
	[CPUHP_TIMERS_DEAD] = {
		.name			= "timers:dead",
		.startup.single		= NULL,
		.teardown.single	= timers_dead_cpu,
	},
	/* Kicks the plugged cpu into life */
	[CPUHP_BRINGUP_CPU] = {
		.name			= "cpu:bringup",
		.startup.single		= bringup_cpu,
		.teardown.single	= NULL,
		.cant_stop		= true,
	},
	[CPUHP_AP_SMPCFD_DYING] = {
		.name			= "smpcfd:dying",
		.startup.single		= NULL,
		.teardown.single	= smpcfd_dying_cpu,
	},
	/*
	 * Handled on control processor until the plugged processor manages
	 * this itself.
	 */
	[CPUHP_TEARDOWN_CPU] = {
		.name			= "cpu:teardown",
		.startup.single		= NULL,
		.teardown.single	= takedown_cpu,
		.cant_stop		= true,
	},
#else
	[CPUHP_BRINGUP_CPU] = { },
#endif
};
/* Application processor state steps */
static struct cpuhp_step cpuhp_ap_states[] = {
#ifdef CONFIG_SMP
	/* Final state before CPU kills itself */
	[CPUHP_AP_IDLE_DEAD] = {
		.name			= "idle:dead",
	},
	/*
	 * Last state before CPU enters the idle loop to die. Transient state
	 * for synchronization.
	 */
	[CPUHP_AP_OFFLINE] = {
		.name			= "ap:offline",
		.cant_stop		= true,
	},
	/* First state is scheduler control. Interrupts are disabled */
	[CPUHP_AP_SCHED_STARTING] = {
		.name			= "sched:starting",
		.startup.single		= sched_cpu_starting,
		.teardown.single	= sched_cpu_dying,
	},
	[CPUHP_AP_RCUTREE_DYING] = {
		.name			= "RCU/tree:dying",
		.startup.single		= NULL,
		.teardown.single	= rcutree_dying_cpu,
	},
	/*
	 * Entry state on starting. Interrupts enabled from here on. Transient
	 * state for synchronization.
	 */
	[CPUHP_AP_ONLINE] = {
		.name			= "ap:online",
	},
	/* Handle smpboot threads park/unpark */
	[CPUHP_AP_SMPBOOT_THREADS] = {
		.name			= "smpboot/threads:online",
		.startup.single		= smpboot_unpark_threads,
		.teardown.single	= NULL,
	},
	[CPUHP_AP_PERF_ONLINE] = {
		.name			= "perf:online",
		.startup.single		= perf_event_init_cpu,
		.teardown.single	= perf_event_exit_cpu,
	},
	[CPUHP_AP_WORKQUEUE_ONLINE] = {
		.name			= "workqueue:online",
		.startup.single		= workqueue_online_cpu,
		.teardown.single	= workqueue_offline_cpu,
	},
	[CPUHP_AP_RCUTREE_ONLINE] = {
		.name			= "RCU/tree:online",
		.startup.single		= rcutree_online_cpu,
		.teardown.single	= rcutree_offline_cpu,
	},

	/*
	 * Online/down_prepare notifiers. Will be removed once the notifiers
	 * are converted to states.
	 */
	[CPUHP_AP_NOTIFY_ONLINE] = {
		.name			= "notify:online",
		.startup.single		= notify_online,
		.teardown.single	= notify_down_prepare,
		.skip_onerr		= true,
	},
#endif
	/*
	 * The dynamically registered state space is here
	 */

#ifdef CONFIG_SMP
	/* Last state is scheduler control setting the cpu active */
	[CPUHP_AP_ACTIVE] = {
		.name			= "sched:active",
		.startup.single		= sched_cpu_activate,
		.teardown.single	= sched_cpu_deactivate,
	},
#endif

	/* CPU is fully up and running. */
	[CPUHP_ONLINE] = {
		.name			= "online",
		.startup.single		= NULL,
		.teardown.single	= NULL,
	},
};
/* Sanity check for callbacks */
static int cpuhp_cb_check(enum cpuhp_state state)
{
	if (state <= CPUHP_OFFLINE || state >= CPUHP_ONLINE)
		return -EINVAL;
	return 0;
}
static void cpuhp_store_callbacks(enum cpuhp_state state,
				  const char *name,
				  int (*startup)(unsigned int cpu),
				  int (*teardown)(unsigned int cpu),
				  bool multi_instance)
{
	/* (Un)Install the callbacks for further cpu hotplug operations */
	struct cpuhp_step *sp;

	sp = cpuhp_get_step(state);
	sp->startup.single = startup;
	sp->teardown.single = teardown;
	sp->name = name;
	sp->multi_instance = multi_instance;
	INIT_HLIST_HEAD(&sp->list);
}
static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
{
	return cpuhp_get_step(state)->teardown.single;
}
/*
 * Call the startup/teardown function for a step either on the AP or
 * on the current CPU.
 */
static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup,
			    struct hlist_node *node)
{
	struct cpuhp_step *sp = cpuhp_get_step(state);
	int ret;

	if ((bringup && !sp->startup.single) ||
	    (!bringup && !sp->teardown.single))
		return 0;
	/*
	 * The non AP bound callbacks can fail on bringup. On teardown
	 * e.g. module removal we crash for now.
	 */
#ifdef CONFIG_SMP
	if (cpuhp_is_ap_state(state))
		ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node);
	else
		ret = cpuhp_invoke_callback(cpu, state, bringup, node);
#else
	ret = cpuhp_invoke_callback(cpu, state, bringup, node);
#endif
	BUG_ON(ret && !bringup);
	return ret;
}
/*
 * Called from __cpuhp_setup_state on a recoverable failure.
 *
 * Note: The teardown callbacks for rollback are not allowed to fail!
 */
static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
				   struct hlist_node *node)
{
	int cpu;

	/* Roll back the already executed steps on the other cpus */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpu >= failedcpu)
			break;

		/* Did we invoke the startup call on that cpu ? */
		if (cpustate >= state)
			cpuhp_issue_call(cpu, state, false, node);
	}
}
/*
 * Returns a free slot for dynamic assignment in the Online state space. The
 * states are protected by the cpuhp_state_mutex and an empty slot is
 * identified by having no name assigned.
 */
static int cpuhp_reserve_state(enum cpuhp_state state)
{
	enum cpuhp_state i;

	for (i = CPUHP_AP_ONLINE_DYN; i <= CPUHP_AP_ONLINE_DYN_END; i++) {
		if (cpuhp_ap_states[i].name)
			continue;

		cpuhp_ap_states[i].name = "Reserved";
		return i;
	}
	WARN(1, "No more dynamic states available for CPU hotplug\n");
	return -ENOSPC;
}
int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
			       bool invoke)
{
	struct cpuhp_step *sp;
	int cpu;
	int ret;

	sp = cpuhp_get_step(state);
	if (sp->multi_instance == false)
		return -EINVAL;

	get_online_cpus();
	mutex_lock(&cpuhp_state_mutex);

	if (!invoke || !sp->startup.multi)
		goto add_node;

	/*
	 * Try to call the startup callback for each present cpu
	 * depending on the hotplug state of the cpu.
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate < state)
			continue;

		ret = cpuhp_issue_call(cpu, state, true, node);
		if (ret) {
			if (sp->teardown.multi)
				cpuhp_rollback_install(cpu, state, node);
			goto err;
		}
	}
add_node:
	ret = 0;
	hlist_add_head(node, &sp->list);
err:
	mutex_unlock(&cpuhp_state_mutex);
	put_online_cpus();
	return ret;
}
EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
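
/*
 * Example (illustrative only): a driver with per-device hotplug work
 * embeds a hlist_node in its device structure and adds one instance per
 * device via the cpuhp_state_add_instance() wrapper from
 * <linux/cpuhotplug.h>. The foo_* names are hypothetical.
 *
 *	struct foo_dev {
 *		struct hlist_node node;
 *		...
 *	};
 *
 *	ret = cpuhp_state_add_instance(foo_online_state, &fdev->node);
 */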
/**
 * __cpuhp_setup_state - Setup the callbacks for a hotplug machine state
 * @state:		The state to setup
 * @name:		Name of the state
 * @invoke:		If true, the startup function is invoked for cpus where
 *			cpu state >= @state
 * @startup:		startup callback function
 * @teardown:		teardown callback function
 * @multi_instance:	State is set up for multiple instances which get
 *			added afterwards
 *
 * Returns 0 if successful, otherwise a proper error code
 */
int __cpuhp_setup_state(enum cpuhp_state state,
			const char *name, bool invoke,
			int (*startup)(unsigned int cpu),
			int (*teardown)(unsigned int cpu),
			bool multi_instance)
{
	int cpu, ret = 0;
	int dyn_state = 0;

	if (cpuhp_cb_check(state) || !name)
		return -EINVAL;

	get_online_cpus();
	mutex_lock(&cpuhp_state_mutex);

	/* currently assignments for the ONLINE state are possible */
	if (state == CPUHP_AP_ONLINE_DYN) {
		dyn_state = 1;
		ret = cpuhp_reserve_state(state);
		if (ret < 0)
			goto out;
		state = ret;
	}
	cpuhp_store_callbacks(state, name, startup, teardown, multi_instance);

	if (!invoke || !startup)
		goto out;

	/*
	 * Try to call the startup callback for each present cpu
	 * depending on the hotplug state of the cpu.
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate < state)
			continue;

		ret = cpuhp_issue_call(cpu, state, true, NULL);
		if (ret) {
			if (teardown)
				cpuhp_rollback_install(cpu, state, NULL);
			cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
			goto out;
		}
	}
out:
	mutex_unlock(&cpuhp_state_mutex);
	put_online_cpus();

	if (!ret && dyn_state)
		return state;
	return ret;
}
EXPORT_SYMBOL(__cpuhp_setup_state);
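
/*
 * Example (illustrative only): typical usage via the cpuhp_setup_state()
 * wrapper from <linux/cpuhotplug.h>, requesting a dynamic state. The
 * foo_* names are hypothetical. With CPUHP_AP_ONLINE_DYN the positive
 * state number is returned on success and must be kept for removal.
 *
 *	static int foo_online(unsigned int cpu)
 *	{
 *		return foo_init_cpu(cpu);
 *	}
 *
 *	static int foo_offline(unsigned int cpu)
 *	{
 *		foo_cleanup_cpu(cpu);
 *		return 0;
 *	}
 *
 *	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "foo:online",
 *				foo_online, foo_offline);
 *	if (ret < 0)
 *		return ret;
 *	foo_online_state = ret;
 */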
int __cpuhp_state_remove_instance(enum cpuhp_state state,
				  struct hlist_node *node, bool invoke)
{
	struct cpuhp_step *sp = cpuhp_get_step(state);
	int cpu;

	BUG_ON(cpuhp_cb_check(state));

	if (!sp->multi_instance)
		return -EINVAL;

	get_online_cpus();
	mutex_lock(&cpuhp_state_mutex);

	if (!invoke || !cpuhp_get_teardown_cb(state))
		goto remove;
	/*
	 * Call the teardown callback for each present cpu depending
	 * on the hotplug state of the cpu. This function is not
	 * allowed to fail currently!
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate >= state)
			cpuhp_issue_call(cpu, state, false, node);
	}

remove:
	hlist_del(node);
	mutex_unlock(&cpuhp_state_mutex);
	put_online_cpus();

	return 0;
}
EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance);
/**
 * __cpuhp_remove_state - Remove the callbacks for a hotplug machine state
 * @state:	The state to remove
 * @invoke:	If true, the teardown function is invoked for cpus where
 *		cpu state >= @state
 *
 * The teardown callback is currently not allowed to fail. Think
 * about module removal!
 */
void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
{
	struct cpuhp_step *sp = cpuhp_get_step(state);
	int cpu;

	BUG_ON(cpuhp_cb_check(state));

	get_online_cpus();
	mutex_lock(&cpuhp_state_mutex);

	if (sp->multi_instance) {
		WARN(!hlist_empty(&sp->list),
		     "Error: Removing state %d which has instances left.\n",
		     state);
		goto remove;
	}

	if (!invoke || !cpuhp_get_teardown_cb(state))
		goto remove;

	/*
	 * Call the teardown callback for each present cpu depending
	 * on the hotplug state of the cpu. This function is not
	 * allowed to fail currently!
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate >= state)
			cpuhp_issue_call(cpu, state, false, NULL);
	}
remove:
	cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
	mutex_unlock(&cpuhp_state_mutex);
	put_online_cpus();
}
EXPORT_SYMBOL(__cpuhp_remove_state);
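
/*
 * Example (illustrative only): module exit path undoing the dynamic
 * setup shown earlier, via the cpuhp_remove_state() wrapper from
 * <linux/cpuhotplug.h>:
 *
 *	cpuhp_remove_state(foo_online_state);
 */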
#if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)
static ssize_t show_cpuhp_state(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);

	return sprintf(buf, "%d\n", st->state);
}
static DEVICE_ATTR(state, 0444, show_cpuhp_state, NULL);
static ssize_t write_cpuhp_target(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
	struct cpuhp_step *sp;
	int target, ret;

	ret = kstrtoint(buf, 10, &target);
	if (ret)
		return ret;

#ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL
	if (target < CPUHP_OFFLINE || target > CPUHP_ONLINE)
		return -EINVAL;
#else
	if (target != CPUHP_OFFLINE && target != CPUHP_ONLINE)
		return -EINVAL;
#endif

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	mutex_lock(&cpuhp_state_mutex);
	sp = cpuhp_get_step(target);
	ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
	mutex_unlock(&cpuhp_state_mutex);
	if (ret)
		goto out;

	if (st->state < target)
		ret = do_cpu_up(dev->id, target);
	else
		ret = do_cpu_down(dev->id, target);
out:
	unlock_device_hotplug();
	return ret ? ret : count;
}
static ssize_t show_cpuhp_target(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);

	return sprintf(buf, "%d\n", st->target);
}
static DEVICE_ATTR(target, 0644, show_cpuhp_target, write_cpuhp_target);
static struct attribute *cpuhp_cpu_attrs[] = {
	&dev_attr_state.attr,
	&dev_attr_target.attr,
	NULL
};

static struct attribute_group cpuhp_cpu_attr_group = {
	.attrs = cpuhp_cpu_attrs,
	.name = "hotplug",
};
static ssize_t show_cpuhp_states(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	ssize_t cur, res = 0;
	int i;

	mutex_lock(&cpuhp_state_mutex);
	for (i = CPUHP_OFFLINE; i <= CPUHP_ONLINE; i++) {
		struct cpuhp_step *sp = cpuhp_get_step(i);

		if (sp->name) {
			cur = sprintf(buf, "%3d: %s\n", i, sp->name);
			buf += cur;
			res += cur;
		}
	}
	mutex_unlock(&cpuhp_state_mutex);
	return res;
}
static DEVICE_ATTR(states, 0444, show_cpuhp_states, NULL);
static struct attribute *cpuhp_cpu_root_attrs[] = {
	&dev_attr_states.attr,
	NULL
};

static struct attribute_group cpuhp_cpu_root_attr_group = {
	.attrs = cpuhp_cpu_root_attrs,
	.name = "hotplug",
};

static int __init cpuhp_sysfs_init(void)
{
	int cpu, ret;

	ret = sysfs_create_group(&cpu_subsys.dev_root->kobj,
				 &cpuhp_cpu_root_attr_group);
	if (ret)
		return ret;

	for_each_possible_cpu(cpu) {
		struct device *dev = get_cpu_device(cpu);

		if (!dev)
			continue;
		ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group);
		if (ret)
			return ret;
	}
	return 0;
}
device_initcall(cpuhp_sysfs_init);
#endif
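
/*
 * Example (illustrative only): the interface created above, as seen from
 * userspace. The state/target values are enum cpuhp_state numbers, so
 * writing 0 (CPUHP_OFFLINE) to "target" takes the CPU down:
 *
 *	# cat /sys/devices/system/cpu/hotplug/states
 *	# cat /sys/devices/system/cpu/cpu1/hotplug/state
 *	# echo 0 > /sys/devices/system/cpu/cpu1/hotplug/target
 */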
/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents all NR_CPUS bits binary values of 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
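
/*
 * Example (illustrative only): how cpumask_of() indexes this table.
 * get_cpu_mask() in <linux/cpumask.h> picks the row that has bit
 * (cpu % BITS_PER_LONG) set and shifts the row pointer back by
 * (cpu / BITS_PER_LONG) longs, so the single bit lands in the right
 * word of the resulting mask:
 *
 *	const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
 *	p -= cpu / BITS_PER_LONG;
 *	return to_cpumask(p);
 */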
const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);
#ifdef CONFIG_INIT_ALL_POSSIBLE
struct cpumask __cpu_possible_mask __read_mostly
	= {CPU_BITS_ALL};
#else
struct cpumask __cpu_possible_mask __read_mostly;
#endif
EXPORT_SYMBOL(__cpu_possible_mask);

struct cpumask __cpu_online_mask __read_mostly;
EXPORT_SYMBOL(__cpu_online_mask);

struct cpumask __cpu_present_mask __read_mostly;
EXPORT_SYMBOL(__cpu_present_mask);

struct cpumask __cpu_active_mask __read_mostly;
EXPORT_SYMBOL(__cpu_active_mask);
void init_cpu_present(const struct cpumask *src)
{
	cpumask_copy(&__cpu_present_mask, src);
}

void init_cpu_possible(const struct cpumask *src)
{
	cpumask_copy(&__cpu_possible_mask, src);
}

void init_cpu_online(const struct cpumask *src)
{
	cpumask_copy(&__cpu_online_mask, src);
}
/*
 * Activate the first processor.
 */
void __init boot_cpu_init(void)
{
	int cpu = smp_processor_id();

	/* Mark the boot cpu "present", "online" etc for SMP and UP case */
	set_cpu_online(cpu, true);
	set_cpu_active(cpu, true);
	set_cpu_present(cpu, true);
	set_cpu_possible(cpu, true);
}
/*
 * Must be called _AFTER_ setting up the per_cpu areas
 */
void __init boot_cpu_state_init(void)
{
	per_cpu_ptr(&cpuhp_state, smp_processor_id())->state = CPUHP_ONLINE;
}