/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licensed under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <linux/tick.h>
#include <linux/irq.h>
#include <linux/smpboot.h>

#include <trace/events/power.h>
#define CREATE_TRACE_POINTS
#include <trace/events/cpuhp.h>

#include "smpboot.h"
/**
 * cpuhp_cpu_state - Per cpu hotplug state storage
 * @state:      The current cpu state
 * @target:     The target state
 * @thread:     Pointer to the hotplug thread
 * @should_run: Thread should execute
 * @rollback:   Perform a rollback
 * @cb_state:   The state for a single callback (install/uninstall)
 * @cb:         Single callback function (install/uninstall)
 * @result:     Result of the operation
 * @done:       Signal completion to the issuer of the task
 */
struct cpuhp_cpu_state {
        enum cpuhp_state        state;
        enum cpuhp_state        target;
#ifdef CONFIG_SMP
        struct task_struct      *thread;
        bool                    should_run;
        bool                    rollback;
        enum cpuhp_state        cb_state;
        int                     (*cb)(unsigned int cpu);
        int                     result;
        struct completion       done;
#endif
};

static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state);
/**
 * cpuhp_step - Hotplug state machine step
 * @name:       Name of the step
 * @startup:    Startup function of the step
 * @teardown:   Teardown function of the step
 * @skip_onerr: Do not invoke the functions on error rollback
 *              Will go away once the notifiers are gone
 * @cant_stop:  Bringup/teardown can't be stopped at this step
 */
struct cpuhp_step {
        const char      *name;
        int             (*startup)(unsigned int cpu);
        int             (*teardown)(unsigned int cpu);
        bool            skip_onerr;
        bool            cant_stop;
};

static DEFINE_MUTEX(cpuhp_state_mutex);
static struct cpuhp_step cpuhp_bp_states[];
static struct cpuhp_step cpuhp_ap_states[];
/**
 * cpuhp_invoke_callback - Invoke the callbacks for a given state
 * @cpu:        The cpu for which the callback should be invoked
 * @step:       The step in the state machine
 * @cb:         The callback function to invoke
 *
 * Called from cpu hotplug and from the state register machinery.
 */
static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state step,
                                 int (*cb)(unsigned int))
{
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
        int ret = 0;

        if (cb) {
                trace_cpuhp_enter(cpu, st->target, step, cb);
                ret = cb(cpu);
                trace_cpuhp_exit(cpu, st->state, step, ret);
        }
        return ret;
}
#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);
bool cpuhp_tasks_frozen;
EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);

/*
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 * The APIs cpu_notifier_register_begin/done() must be used to protect CPU
 * hotplug callback (un)registration performed using __register_cpu_notifier()
 * or __unregister_cpu_notifier().
 */
void cpu_maps_update_begin(void)
{
        mutex_lock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_begin);

void cpu_maps_update_done(void)
{
        mutex_unlock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_done);

static RAW_NOTIFIER_HEAD(cpu_chain);

/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;
#ifdef CONFIG_HOTPLUG_CPU

static struct {
        struct task_struct *active_writer;
        /* wait queue to wake up the active_writer */
        wait_queue_head_t wq;
        /* verifies that no writer will get active while readers are active */
        struct mutex lock;
        /*
         * Also blocks the new readers during
         * an ongoing cpu hotplug operation.
         */
        atomic_t refcount;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
        struct lockdep_map dep_map;
#endif
} cpu_hotplug = {
        .active_writer = NULL,
        .wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
        .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        .dep_map = {.name = "cpu_hotplug.lock" },
#endif
};
/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
#define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire_tryread() \
                                  lock_map_acquire_tryread(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire()      lock_map_acquire(&cpu_hotplug.dep_map)
#define cpuhp_lock_release()      lock_map_release(&cpu_hotplug.dep_map)
void get_online_cpus(void)
{
        might_sleep();
        if (cpu_hotplug.active_writer == current)
                return;
        cpuhp_lock_acquire_read();
        mutex_lock(&cpu_hotplug.lock);
        atomic_inc(&cpu_hotplug.refcount);
        mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);
void put_online_cpus(void)
{
        int refcount;

        if (cpu_hotplug.active_writer == current)
                return;

        refcount = atomic_dec_return(&cpu_hotplug.refcount);
        if (WARN_ON(refcount < 0)) /* try to fix things up */
                atomic_inc(&cpu_hotplug.refcount);

        if (refcount <= 0 && waitqueue_active(&cpu_hotplug.wq))
                wake_up(&cpu_hotplug.wq);

        cpuhp_lock_release();
}
EXPORT_SYMBOL_GPL(put_online_cpus);
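/*
 * A usage sketch (illustrative, not part of the original file): readers pin
 * cpu_online_mask around a CPU walk so no CPU can come or go in between.
 */
static void __maybe_unused example_online_walk(void)
{
        unsigned int cpu;

        get_online_cpus();              /* hold off hotplug writers */
        for_each_online_cpu(cpu)
                pr_info("cpu %u is online\n", cpu);
        put_online_cpus();              /* let writers proceed again */
}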
/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an API which is called all that often.
 */
void cpu_hotplug_begin(void)
{
        DEFINE_WAIT(wait);

        cpu_hotplug.active_writer = current;
        cpuhp_lock_acquire();

        /* Wait for the refcount to drop to zero. */
        for (;;) {
                mutex_lock(&cpu_hotplug.lock);
                prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE);
                if (likely(!atomic_read(&cpu_hotplug.refcount)))
                        break;
                mutex_unlock(&cpu_hotplug.lock);
                schedule();
        }
        finish_wait(&cpu_hotplug.wq, &wait);
}
void cpu_hotplug_done(void)
{
        cpu_hotplug.active_writer = NULL;
        mutex_unlock(&cpu_hotplug.lock);
        cpuhp_lock_release();
}
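/*
 * For reference, a hotplug writer always nests the two locks in this order
 * (sketch only; see _cpu_down()/_cpu_up() below):
 *
 *      cpu_maps_update_begin();        // serialize against other writers
 *      cpu_hotplug_begin();            // wait for all readers to drain
 *      ...modify the cpu masks, run the state machine...
 *      cpu_hotplug_done();
 *      cpu_maps_update_done();
 */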
/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
        cpu_maps_update_begin();
        cpu_hotplug_disabled++;
        cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_disable);

void cpu_hotplug_enable(void)
{
        cpu_maps_update_begin();
        WARN_ON(--cpu_hotplug_disabled < 0);
        cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
#endif  /* CONFIG_HOTPLUG_CPU */
/* Need to know about CPUs going up/down? */
int register_cpu_notifier(struct notifier_block *nb)
{
        int ret;

        cpu_maps_update_begin();
        ret = raw_notifier_chain_register(&cpu_chain, nb);
        cpu_maps_update_done();
        return ret;
}

int __register_cpu_notifier(struct notifier_block *nb)
{
        return raw_notifier_chain_register(&cpu_chain, nb);
}
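/*
 * A minimal sketch of a legacy notifier user (illustrative only; the
 * callback and block names are hypothetical). The CPU_TASKS_FROZEN bit is
 * masked off so suspend-time events are treated like runtime ones.
 */
static int __maybe_unused example_cpu_callback(struct notifier_block *nfb,
                                               unsigned long action, void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;

        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_ONLINE:
                pr_info("cpu %u came online\n", cpu);
                break;
        case CPU_DOWN_PREPARE:
                pr_info("cpu %u is about to go down\n", cpu);
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block example_cpu_nb __maybe_unused = {
        .notifier_call = example_cpu_callback,
};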
static int __cpu_notify(unsigned long val, unsigned int cpu, int nr_to_call,
                        int *nr_calls)
{
        unsigned long mod = cpuhp_tasks_frozen ? CPU_TASKS_FROZEN : 0;
        void *hcpu = (void *)(long)cpu;
        int ret;

        ret = __raw_notifier_call_chain(&cpu_chain, val | mod, hcpu, nr_to_call,
                                        nr_calls);

        return notifier_to_errno(ret);
}

static int cpu_notify(unsigned long val, unsigned int cpu)
{
        return __cpu_notify(val, cpu, -1, NULL);
}

static void cpu_notify_nofail(unsigned long val, unsigned int cpu)
{
        BUG_ON(cpu_notify(val, cpu));
}
/* Notifier wrappers for transitioning to state machine */
static int notify_prepare(unsigned int cpu)
{
        int nr_calls = 0;
        int ret;

        ret = __cpu_notify(CPU_UP_PREPARE, cpu, -1, &nr_calls);
        if (ret) {
                nr_calls--;
                printk(KERN_WARNING "%s: attempt to bring up CPU %u failed\n",
                                __func__, cpu);
                __cpu_notify(CPU_UP_CANCELED, cpu, nr_calls, NULL);
        }
        return ret;
}
static int notify_online(unsigned int cpu)
{
        cpu_notify(CPU_ONLINE, cpu);
        return 0;
}

static int notify_starting(unsigned int cpu)
{
        cpu_notify(CPU_STARTING, cpu);
        return 0;
}
static int bringup_wait_for_ap(unsigned int cpu)
{
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

        wait_for_completion(&st->done);
        return st->result;
}
static int bringup_cpu(unsigned int cpu)
{
        struct task_struct *idle = idle_thread_get(cpu);
        int ret;

        /* Arch-specific enabling code. */
        ret = __cpu_up(cpu, idle);
        if (ret) {
                cpu_notify(CPU_UP_CANCELED, cpu);
                return ret;
        }
        ret = bringup_wait_for_ap(cpu);
        BUG_ON(!cpu_online(cpu));
        return ret;
}
/*
 * Hotplug state machine related functions
 */
static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st,
                          struct cpuhp_step *steps)
{
        for (st->state++; st->state < st->target; st->state++) {
                struct cpuhp_step *step = steps + st->state;

                if (!step->skip_onerr)
                        cpuhp_invoke_callback(cpu, st->state, step->startup);
        }
}
static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
                                struct cpuhp_step *steps, enum cpuhp_state target)
{
        enum cpuhp_state prev_state = st->state;
        int ret = 0;

        for (; st->state > target; st->state--) {
                struct cpuhp_step *step = steps + st->state;

                ret = cpuhp_invoke_callback(cpu, st->state, step->teardown);
                if (ret) {
                        st->target = prev_state;
                        undo_cpu_down(cpu, st, steps);
                        break;
                }
        }
        return ret;
}
static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st,
                        struct cpuhp_step *steps)
{
        for (st->state--; st->state > st->target; st->state--) {
                struct cpuhp_step *step = steps + st->state;

                if (!step->skip_onerr)
                        cpuhp_invoke_callback(cpu, st->state, step->teardown);
        }
}
static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
                              struct cpuhp_step *steps, enum cpuhp_state target)
{
        enum cpuhp_state prev_state = st->state;
        int ret = 0;

        while (st->state < target) {
                struct cpuhp_step *step;

                st->state++;
                step = steps + st->state;
                ret = cpuhp_invoke_callback(cpu, st->state, step->startup);
                if (ret) {
                        st->target = prev_state;
                        undo_cpu_up(cpu, st, steps);
                        break;
                }
        }
        return ret;
}
/*
 * The cpu hotplug threads manage the bringup and teardown of the cpus
 */
static void cpuhp_create(unsigned int cpu)
{
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

        init_completion(&st->done);
}

static int cpuhp_should_run(unsigned int cpu)
{
        struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

        return st->should_run;
}
/* Execute the teardown callbacks. Used to be CPU_DOWN_PREPARE */
static int cpuhp_ap_offline(unsigned int cpu, struct cpuhp_cpu_state *st)
{
        enum cpuhp_state target = max((int)st->target, CPUHP_TEARDOWN_CPU);

        return cpuhp_down_callbacks(cpu, st, cpuhp_ap_states, target);
}

/* Execute the online startup callbacks. Used to be CPU_ONLINE */
static int cpuhp_ap_online(unsigned int cpu, struct cpuhp_cpu_state *st)
{
        return cpuhp_up_callbacks(cpu, st, cpuhp_ap_states, st->target);
}
/*
 * Execute teardown/startup callbacks on the plugged cpu. Also used to invoke
 * callbacks when a state gets [un]installed at runtime.
 */
static void cpuhp_thread_fun(unsigned int cpu)
{
        struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
        int ret = 0;

        /*
         * Paired with the mb() in cpuhp_kick_ap_work and
         * cpuhp_invoke_ap_callback, so the work set is consistent visible.
         */
        smp_mb();
        if (!st->should_run)
                return;

        st->should_run = false;

        /* Single callback invocation for [un]install ? */
        if (st->cb) {
                if (st->cb_state < CPUHP_AP_ONLINE) {
                        local_irq_disable();
                        ret = cpuhp_invoke_callback(cpu, st->cb_state, st->cb);
                        local_irq_enable();
                } else {
                        ret = cpuhp_invoke_callback(cpu, st->cb_state, st->cb);
                }
        } else if (st->rollback) {
                BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE);

                undo_cpu_down(cpu, st, cpuhp_ap_states);
                /*
                 * This is a momentary workaround to keep the notifier users
                 * happy. Will go away once we got rid of the notifiers.
                 */
                cpu_notify_nofail(CPU_DOWN_FAILED, cpu);
                st->rollback = false;
        } else {
                /* Cannot happen .... */
                BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE);

                /* Regular hotplug work */
                if (st->state < st->target)
                        ret = cpuhp_ap_online(cpu, st);
                else if (st->state > st->target)
                        ret = cpuhp_ap_offline(cpu, st);
        }
        st->result = ret;
        complete(&st->done);
}
/* Invoke a single callback on a remote cpu */
static int cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state,
                                    int (*cb)(unsigned int))
{
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

        if (!cpu_online(cpu))
                return 0;

        st->cb_state = state;
        st->cb = cb;
        /*
         * Make sure the above stores are visible before should_run becomes
         * true. Paired with the mb() above in cpuhp_thread_fun()
         */
        smp_mb();
        st->should_run = true;
        wake_up_process(st->thread);
        wait_for_completion(&st->done);
        return st->result;
}
/* Regular hotplug invocation of the AP hotplug thread */
static void __cpuhp_kick_ap_work(struct cpuhp_cpu_state *st)
{
        st->result = 0;
        st->cb = NULL;
        /*
         * Make sure the above stores are visible before should_run becomes
         * true. Paired with the mb() above in cpuhp_thread_fun()
         */
        smp_mb();
        st->should_run = true;
        wake_up_process(st->thread);
}
static int cpuhp_kick_ap_work(unsigned int cpu)
{
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
        enum cpuhp_state state = st->state;

        trace_cpuhp_enter(cpu, st->target, state, cpuhp_kick_ap_work);
        __cpuhp_kick_ap_work(st);
        wait_for_completion(&st->done);
        trace_cpuhp_exit(cpu, st->state, state, st->result);
        return st->result;
}
static struct smp_hotplug_thread cpuhp_threads = {
        .store                  = &cpuhp_state.thread,
        .create                 = &cpuhp_create,
        .thread_should_run      = cpuhp_should_run,
        .thread_fn              = cpuhp_thread_fun,
        .thread_comm            = "cpuhp/%u",
        .selfparking            = true,
};

void __init cpuhp_threads_init(void)
{
        BUG_ON(smpboot_register_percpu_thread(&cpuhp_threads));
        kthread_unpark(this_cpu_read(cpuhp_state.thread));
}
#ifdef CONFIG_HOTPLUG_CPU
EXPORT_SYMBOL(register_cpu_notifier);
EXPORT_SYMBOL(__register_cpu_notifier);

void unregister_cpu_notifier(struct notifier_block *nb)
{
        cpu_maps_update_begin();
        raw_notifier_chain_unregister(&cpu_chain, nb);
        cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);

void __unregister_cpu_notifier(struct notifier_block *nb)
{
        raw_notifier_chain_unregister(&cpu_chain, nb);
}
EXPORT_SYMBOL(__unregister_cpu_notifier);
/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask. While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
        struct task_struct *p;

        /*
         * This function is called after the cpu is taken down and marked
         * offline, so it's not like new tasks will ever get this cpu set in
         * their mm mask. -- Peter Zijlstra
         * Thus, we may use rcu_read_lock() here, instead of grabbing
         * full-fledged tasklist_lock.
         */
        WARN_ON(cpu_online(cpu));
        rcu_read_lock();
        for_each_process(p) {
                struct task_struct *t;

                /*
                 * Main thread might exit, but other threads may still have
                 * a valid mm. Find one.
                 */
                t = find_lock_task_mm(p);
                if (!t)
                        continue;
                cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
                task_unlock(t);
        }
        rcu_read_unlock();
}
static inline void check_for_tasks(int dead_cpu)
{
        struct task_struct *g, *p;

        read_lock(&tasklist_lock);
        for_each_process_thread(g, p) {
                if (!p->on_rq)
                        continue;
                /*
                 * We do the check with unlocked task_rq(p)->lock.
                 * Order the reading so we do not warn about a task
                 * which was running on this cpu in the past, and
                 * has just been woken on another cpu.
                 */
                rmb();
                if (task_cpu(p) != dead_cpu)
                        continue;

                pr_warn("Task %s (pid=%d) is on cpu %d (state=%ld, flags=%x)\n",
                        p->comm, task_pid_nr(p), dead_cpu, p->state, p->flags);
        }
        read_unlock(&tasklist_lock);
}
static int notify_down_prepare(unsigned int cpu)
{
        int err, nr_calls = 0;

        err = __cpu_notify(CPU_DOWN_PREPARE, cpu, -1, &nr_calls);
        if (err) {
                nr_calls--;
                __cpu_notify(CPU_DOWN_FAILED, cpu, nr_calls, NULL);
                pr_warn("%s: attempt to take down CPU %u failed\n",
                        __func__, cpu);
        }
        return err;
}
static int notify_dying(unsigned int cpu)
{
        cpu_notify(CPU_DYING, cpu);
        return 0;
}
/* Take this CPU down. */
static int take_cpu_down(void *_param)
{
        struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
        enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
        int err, cpu = smp_processor_id();

        /* Ensure this CPU doesn't handle any more interrupts. */
        err = __cpu_disable();
        if (err < 0)
                return err;

        /* Invoke the former CPU_DYING callbacks */
        for (; st->state > target; st->state--) {
                struct cpuhp_step *step = cpuhp_ap_states + st->state;

                cpuhp_invoke_callback(cpu, st->state, step->teardown);
        }
        /* Give up timekeeping duties */
        tick_handover_do_timer();
        /* Park the stopper thread */
        stop_machine_park(cpu);
        return 0;
}
static int takedown_cpu(unsigned int cpu)
{
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
        int err;

        /*
         * By now we've cleared cpu_active_mask, wait for all preempt-disabled
         * and RCU users of this state to go away such that all new such users
         * will observe it.
         *
         * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
         * not imply sync_sched(), so wait for both.
         *
         * Do sync before park smpboot threads to take care the rcu boost case.
         */
        if (IS_ENABLED(CONFIG_PREEMPT))
                synchronize_rcu_mult(call_rcu, call_rcu_sched);
        else
                synchronize_rcu();

        /* Park the smpboot threads */
        kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);
        smpboot_park_threads(cpu);

        /*
         * Prevent irq alloc/free while the dying cpu reorganizes the
         * interrupt affinities.
         */
        irq_lock_sparse();

        /*
         * So now all preempt/rcu users must observe !cpu_active().
         */
        err = stop_machine(take_cpu_down, NULL, cpumask_of(cpu));
        if (err) {
                /* CPU refused to die */
                irq_unlock_sparse();
                /* Unpark the hotplug thread so we can rollback there */
                kthread_unpark(per_cpu_ptr(&cpuhp_state, cpu)->thread);
                return err;
        }
        BUG_ON(cpu_online(cpu));

        /*
         * The migration_call() CPU_DYING callback will have removed all
         * runnable tasks from the cpu, there's only the idle task left now
         * that the migration thread is done doing the stop_machine thing.
         *
         * Wait for the stop thread to go away.
         */
        wait_for_completion(&st->done);
        BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);

        /* Interrupts are moved away from the dying cpu, reenable alloc/free */
        irq_unlock_sparse();

        hotplug_cpu__broadcast_tick_pull(cpu);
        /* This actually kills the CPU. */
        __cpu_die(cpu);

        tick_cleanup_dead_cpu(cpu);
        return 0;
}
static int notify_dead(unsigned int cpu)
{
        cpu_notify_nofail(CPU_DEAD, cpu);
        check_for_tasks(cpu);
        return 0;
}
static void cpuhp_complete_idle_dead(void *arg)
{
        struct cpuhp_cpu_state *st = arg;

        complete(&st->done);
}
void cpuhp_report_idle_dead(void)
{
        struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

        BUG_ON(st->state != CPUHP_AP_OFFLINE);
        rcu_report_dead(smp_processor_id());
        st->state = CPUHP_AP_IDLE_DEAD;
        /*
         * We cannot call complete after rcu_report_dead() so we delegate it
         * to an online cpu.
         */
        smp_call_function_single(cpumask_first(cpu_online_mask),
                                 cpuhp_complete_idle_dead, st, 0);
}
#else
#define notify_down_prepare     NULL
#define takedown_cpu            NULL
#define notify_dead             NULL
#define notify_dying            NULL
#endif
#ifdef CONFIG_HOTPLUG_CPU

/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
                           enum cpuhp_state target)
{
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
        int prev_state, ret = 0;
        bool hasdied = false;

        if (num_online_cpus() == 1)
                return -EBUSY;

        if (!cpu_present(cpu))
                return -EINVAL;

        cpu_hotplug_begin();

        cpuhp_tasks_frozen = tasks_frozen;

        prev_state = st->state;
        st->target = target;
        /*
         * If the current CPU state is in the range of the AP hotplug thread,
         * then we need to kick the thread.
         */
        if (st->state > CPUHP_TEARDOWN_CPU) {
                ret = cpuhp_kick_ap_work(cpu);
                /*
                 * The AP side has done the error rollback already. Just
                 * return the error code..
                 */
                if (ret)
                        goto out;

                /*
                 * We might have stopped still in the range of the AP hotplug
                 * thread. Nothing to do anymore.
                 */
                if (st->state > CPUHP_TEARDOWN_CPU)
                        goto out;
        }
        /*
         * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need
         * to do the further cleanups.
         */
        ret = cpuhp_down_callbacks(cpu, st, cpuhp_bp_states, target);
        if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state) {
                st->target = prev_state;
                st->rollback = true;
                cpuhp_kick_ap_work(cpu);
        }

        hasdied = prev_state != st->state && st->state == CPUHP_OFFLINE;
out:
        cpu_hotplug_done();
        /* This post dead nonsense must die */
        if (!ret && hasdied)
                cpu_notify_nofail(CPU_POST_DEAD, cpu);
        return ret;
}
static int do_cpu_down(unsigned int cpu, enum cpuhp_state target)
{
        int err;

        cpu_maps_update_begin();

        if (cpu_hotplug_disabled) {
                err = -EBUSY;
                goto out;
        }

        err = _cpu_down(cpu, 0, target);

out:
        cpu_maps_update_done();
        return err;
}

int cpu_down(unsigned int cpu)
{
        return do_cpu_down(cpu, CPUHP_OFFLINE);
}
EXPORT_SYMBOL(cpu_down);
#endif /*CONFIG_HOTPLUG_CPU*/
/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
        enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);

        while (st->state < target) {
                struct cpuhp_step *step;

                st->state++;
                step = cpuhp_ap_states + st->state;
                cpuhp_invoke_callback(cpu, st->state, step->startup);
        }
}
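/*
 * For orientation, a rough sketch of the arch-side bringup sequence on the
 * new CPU (names modelled on typical arch code; details vary per arch):
 *
 *      notify_cpu_starting(cpu);       // irqs still off, runs CPU_STARTING
 *      set_cpu_online(cpu, true);      // visible to the rest of the kernel
 *      local_irq_enable();
 *      cpu_startup_entry(CPUHP_AP_ONLINE_IDLE); // lands in cpuhp_online_idle()
 */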
/*
 * Called from the idle task. We need to set active here, so we can kick off
 * the stopper thread and unpark the smpboot threads. If the target state is
 * beyond CPUHP_AP_ONLINE_IDLE we kick cpuhp thread and let it bring up the
 * cpu further.
 */
void cpuhp_online_idle(enum cpuhp_state state)
{
        struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
        unsigned int cpu = smp_processor_id();

        /* Happens for the boot cpu */
        if (state != CPUHP_AP_ONLINE_IDLE)
                return;

        st->state = CPUHP_AP_ONLINE_IDLE;

        /* The cpu is marked online, set it active now */
        set_cpu_active(cpu, true);
        /* Unpark the stopper thread and the hotplug thread of this cpu */
        stop_machine_unpark(cpu);
        kthread_unpark(st->thread);

        /* Should we go further up ? */
        if (st->target > CPUHP_AP_ONLINE_IDLE)
                __cpuhp_kick_ap_work(st);
        else
                complete(&st->done);
}
/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
{
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
        struct task_struct *idle;
        int ret = 0;

        cpu_hotplug_begin();

        if (!cpu_present(cpu)) {
                ret = -EINVAL;
                goto out;
        }

        /*
         * The caller of do_cpu_up might have raced with another
         * caller. Ignore it for now.
         */
        if (st->state >= target)
                goto out;

        if (st->state == CPUHP_OFFLINE) {
                /* Let it fail before we try to bring the cpu up */
                idle = idle_thread_get(cpu);
                if (IS_ERR(idle)) {
                        ret = PTR_ERR(idle);
                        goto out;
                }
        }

        cpuhp_tasks_frozen = tasks_frozen;

        st->target = target;
        /*
         * If the current CPU state is in the range of the AP hotplug thread,
         * then we need to kick the thread once more.
         */
        if (st->state > CPUHP_BRINGUP_CPU) {
                ret = cpuhp_kick_ap_work(cpu);
                /*
                 * The AP side has done the error rollback already. Just
                 * return the error code..
                 */
                if (ret)
                        goto out;
        }

        /*
         * Try to reach the target state. We max out on the BP at
         * CPUHP_BRINGUP_CPU. After that the AP hotplug thread is
         * responsible for bringing it up to the target state.
         */
        target = min((int)target, CPUHP_BRINGUP_CPU);
        ret = cpuhp_up_callbacks(cpu, st, cpuhp_bp_states, target);
out:
        cpu_hotplug_done();
        return ret;
}
static int do_cpu_up(unsigned int cpu, enum cpuhp_state target)
{
        int err = 0;

        if (!cpu_possible(cpu)) {
                pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
                       cpu);
#if defined(CONFIG_IA64)
                pr_err("please check additional_cpus= boot parameter\n");
#endif
                return -EINVAL;
        }

        err = try_online_node(cpu_to_node(cpu));
        if (err)
                return err;

        cpu_maps_update_begin();

        if (cpu_hotplug_disabled) {
                err = -EBUSY;
                goto out;
        }

        err = _cpu_up(cpu, 0, target);
out:
        cpu_maps_update_done();
        return err;
}

int cpu_up(unsigned int cpu)
{
        return do_cpu_up(cpu, CPUHP_ONLINE);
}
EXPORT_SYMBOL_GPL(cpu_up);
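/*
 * Usage sketch (illustrative only): cycling a CPU from kernel code with
 * CONFIG_HOTPLUG_CPU enabled. Both calls return -EBUSY while hotplug is
 * administratively disabled:
 *
 *      ret = cpu_down(cpu);
 *      if (!ret)
 *              ret = cpu_up(cpu);
 */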
#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int disable_nonboot_cpus(void)
{
        int cpu, first_cpu, error = 0;

        cpu_maps_update_begin();
        first_cpu = cpumask_first(cpu_online_mask);
        /*
         * We take down all of the non-boot CPUs in one shot to avoid races
         * with the userspace trying to use the CPU hotplug at the same time
         */
        cpumask_clear(frozen_cpus);

        pr_info("Disabling non-boot CPUs ...\n");
        for_each_online_cpu(cpu) {
                if (cpu == first_cpu)
                        continue;
                trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
                error = _cpu_down(cpu, 1, CPUHP_OFFLINE);
                trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
                if (!error)
                        cpumask_set_cpu(cpu, frozen_cpus);
                else {
                        pr_err("Error taking CPU%d down: %d\n", cpu, error);
                        break;
                }
        }

        if (!error)
                BUG_ON(num_online_cpus() > 1);
        else
                pr_err("Non-boot CPUs are not disabled\n");

        /*
         * Make sure the CPUs won't be enabled by someone else. We need to do
         * this even in case of failure as all disable_nonboot_cpus() users are
         * supposed to do enable_nonboot_cpus() on the failure path.
         */
        cpu_hotplug_disabled++;

        cpu_maps_update_done();
        return error;
}
void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void enable_nonboot_cpus(void)
{
        int cpu, error;

        /* Allow everyone to use the CPU hotplug again */
        cpu_maps_update_begin();
        WARN_ON(--cpu_hotplug_disabled < 0);
        if (cpumask_empty(frozen_cpus))
                goto out;

        pr_info("Enabling non-boot CPUs ...\n");

        arch_enable_nonboot_cpus_begin();

        for_each_cpu(cpu, frozen_cpus) {
                trace_suspend_resume(TPS("CPU_ON"), cpu, true);
                error = _cpu_up(cpu, 1, CPUHP_ONLINE);
                trace_suspend_resume(TPS("CPU_ON"), cpu, false);
                if (!error) {
                        pr_info("CPU%d is up\n", cpu);
                        continue;
                }
                pr_warn("Error taking CPU%d up: %d\n", cpu, error);
        }

        arch_enable_nonboot_cpus_end();

        cpumask_clear(frozen_cpus);
out:
        cpu_maps_update_done();
}
static int __init alloc_frozen_cpus(void)
{
        if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
                return -ENOMEM;
        return 0;
}
core_initcall(alloc_frozen_cpus);
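/*
 * For reference, the suspend core pairs the two calls above roughly as
 * (sketch):
 *
 *      error = disable_nonboot_cpus();
 *      if (!error)
 *              ...enter the sleep state...
 *      enable_nonboot_cpus();
 *
 * which is why disable_nonboot_cpus() bumps cpu_hotplug_disabled even on
 * failure: the matching enable_nonboot_cpus() unconditionally decrements it.
 */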
/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
                        unsigned long action, void *ptr)
{
        switch (action) {

        case PM_SUSPEND_PREPARE:
        case PM_HIBERNATION_PREPARE:
                cpu_hotplug_disable();
                break;

        case PM_POST_SUSPEND:
        case PM_POST_HIBERNATION:
                cpu_hotplug_enable();
                break;

        default:
                return NOTIFY_DONE;
        }

        return NOTIFY_OK;
}

static int __init cpu_hotplug_pm_sync_init(void)
{
        /*
         * cpu_hotplug_pm_callback has higher priority than x86
         * bsp_pm_callback which depends on cpu_hotplug_pm_callback
         * to disable cpu hotplug to avoid cpu hotplug race.
         */
        pm_notifier(cpu_hotplug_pm_callback, 0);
        return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */

#endif /* CONFIG_SMP */
/* Boot processor state steps */
static struct cpuhp_step cpuhp_bp_states[] = {
        [CPUHP_OFFLINE] = {
                .name                   = "offline",
                .startup                = NULL,
                .teardown               = NULL,
        },
#ifdef CONFIG_SMP
        [CPUHP_CREATE_THREADS]= {
                .name                   = "threads:create",
                .startup                = smpboot_create_threads,
                .teardown               = NULL,
                .cant_stop              = true,
        },
        /*
         * Preparatory and dead notifiers. Will be replaced once the notifiers
         * are converted to states.
         */
        [CPUHP_NOTIFY_PREPARE] = {
                .name                   = "notify:prepare",
                .startup                = notify_prepare,
                .teardown               = notify_dead,
                .skip_onerr             = true,
                .cant_stop              = true,
        },
        /* Kicks the plugged cpu into life */
        [CPUHP_BRINGUP_CPU] = {
                .name                   = "cpu:bringup",
                .startup                = bringup_cpu,
                .teardown               = NULL,
                .cant_stop              = true,
        },
        /*
         * Handled on control processor until the plugged processor manages
         * this itself.
         */
        [CPUHP_TEARDOWN_CPU] = {
                .name                   = "cpu:teardown",
                .startup                = NULL,
                .teardown               = takedown_cpu,
                .cant_stop              = true,
        },
#else
        [CPUHP_BRINGUP_CPU] = { },
#endif
};
/* Application processor state steps */
static struct cpuhp_step cpuhp_ap_states[] = {
#ifdef CONFIG_SMP
        /* Final state before CPU kills itself */
        [CPUHP_AP_IDLE_DEAD] = {
                .name                   = "idle:dead",
        },
        /*
         * Last state before CPU enters the idle loop to die. Transient state
         * for synchronization.
         */
        [CPUHP_AP_OFFLINE] = {
                .name                   = "ap:offline",
                .cant_stop              = true,
        },
        /*
         * Low level startup/teardown notifiers. Run with interrupts
         * disabled. Will be removed once the notifiers are converted to
         * states.
         */
        [CPUHP_AP_NOTIFY_STARTING] = {
                .name                   = "notify:starting",
                .startup                = notify_starting,
                .teardown               = notify_dying,
                .skip_onerr             = true,
                .cant_stop              = true,
        },
        /* Entry state on starting. Interrupts enabled from here on. Transient
         * state for synchronization */
        [CPUHP_AP_ONLINE] = {
                .name                   = "ap:online",
        },
        /* Handle smpboot threads park/unpark */
        [CPUHP_AP_SMPBOOT_THREADS] = {
                .name                   = "smpboot:threads",
                .startup                = smpboot_unpark_threads,
                .teardown               = NULL,
        },
        /*
         * Online/down_prepare notifiers. Will be removed once the notifiers
         * are converted to states.
         */
        [CPUHP_AP_NOTIFY_ONLINE] = {
                .name                   = "notify:online",
                .startup                = notify_online,
                .teardown               = notify_down_prepare,
        },
#endif
        /*
         * The dynamically registered state space is here
         */

        /* CPU is fully up and running. */
        [CPUHP_ONLINE] = {
                .name                   = "online",
                .startup                = NULL,
                .teardown               = NULL,
        },
};
/* Sanity check for callbacks */
static int cpuhp_cb_check(enum cpuhp_state state)
{
        if (state <= CPUHP_OFFLINE || state >= CPUHP_ONLINE)
                return -EINVAL;
        return 0;
}
)
1296 * The extra check for CPUHP_TEARDOWN_CPU is only for documentation
1297 * purposes as that state is handled explicitely in cpu_down.
1299 return state
> CPUHP_BRINGUP_CPU
&& state
!= CPUHP_TEARDOWN_CPU
;
static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
{
        struct cpuhp_step *sp;

        sp = cpuhp_is_ap_state(state) ? cpuhp_ap_states : cpuhp_bp_states;
        return sp + state;
}
static void cpuhp_store_callbacks(enum cpuhp_state state,
                                  const char *name,
                                  int (*startup)(unsigned int cpu),
                                  int (*teardown)(unsigned int cpu))
{
        /* (Un)Install the callbacks for further cpu hotplug operations */
        struct cpuhp_step *sp;

        mutex_lock(&cpuhp_state_mutex);
        sp = cpuhp_get_step(state);
        sp->startup = startup;
        sp->teardown = teardown;
        sp->name = name;
        mutex_unlock(&cpuhp_state_mutex);
}
static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
{
        return cpuhp_get_step(state)->teardown;
}
/*
 * Call the startup/teardown function for a step either on the AP or
 * on the current CPU.
 */
static int cpuhp_issue_call(int cpu, enum cpuhp_state state,
                            int (*cb)(unsigned int), bool bringup)
{
        int ret;

        if (!cb)
                return 0;
        /*
         * The non AP bound callbacks can fail on bringup. On teardown
         * e.g. module removal we crash for now.
         */
#ifdef CONFIG_SMP
        if (cpuhp_is_ap_state(state))
                ret = cpuhp_invoke_ap_callback(cpu, state, cb);
        else
                ret = cpuhp_invoke_callback(cpu, state, cb);
#else
        ret = cpuhp_invoke_callback(cpu, state, cb);
#endif
        BUG_ON(ret && !bringup);
        return ret;
}
/*
 * Called from __cpuhp_setup_state on a recoverable failure.
 *
 * Note: The teardown callbacks for rollback are not allowed to fail!
 */
static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
                                   int (*teardown)(unsigned int cpu))
{
        int cpu;

        if (!teardown)
                return;

        /* Roll back the already executed steps on the other cpus */
        for_each_present_cpu(cpu) {
                struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
                int cpustate = st->state;

                if (cpu >= failedcpu)
                        break;

                /* Did we invoke the startup call on that cpu ? */
                if (cpustate >= state)
                        cpuhp_issue_call(cpu, state, teardown, false);
        }
}
/*
 * Returns a free slot for dynamic assignment in the Online state space. The
 * states are protected by the cpuhp_slot_states mutex and an empty slot is
 * identified by having no name assigned.
 */
static int cpuhp_reserve_state(enum cpuhp_state state)
{
        enum cpuhp_state i;

        mutex_lock(&cpuhp_state_mutex);
        for (i = CPUHP_AP_ONLINE_DYN; i <= CPUHP_AP_ONLINE_DYN_END; i++) {
                if (cpuhp_ap_states[i].name)
                        continue;

                cpuhp_ap_states[i].name = "Reserved";
                mutex_unlock(&cpuhp_state_mutex);
                return i;
        }
        mutex_unlock(&cpuhp_state_mutex);
        WARN(1, "No more dynamic states available for CPU hotplug\n");
        return -ENOSPC;
}
/**
 * __cpuhp_setup_state - Setup the callbacks for a hotplug machine state
 * @state:      The state to setup
 * @name:       Name of the state
 * @invoke:     If true, the startup function is invoked for cpus where
 *              cpu state >= @state
 * @startup:    startup callback function
 * @teardown:   teardown callback function
 *
 * Returns 0 if successful, otherwise a proper error code
 */
int __cpuhp_setup_state(enum cpuhp_state state,
                        const char *name, bool invoke,
                        int (*startup)(unsigned int cpu),
                        int (*teardown)(unsigned int cpu))
{
        int cpu, ret = 0;
        int dyn_state = 0;

        if (cpuhp_cb_check(state) || !name)
                return -EINVAL;

        get_online_cpus();

        /* currently assignments for the ONLINE state are possible */
        if (state == CPUHP_AP_ONLINE_DYN) {
                dyn_state = 1;
                ret = cpuhp_reserve_state(state);
                if (ret < 0)
                        goto out;
                state = ret;
        }

        cpuhp_store_callbacks(state, name, startup, teardown);

        if (!invoke || !startup)
                goto out;

        /*
         * Try to call the startup callback for each present cpu
         * depending on the hotplug state of the cpu.
         */
        for_each_present_cpu(cpu) {
                struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
                int cpustate = st->state;

                if (cpustate < state)
                        continue;

                ret = cpuhp_issue_call(cpu, state, startup, true);
                if (ret) {
                        cpuhp_rollback_install(cpu, state, teardown);
                        cpuhp_store_callbacks(state, NULL, NULL, NULL);
                        goto out;
                }
        }
out:
        put_online_cpus();
        if (!ret && dyn_state)
                return state;
        return ret;
}
EXPORT_SYMBOL(__cpuhp_setup_state);
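/*
 * Usage sketch for the setup/remove pair (illustrative only; the callback
 * names are hypothetical). With CPUHP_AP_ONLINE_DYN the return value on
 * success is the dynamically allocated state, which must be passed back to
 * __cpuhp_remove_state() on teardown:
 *
 *      static int example_online(unsigned int cpu)
 *      {
 *              // per-cpu setup for the subsystem
 *              return 0;
 *      }
 *
 *      static int example_prep_down(unsigned int cpu)
 *      {
 *              // per-cpu teardown; must not fail
 *              return 0;
 *      }
 *
 *      state = __cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "example:online",
 *                                  true, example_online, example_prep_down);
 *      if (state < 0)
 *              return state;
 *      ...
 *      __cpuhp_remove_state(state, true);
 */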
/**
 * __cpuhp_remove_state - Remove the callbacks for a hotplug machine state
 * @state:      The state to remove
 * @invoke:     If true, the teardown function is invoked for cpus where
 *              cpu state >= @state
 *
 * The teardown callback is currently not allowed to fail. Think
 * about module removal!
 */
void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
{
        int (*teardown)(unsigned int cpu) = cpuhp_get_teardown_cb(state);
        int cpu;

        BUG_ON(cpuhp_cb_check(state));

        get_online_cpus();

        if (!invoke || !teardown)
                goto remove;

        /*
         * Call the teardown callback for each present cpu depending
         * on the hotplug state of the cpu. This function is not
         * allowed to fail currently!
         */
        for_each_present_cpu(cpu) {
                struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
                int cpustate = st->state;

                if (cpustate >= state)
                        cpuhp_issue_call(cpu, state, teardown, false);
        }
remove:
        cpuhp_store_callbacks(state, NULL, NULL, NULL);
        put_online_cpus();
}
EXPORT_SYMBOL(__cpuhp_remove_state);
#if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)
static ssize_t show_cpuhp_state(struct device *dev,
                                struct device_attribute *attr, char *buf)
{
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);

        return sprintf(buf, "%d\n", st->state);
}
static DEVICE_ATTR(state, 0444, show_cpuhp_state, NULL);
static ssize_t write_cpuhp_target(struct device *dev,
                                  struct device_attribute *attr,
                                  const char *buf, size_t count)
{
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
        struct cpuhp_step *sp;
        int target, ret;

        ret = kstrtoint(buf, 10, &target);
        if (ret)
                return ret;

#ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL
        if (target < CPUHP_OFFLINE || target > CPUHP_ONLINE)
                return -EINVAL;
#else
        if (target != CPUHP_OFFLINE && target != CPUHP_ONLINE)
                return -EINVAL;
#endif

        ret = lock_device_hotplug_sysfs();
        if (ret)
                return ret;

        mutex_lock(&cpuhp_state_mutex);
        sp = cpuhp_get_step(target);
        ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
        mutex_unlock(&cpuhp_state_mutex);
        if (ret)
                return ret;

        if (st->state < target)
                ret = do_cpu_up(dev->id, target);
        else
                ret = do_cpu_down(dev->id, target);

        unlock_device_hotplug();
        return ret ? ret : count;
}
static ssize_t show_cpuhp_target(struct device *dev,
                                 struct device_attribute *attr, char *buf)
{
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);

        return sprintf(buf, "%d\n", st->target);
}
static DEVICE_ATTR(target, 0644, show_cpuhp_target, write_cpuhp_target);
static struct attribute *cpuhp_cpu_attrs[] = {
        &dev_attr_state.attr,
        &dev_attr_target.attr,
        NULL
};

static struct attribute_group cpuhp_cpu_attr_group = {
        .attrs = cpuhp_cpu_attrs,
        .name = "hotplug",
};
static ssize_t show_cpuhp_states(struct device *dev,
                                 struct device_attribute *attr, char *buf)
{
        ssize_t cur, res = 0;
        int i;

        mutex_lock(&cpuhp_state_mutex);
        for (i = CPUHP_OFFLINE; i <= CPUHP_ONLINE; i++) {
                struct cpuhp_step *sp = cpuhp_get_step(i);

                if (sp->name) {
                        cur = sprintf(buf, "%3d: %s\n", i, sp->name);
                        buf += cur;
                        res += cur;
                }
        }
        mutex_unlock(&cpuhp_state_mutex);
        return res;
}
static DEVICE_ATTR(states, 0444, show_cpuhp_states, NULL);
static struct attribute *cpuhp_cpu_root_attrs[] = {
        &dev_attr_states.attr,
        NULL
};

static struct attribute_group cpuhp_cpu_root_attr_group = {
        .attrs = cpuhp_cpu_root_attrs,
        .name = "hotplug",
};
static int __init cpuhp_sysfs_init(void)
{
        int cpu, ret;

        ret = sysfs_create_group(&cpu_subsys.dev_root->kobj,
                                 &cpuhp_cpu_root_attr_group);
        if (ret)
                return ret;

        for_each_possible_cpu(cpu) {
                struct device *dev = get_cpu_device(cpu);

                if (!dev)
                        continue;
                ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group);
                if (ret)
                        return ret;
        }
        return 0;
}
device_initcall(cpuhp_sysfs_init);
#endif
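/*
 * The attribute groups above surface as (sketch of the resulting layout):
 *
 *      /sys/devices/system/cpu/hotplug/states       - all named states
 *      /sys/devices/system/cpu/cpuN/hotplug/state   - current state of cpuN
 *      /sys/devices/system/cpu/cpuN/hotplug/target  - writable target state
 *
 * e.g. writing 0 (CPUHP_OFFLINE) to "target" takes a CPU down through
 * do_cpu_down(), much like "echo 0 > online" does.
 */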
/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents, for each bit number nr, the NR_CPUS-bit value 1 << nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)       [x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)       MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)       MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)       MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

        MASK_DECLARE_8(0),      MASK_DECLARE_8(8),
        MASK_DECLARE_8(16),     MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
        MASK_DECLARE_8(32),     MASK_DECLARE_8(40),
        MASK_DECLARE_8(48),     MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
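/*
 * For illustration: get_cpu_mask() in <linux/cpumask.h> (behind
 * cpumask_of()) derives a constant mask pointer from this table roughly as
 *
 *      const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
 *      p -= cpu / BITS_PER_LONG;
 *
 * so the word holding @cpu reads as 1UL << (cpu % BITS_PER_LONG), while
 * every other word backs into neighbouring rows (ultimately the all-zero
 * row 0), which never set anything beyond their first word.
 */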
const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
struct cpumask __cpu_possible_mask __read_mostly
        = {CPU_BITS_ALL};
#else
struct cpumask __cpu_possible_mask __read_mostly;
#endif
EXPORT_SYMBOL(__cpu_possible_mask);

struct cpumask __cpu_online_mask __read_mostly;
EXPORT_SYMBOL(__cpu_online_mask);

struct cpumask __cpu_present_mask __read_mostly;
EXPORT_SYMBOL(__cpu_present_mask);

struct cpumask __cpu_active_mask __read_mostly;
EXPORT_SYMBOL(__cpu_active_mask);
void init_cpu_present(const struct cpumask *src)
{
        cpumask_copy(&__cpu_present_mask, src);
}

void init_cpu_possible(const struct cpumask *src)
{
        cpumask_copy(&__cpu_possible_mask, src);
}

void init_cpu_online(const struct cpumask *src)
{
        cpumask_copy(&__cpu_online_mask, src);
}
/*
 * Activate the first processor.
 */
void __init boot_cpu_init(void)
{
        int cpu = smp_processor_id();

        /* Mark the boot cpu "present", "online" etc for SMP and UP case */
        set_cpu_online(cpu, true);
        set_cpu_active(cpu, true);
        set_cpu_present(cpu, true);
        set_cpu_possible(cpu, true);
}

/*
 * Must be called _AFTER_ setting up the per_cpu areas
 */
void __init boot_cpu_state_init(void)
{
        per_cpu_ptr(&cpuhp_state, smp_processor_id())->state = CPUHP_ONLINE;
}