Linux 4.9.215 - kernel/cpu.c
1 /* CPU control.
2 * (C) 2001, 2002, 2003, 2004 Rusty Russell
4 * This code is licenced under the GPL.
5 */
6 #include <linux/proc_fs.h>
7 #include <linux/smp.h>
8 #include <linux/init.h>
9 #include <linux/notifier.h>
10 #include <linux/sched.h>
11 #include <linux/sched/smt.h>
12 #include <linux/unistd.h>
13 #include <linux/cpu.h>
14 #include <linux/oom.h>
15 #include <linux/rcupdate.h>
16 #include <linux/export.h>
17 #include <linux/bug.h>
18 #include <linux/kthread.h>
19 #include <linux/stop_machine.h>
20 #include <linux/mutex.h>
21 #include <linux/gfp.h>
22 #include <linux/suspend.h>
23 #include <linux/lockdep.h>
24 #include <linux/tick.h>
25 #include <linux/irq.h>
26 #include <linux/smpboot.h>
27 #include <linux/relay.h>
28 #include <linux/slab.h>
30 #include <trace/events/power.h>
31 #define CREATE_TRACE_POINTS
32 #include <trace/events/cpuhp.h>
34 #include "smpboot.h"
36 /**
37 * cpuhp_cpu_state - Per cpu hotplug state storage
38 * @state: The current cpu state
39 * @target: The target state
40 * @thread: Pointer to the hotplug thread
41 * @should_run: Thread should execute
42 * @rollback: Perform a rollback
43 * @single: Single callback invocation
44 * @bringup: Single callback bringup or teardown selector
45 * @cb_state: The state for a single callback (install/uninstall)
46 * @result: Result of the operation
47 * @done: Signal completion to the issuer of the task
49 struct cpuhp_cpu_state {
50 enum cpuhp_state state;
51 enum cpuhp_state target;
52 #ifdef CONFIG_SMP
53 struct task_struct *thread;
54 bool should_run;
55 bool rollback;
56 bool single;
57 bool bringup;
58 bool booted_once;
59 struct hlist_node *node;
60 enum cpuhp_state cb_state;
61 int result;
62 struct completion done;
63 #endif
66 static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state);
68 #if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP)
69 static struct lock_class_key cpuhp_state_key;
70 static struct lockdep_map cpuhp_state_lock_map =
71 STATIC_LOCKDEP_MAP_INIT("cpuhp_state", &cpuhp_state_key);
72 #endif
74 /**
75 * cpuhp_step - Hotplug state machine step
76 * @name: Name of the step
77 * @startup: Startup function of the step
78 * @teardown: Teardown function of the step
79 * @skip_onerr: Do not invoke the functions on error rollback
80 * Will go away once the notifiers are gone
81 * @cant_stop: Bringup/teardown can't be stopped at this step
83 struct cpuhp_step {
84 const char *name;
85 union {
86 int (*single)(unsigned int cpu);
87 int (*multi)(unsigned int cpu,
88 struct hlist_node *node);
89 } startup;
90 union {
91 int (*single)(unsigned int cpu);
92 int (*multi)(unsigned int cpu,
93 struct hlist_node *node);
94 } teardown;
95 struct hlist_head list;
96 bool skip_onerr;
97 bool cant_stop;
98 bool multi_instance;
101 static DEFINE_MUTEX(cpuhp_state_mutex);
102 static struct cpuhp_step cpuhp_bp_states[];
103 static struct cpuhp_step cpuhp_ap_states[];
105 static bool cpuhp_is_ap_state(enum cpuhp_state state)
108 * The extra check for CPUHP_TEARDOWN_CPU is only for documentation
109 * purposes as that state is handled explicitly in cpu_down.
111 return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU;
114 static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
116 struct cpuhp_step *sp;
118 sp = cpuhp_is_ap_state(state) ? cpuhp_ap_states : cpuhp_bp_states;
119 return sp + state;
123 * cpuhp_invoke_callback - Invoke the callbacks for a given state
124 * @cpu: The cpu for which the callback should be invoked
125 * @state: The step in the state machine
126 * @bringup: True if the bringup callback should be invoked
128 * Called from cpu hotplug and from the state register machinery.
130 static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
131 bool bringup, struct hlist_node *node)
133 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
134 struct cpuhp_step *step = cpuhp_get_step(state);
135 int (*cbm)(unsigned int cpu, struct hlist_node *node);
136 int (*cb)(unsigned int cpu);
137 int ret, cnt;
139 if (!step->multi_instance) {
140 cb = bringup ? step->startup.single : step->teardown.single;
141 if (!cb)
142 return 0;
143 trace_cpuhp_enter(cpu, st->target, state, cb);
144 ret = cb(cpu);
145 trace_cpuhp_exit(cpu, st->state, state, ret);
146 return ret;
148 cbm = bringup ? step->startup.multi : step->teardown.multi;
149 if (!cbm)
150 return 0;
152 /* Single invocation for instance add/remove */
153 if (node) {
154 trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
155 ret = cbm(cpu, node);
156 trace_cpuhp_exit(cpu, st->state, state, ret);
157 return ret;
160 /* State transition. Invoke on all instances */
161 cnt = 0;
162 hlist_for_each(node, &step->list) {
163 trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
164 ret = cbm(cpu, node);
165 trace_cpuhp_exit(cpu, st->state, state, ret);
166 if (ret)
167 goto err;
168 cnt++;
170 return 0;
171 err:
172 /* Rollback the instances if one failed */
173 cbm = !bringup ? step->startup.multi : step->teardown.multi;
174 if (!cbm)
175 return ret;
177 hlist_for_each(node, &step->list) {
178 if (!cnt--)
179 break;
180 cbm(cpu, node);
182 return ret;
185 #ifdef CONFIG_SMP
186 /* Serializes the updates to cpu_online_mask, cpu_present_mask */
187 static DEFINE_MUTEX(cpu_add_remove_lock);
188 bool cpuhp_tasks_frozen;
189 EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);
192 * The following two APIs (cpu_maps_update_begin/done) must be used when
193 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
194 * The APIs cpu_notifier_register_begin/done() must be used to protect CPU
195 * hotplug callback (un)registration performed using __register_cpu_notifier()
196 * or __unregister_cpu_notifier().
198 void cpu_maps_update_begin(void)
200 mutex_lock(&cpu_add_remove_lock);
202 EXPORT_SYMBOL(cpu_notifier_register_begin);
204 void cpu_maps_update_done(void)
206 mutex_unlock(&cpu_add_remove_lock);
208 EXPORT_SYMBOL(cpu_notifier_register_done);
210 static RAW_NOTIFIER_HEAD(cpu_chain);
212 /* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
213 * Should always be manipulated under cpu_add_remove_lock
215 static int cpu_hotplug_disabled;
217 #ifdef CONFIG_HOTPLUG_CPU
219 static struct {
220 struct task_struct *active_writer;
221 /* wait queue to wake up the active_writer */
222 wait_queue_head_t wq;
223 /* verifies that no writer will get active while readers are active */
224 struct mutex lock;
226 * Also blocks the new readers during
227 * an ongoing cpu hotplug operation.
229 atomic_t refcount;
231 #ifdef CONFIG_DEBUG_LOCK_ALLOC
232 struct lockdep_map dep_map;
233 #endif
234 } cpu_hotplug = {
235 .active_writer = NULL,
236 .wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
237 .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
238 #ifdef CONFIG_DEBUG_LOCK_ALLOC
239 .dep_map = STATIC_LOCKDEP_MAP_INIT("cpu_hotplug.dep_map", &cpu_hotplug.dep_map),
240 #endif
243 /* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
244 #define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
245 #define cpuhp_lock_acquire_tryread() \
246 lock_map_acquire_tryread(&cpu_hotplug.dep_map)
247 #define cpuhp_lock_acquire() lock_map_acquire(&cpu_hotplug.dep_map)
248 #define cpuhp_lock_release() lock_map_release(&cpu_hotplug.dep_map)
251 void get_online_cpus(void)
253 might_sleep();
254 if (cpu_hotplug.active_writer == current)
255 return;
256 cpuhp_lock_acquire_read();
257 mutex_lock(&cpu_hotplug.lock);
258 atomic_inc(&cpu_hotplug.refcount);
259 mutex_unlock(&cpu_hotplug.lock);
261 EXPORT_SYMBOL_GPL(get_online_cpus);
263 void put_online_cpus(void)
265 int refcount;
267 if (cpu_hotplug.active_writer == current)
268 return;
270 refcount = atomic_dec_return(&cpu_hotplug.refcount);
271 if (WARN_ON(refcount < 0)) /* try to fix things up */
272 atomic_inc(&cpu_hotplug.refcount);
274 if (refcount <= 0 && waitqueue_active(&cpu_hotplug.wq))
275 wake_up(&cpu_hotplug.wq);
277 cpuhp_lock_release();
280 EXPORT_SYMBOL_GPL(put_online_cpus);
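/*
 * Illustrative sketch (not part of the original file): a typical
 * reader-side critical section. Code that must see a stable
 * cpu_online_mask brackets the traversal with get_online_cpus() /
 * put_online_cpus(); cpu_hotplug_begin() below waits until this
 * refcount drops back to zero before a hotplug writer may proceed.
 */
#if 0	/* example only */
static unsigned int example_count_online_cpus(void)
{
	unsigned int cpu, n = 0;

	get_online_cpus();
	for_each_online_cpu(cpu)
		n++;
	put_online_cpus();

	return n;
}
#endif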
283 * This ensures that the hotplug operation can begin only when the
284 * refcount goes to zero.
286 * Note that during a cpu-hotplug operation, the new readers, if any,
287 * will be blocked by the cpu_hotplug.lock
289 * Since cpu_hotplug_begin() is always called after invoking
290 * cpu_maps_update_begin(), we can be sure that only one writer is active.
292 * Note that theoretically, there is a possibility of a livelock:
293 * - Refcount goes to zero, last reader wakes up the sleeping
294 * writer.
295 * - Last reader unlocks the cpu_hotplug.lock.
296 * - A new reader arrives at this moment, bumps up the refcount.
297 * - The writer acquires the cpu_hotplug.lock finds the refcount
298 * non zero and goes to sleep again.
300 * However, this is very difficult to achieve in practice since
301 * get_online_cpus() is not an API which is called all that often.
304 void cpu_hotplug_begin(void)
306 DEFINE_WAIT(wait);
308 cpu_hotplug.active_writer = current;
309 cpuhp_lock_acquire();
311 for (;;) {
312 mutex_lock(&cpu_hotplug.lock);
313 prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE);
314 if (likely(!atomic_read(&cpu_hotplug.refcount)))
315 break;
316 mutex_unlock(&cpu_hotplug.lock);
317 schedule();
319 finish_wait(&cpu_hotplug.wq, &wait);
322 void cpu_hotplug_done(void)
324 cpu_hotplug.active_writer = NULL;
325 mutex_unlock(&cpu_hotplug.lock);
326 cpuhp_lock_release();
330 * Wait for currently running CPU hotplug operations to complete (if any) and
331 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
332 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
333 * hotplug path before performing hotplug operations. So acquiring that lock
334 * guarantees mutual exclusion from any currently running hotplug operations.
336 void cpu_hotplug_disable(void)
338 cpu_maps_update_begin();
339 cpu_hotplug_disabled++;
340 cpu_maps_update_done();
342 EXPORT_SYMBOL_GPL(cpu_hotplug_disable);
344 static void __cpu_hotplug_enable(void)
346 if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n"))
347 return;
348 cpu_hotplug_disabled--;
351 void cpu_hotplug_enable(void)
353 cpu_maps_update_begin();
354 __cpu_hotplug_enable();
355 cpu_maps_update_done();
357 EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
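/*
 * Illustrative sketch (not part of the original file): callers that
 * must keep the CPU topology fixed across a longer operation (for
 * example the PM notifier at the bottom of this file) pair the two
 * helpers:
 *
 *	cpu_hotplug_disable();
 *	... work that must not race with cpu_up()/cpu_down() ...
 *	cpu_hotplug_enable();
 */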
358 #endif /* CONFIG_HOTPLUG_CPU */
361 * Architectures that need SMT-specific errata handling during SMT hotplug
362 * should override this.
364 void __weak arch_smt_update(void) { }
366 #ifdef CONFIG_HOTPLUG_SMT
367 enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
368 EXPORT_SYMBOL_GPL(cpu_smt_control);
370 static bool cpu_smt_available __read_mostly;
372 void __init cpu_smt_disable(bool force)
374 if (cpu_smt_control == CPU_SMT_FORCE_DISABLED ||
375 cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
376 return;
378 if (force) {
379 pr_info("SMT: Force disabled\n");
380 cpu_smt_control = CPU_SMT_FORCE_DISABLED;
381 } else {
382 pr_info("SMT: disabled\n");
383 cpu_smt_control = CPU_SMT_DISABLED;
388 * The decision whether SMT is supported can only be made after the full
389 * CPU identification. Called from architecture code before non-boot CPUs
390 * are brought up.
392 void __init cpu_smt_check_topology_early(void)
394 if (!topology_smt_supported())
395 cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
399 * If SMT was disabled by BIOS, detect it here, after the CPUs have been
400 * brought online. This ensures the smt/l1tf sysfs entries are consistent
401 * with reality. cpu_smt_available is set to true during the bringup of
402 * non-boot CPUs when an SMT sibling is detected. Note, this may overwrite
403 * cpu_smt_control's previous setting.
405 void __init cpu_smt_check_topology(void)
407 if (!cpu_smt_available)
408 cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
411 static int __init smt_cmdline_disable(char *str)
413 cpu_smt_disable(str && !strcmp(str, "force"));
414 return 0;
416 early_param("nosmt", smt_cmdline_disable);
418 static inline bool cpu_smt_allowed(unsigned int cpu)
420 if (topology_is_primary_thread(cpu))
421 return true;
424 * If the CPU is not a 'primary' thread and the booted_once bit is
425 * set then the processor has SMT support. Store this information
426 * for the late check of SMT support in cpu_smt_check_topology().
428 if (per_cpu(cpuhp_state, cpu).booted_once)
429 cpu_smt_available = true;
431 if (cpu_smt_control == CPU_SMT_ENABLED)
432 return true;
435 * On x86 it's required to boot all logical CPUs at least once so
436 * that the init code can get a chance to set CR4.MCE on each
436 * CPU. Otherwise, a broadcast MCE observing CR4.MCE=0b on any
437 * core will shut down the machine.
440 return !per_cpu(cpuhp_state, cpu).booted_once;
442 #else
443 static inline bool cpu_smt_allowed(unsigned int cpu) { return true; }
444 #endif
446 /* Need to know about CPUs going up/down? */
447 int register_cpu_notifier(struct notifier_block *nb)
449 int ret;
450 cpu_maps_update_begin();
451 ret = raw_notifier_chain_register(&cpu_chain, nb);
452 cpu_maps_update_done();
453 return ret;
456 int __register_cpu_notifier(struct notifier_block *nb)
458 return raw_notifier_chain_register(&cpu_chain, nb);
461 static int __cpu_notify(unsigned long val, unsigned int cpu, int nr_to_call,
462 int *nr_calls)
464 unsigned long mod = cpuhp_tasks_frozen ? CPU_TASKS_FROZEN : 0;
465 void *hcpu = (void *)(long)cpu;
467 int ret;
469 ret = __raw_notifier_call_chain(&cpu_chain, val | mod, hcpu, nr_to_call,
470 nr_calls);
472 return notifier_to_errno(ret);
475 static int cpu_notify(unsigned long val, unsigned int cpu)
477 return __cpu_notify(val, cpu, -1, NULL);
480 static void cpu_notify_nofail(unsigned long val, unsigned int cpu)
482 BUG_ON(cpu_notify(val, cpu));
485 /* Notifier wrappers for transitioning to state machine */
486 static int notify_prepare(unsigned int cpu)
488 int nr_calls = 0;
489 int ret;
491 ret = __cpu_notify(CPU_UP_PREPARE, cpu, -1, &nr_calls);
492 if (ret) {
493 nr_calls--;
494 printk(KERN_WARNING "%s: attempt to bring up CPU %u failed\n",
495 __func__, cpu);
496 __cpu_notify(CPU_UP_CANCELED, cpu, nr_calls, NULL);
498 return ret;
501 static int notify_online(unsigned int cpu)
503 cpu_notify(CPU_ONLINE, cpu);
504 return 0;
507 static void __cpuhp_kick_ap_work(struct cpuhp_cpu_state *st);
509 static int bringup_wait_for_ap(unsigned int cpu)
511 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
513 /* Wait for the CPU to reach CPUHP_AP_ONLINE_IDLE */
514 wait_for_completion(&st->done);
515 if (WARN_ON_ONCE((!cpu_online(cpu))))
516 return -ECANCELED;
518 /* Unpark the hotplug thread of the target cpu */
519 kthread_unpark(st->thread);
522 * SMT soft disabling on X86 requires bringing the CPU out of the
523 * BIOS 'wait for SIPI' state in order to set the CR4.MCE bit. The
524 * CPU marked itself as booted_once in notify_cpu_starting() so the
525 * cpu_smt_allowed() check will now return false if this is not the
526 * primary sibling.
528 if (!cpu_smt_allowed(cpu))
529 return -ECANCELED;
531 /* Should we go further up ? */
532 if (st->target > CPUHP_AP_ONLINE_IDLE) {
533 __cpuhp_kick_ap_work(st);
534 wait_for_completion(&st->done);
536 return st->result;
539 static int bringup_cpu(unsigned int cpu)
541 struct task_struct *idle = idle_thread_get(cpu);
542 int ret;
545 * Some architectures have to walk the irq descriptors to
546 * setup the vector space for the cpu which comes online.
547 * Prevent irq alloc/free across the bringup.
549 irq_lock_sparse();
551 /* Arch-specific enabling code. */
552 ret = __cpu_up(cpu, idle);
553 irq_unlock_sparse();
554 if (ret) {
555 cpu_notify(CPU_UP_CANCELED, cpu);
556 return ret;
558 return bringup_wait_for_ap(cpu);
562 * Hotplug state machine related functions
564 static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st)
566 for (st->state++; st->state < st->target; st->state++) {
567 struct cpuhp_step *step = cpuhp_get_step(st->state);
569 if (!step->skip_onerr)
570 cpuhp_invoke_callback(cpu, st->state, true, NULL);
574 static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
575 enum cpuhp_state target)
577 enum cpuhp_state prev_state = st->state;
578 int ret = 0;
580 for (; st->state > target; st->state--) {
581 ret = cpuhp_invoke_callback(cpu, st->state, false, NULL);
582 if (ret) {
583 st->target = prev_state;
584 undo_cpu_down(cpu, st);
585 break;
588 return ret;
591 static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
593 for (st->state--; st->state > st->target; st->state--) {
594 struct cpuhp_step *step = cpuhp_get_step(st->state);
596 if (!step->skip_onerr)
597 cpuhp_invoke_callback(cpu, st->state, false, NULL);
601 static inline bool can_rollback_cpu(struct cpuhp_cpu_state *st)
603 if (IS_ENABLED(CONFIG_HOTPLUG_CPU))
604 return true;
606 * When CPU hotplug is disabled, then taking the CPU down is not
607 * possible because takedown_cpu() and the architecture and
608 * subsystem specific mechanisms are not available. So the CPU
609 * which would be completely unplugged again needs to stay around
610 * in the current state.
612 return st->state <= CPUHP_BRINGUP_CPU;
615 static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
616 enum cpuhp_state target)
618 enum cpuhp_state prev_state = st->state;
619 int ret = 0;
621 while (st->state < target) {
622 st->state++;
623 ret = cpuhp_invoke_callback(cpu, st->state, true, NULL);
624 if (ret) {
625 if (can_rollback_cpu(st)) {
626 st->target = prev_state;
627 undo_cpu_up(cpu, st);
629 break;
632 return ret;
636 * The cpu hotplug threads manage the bringup and teardown of the cpus
638 static void cpuhp_create(unsigned int cpu)
640 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
642 init_completion(&st->done);
645 static int cpuhp_should_run(unsigned int cpu)
647 struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
649 return st->should_run;
652 /* Execute the teardown callbacks. Used to be CPU_DOWN_PREPARE */
653 static int cpuhp_ap_offline(unsigned int cpu, struct cpuhp_cpu_state *st)
655 enum cpuhp_state target = max((int)st->target, CPUHP_TEARDOWN_CPU);
657 return cpuhp_down_callbacks(cpu, st, target);
660 /* Execute the online startup callbacks. Used to be CPU_ONLINE */
661 static int cpuhp_ap_online(unsigned int cpu, struct cpuhp_cpu_state *st)
663 return cpuhp_up_callbacks(cpu, st, st->target);
667 * Execute teardown/startup callbacks on the plugged cpu. Also used to invoke
668 * callbacks when a state gets [un]installed at runtime.
670 static void cpuhp_thread_fun(unsigned int cpu)
672 struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
673 int ret = 0;
676 * Paired with the mb() in cpuhp_kick_ap_work and
677 * cpuhp_invoke_ap_callback, so the work set is consistently visible.
679 smp_mb();
680 if (!st->should_run)
681 return;
683 st->should_run = false;
685 lock_map_acquire(&cpuhp_state_lock_map);
686 /* Single callback invocation for [un]install ? */
687 if (st->single) {
688 if (st->cb_state < CPUHP_AP_ONLINE) {
689 local_irq_disable();
690 ret = cpuhp_invoke_callback(cpu, st->cb_state,
691 st->bringup, st->node);
692 local_irq_enable();
693 } else {
694 ret = cpuhp_invoke_callback(cpu, st->cb_state,
695 st->bringup, st->node);
697 } else if (st->rollback) {
698 BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE);
700 undo_cpu_down(cpu, st);
702 * This is a momentary workaround to keep the notifier users
703 * happy. It will go away once we get rid of the notifiers.
705 cpu_notify_nofail(CPU_DOWN_FAILED, cpu);
706 st->rollback = false;
707 } else {
708 /* Cannot happen .... */
709 BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE);
711 /* Regular hotplug work */
712 if (st->state < st->target)
713 ret = cpuhp_ap_online(cpu, st);
714 else if (st->state > st->target)
715 ret = cpuhp_ap_offline(cpu, st);
717 lock_map_release(&cpuhp_state_lock_map);
718 st->result = ret;
719 complete(&st->done);
722 /* Invoke a single callback on a remote cpu */
723 static int
724 cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
725 struct hlist_node *node)
727 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
729 if (!cpu_online(cpu))
730 return 0;
732 lock_map_acquire(&cpuhp_state_lock_map);
733 lock_map_release(&cpuhp_state_lock_map);
736 * If we are up and running, use the hotplug thread. For early calls
737 * we invoke the thread function directly.
739 if (!st->thread)
740 return cpuhp_invoke_callback(cpu, state, bringup, node);
742 st->cb_state = state;
743 st->single = true;
744 st->bringup = bringup;
745 st->node = node;
748 * Make sure the above stores are visible before should_run becomes
749 * true. Paired with the mb() above in cpuhp_thread_fun()
751 smp_mb();
752 st->should_run = true;
753 wake_up_process(st->thread);
754 wait_for_completion(&st->done);
755 return st->result;
758 /* Regular hotplug invocation of the AP hotplug thread */
759 static void __cpuhp_kick_ap_work(struct cpuhp_cpu_state *st)
761 st->result = 0;
762 st->single = false;
764 * Make sure the above stores are visible before should_run becomes
765 * true. Paired with the mb() above in cpuhp_thread_fun()
767 smp_mb();
768 st->should_run = true;
769 wake_up_process(st->thread);
772 static int cpuhp_kick_ap_work(unsigned int cpu)
774 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
775 enum cpuhp_state state = st->state;
777 trace_cpuhp_enter(cpu, st->target, state, cpuhp_kick_ap_work);
778 lock_map_acquire(&cpuhp_state_lock_map);
779 lock_map_release(&cpuhp_state_lock_map);
780 __cpuhp_kick_ap_work(st);
781 wait_for_completion(&st->done);
782 trace_cpuhp_exit(cpu, st->state, state, st->result);
783 return st->result;
786 static struct smp_hotplug_thread cpuhp_threads = {
787 .store = &cpuhp_state.thread,
788 .create = &cpuhp_create,
789 .thread_should_run = cpuhp_should_run,
790 .thread_fn = cpuhp_thread_fun,
791 .thread_comm = "cpuhp/%u",
792 .selfparking = true,
795 void __init cpuhp_threads_init(void)
797 BUG_ON(smpboot_register_percpu_thread(&cpuhp_threads));
798 kthread_unpark(this_cpu_read(cpuhp_state.thread));
801 EXPORT_SYMBOL(register_cpu_notifier);
802 EXPORT_SYMBOL(__register_cpu_notifier);
803 void unregister_cpu_notifier(struct notifier_block *nb)
805 cpu_maps_update_begin();
806 raw_notifier_chain_unregister(&cpu_chain, nb);
807 cpu_maps_update_done();
809 EXPORT_SYMBOL(unregister_cpu_notifier);
811 void __unregister_cpu_notifier(struct notifier_block *nb)
813 raw_notifier_chain_unregister(&cpu_chain, nb);
815 EXPORT_SYMBOL(__unregister_cpu_notifier);
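/*
 * Illustrative sketch (not part of the original file): the legacy
 * notifier interface that these helpers still export. A subsystem
 * supplies a notifier_block and reacts to the CPU_* actions; the
 * CPU_TASKS_FROZEN modifier is masked off so suspend/resume
 * transitions are handled like runtime hotplug.
 */
#if 0	/* example only */
static int example_cpu_callback(struct notifier_block *nb,
				unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
		pr_info("example: CPU%u is online\n", cpu);
		break;
	case CPU_DEAD:
		pr_info("example: CPU%u is dead\n", cpu);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_cpu_nb = {
	.notifier_call = example_cpu_callback,
};

/* register_cpu_notifier(&example_cpu_nb) hooks it into cpu_chain. */
#endif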
817 #ifdef CONFIG_HOTPLUG_CPU
819 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
820 * @cpu: a CPU id
822 * This function walks all processes, finds a valid mm struct for each one and
823 * then clears a corresponding bit in mm's cpumask. While this all sounds
824 * trivial, there are various non-obvious corner cases, which this function
825 * tries to solve in a safe manner.
827 * Also note that the function uses a somewhat relaxed locking scheme, so it may
828 * be called only for an already offlined CPU.
830 void clear_tasks_mm_cpumask(int cpu)
832 struct task_struct *p;
835 * This function is called after the cpu is taken down and marked
836 * offline, so it's not like new tasks will ever get this cpu set in
837 * their mm mask. -- Peter Zijlstra
838 * Thus, we may use rcu_read_lock() here, instead of grabbing
839 * full-fledged tasklist_lock.
841 WARN_ON(cpu_online(cpu));
842 rcu_read_lock();
843 for_each_process(p) {
844 struct task_struct *t;
847 * Main thread might exit, but other threads may still have
848 * a valid mm. Find one.
850 t = find_lock_task_mm(p);
851 if (!t)
852 continue;
853 cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
854 task_unlock(t);
856 rcu_read_unlock();
859 static inline void check_for_tasks(int dead_cpu)
861 struct task_struct *g, *p;
863 read_lock(&tasklist_lock);
864 for_each_process_thread(g, p) {
865 if (!p->on_rq)
866 continue;
868 * We do the check with unlocked task_rq(p)->lock.
869 * Order the reading so that we do not warn about a task
870 * which was running on this cpu in the past and has
871 * just been woken on another cpu.
873 rmb();
874 if (task_cpu(p) != dead_cpu)
875 continue;
877 pr_warn("Task %s (pid=%d) is on cpu %d (state=%ld, flags=%x)\n",
878 p->comm, task_pid_nr(p), dead_cpu, p->state, p->flags);
880 read_unlock(&tasklist_lock);
883 static int notify_down_prepare(unsigned int cpu)
885 int err, nr_calls = 0;
887 err = __cpu_notify(CPU_DOWN_PREPARE, cpu, -1, &nr_calls);
888 if (err) {
889 nr_calls--;
890 __cpu_notify(CPU_DOWN_FAILED, cpu, nr_calls, NULL);
891 pr_warn("%s: attempt to take down CPU %u failed\n",
892 __func__, cpu);
894 return err;
897 /* Take this CPU down. */
898 static int take_cpu_down(void *_param)
900 struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
901 enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
902 int err, cpu = smp_processor_id();
904 /* Ensure this CPU doesn't handle any more interrupts. */
905 err = __cpu_disable();
906 if (err < 0)
907 return err;
910 * We get here while we are in CPUHP_TEARDOWN_CPU state and we must not
911 * do this step again.
913 WARN_ON(st->state != CPUHP_TEARDOWN_CPU);
914 st->state--;
915 /* Invoke the former CPU_DYING callbacks */
916 for (; st->state > target; st->state--)
917 cpuhp_invoke_callback(cpu, st->state, false, NULL);
919 /* Give up timekeeping duties */
920 tick_handover_do_timer();
921 /* Park the stopper thread */
922 stop_machine_park(cpu);
923 return 0;
926 static int takedown_cpu(unsigned int cpu)
928 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
929 int err;
931 /* Park the smpboot threads */
932 kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);
935 * Prevent irq alloc/free while the dying cpu reorganizes the
936 * interrupt affinities.
938 irq_lock_sparse();
941 * So now all preempt/rcu users must observe !cpu_active().
943 err = stop_machine(take_cpu_down, NULL, cpumask_of(cpu));
944 if (err) {
945 /* CPU refused to die */
946 irq_unlock_sparse();
947 /* Unpark the hotplug thread so we can rollback there */
948 kthread_unpark(per_cpu_ptr(&cpuhp_state, cpu)->thread);
949 return err;
951 BUG_ON(cpu_online(cpu));
954 * The CPUHP_AP_SCHED_MIGRATE_DYING callback will have removed all
955 * runnable tasks from the cpu, there's only the idle task left now
956 * that the migration thread is done doing the stop_machine thing.
958 * Wait for the stop thread to go away.
960 wait_for_completion(&st->done);
961 BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);
963 /* Interrupts are moved away from the dying cpu, reenable alloc/free */
964 irq_unlock_sparse();
966 hotplug_cpu__broadcast_tick_pull(cpu);
967 /* This actually kills the CPU. */
968 __cpu_die(cpu);
970 tick_cleanup_dead_cpu(cpu);
971 return 0;
974 static int notify_dead(unsigned int cpu)
976 cpu_notify_nofail(CPU_DEAD, cpu);
977 check_for_tasks(cpu);
978 return 0;
981 static void cpuhp_complete_idle_dead(void *arg)
983 struct cpuhp_cpu_state *st = arg;
985 complete(&st->done);
988 void cpuhp_report_idle_dead(void)
990 struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
992 BUG_ON(st->state != CPUHP_AP_OFFLINE);
993 rcu_report_dead(smp_processor_id());
994 st->state = CPUHP_AP_IDLE_DEAD;
996 * We cannot call complete after rcu_report_dead() so we delegate it
997 * to an online cpu.
999 smp_call_function_single(cpumask_first(cpu_online_mask),
1000 cpuhp_complete_idle_dead, st, 0);
1003 #else
1004 #define notify_down_prepare NULL
1005 #define takedown_cpu NULL
1006 #define notify_dead NULL
1007 #endif
1009 #ifdef CONFIG_HOTPLUG_CPU
1011 /* Requires cpu_add_remove_lock to be held */
1012 static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
1013 enum cpuhp_state target)
1015 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1016 int prev_state, ret = 0;
1017 bool hasdied = false;
1019 if (num_online_cpus() == 1)
1020 return -EBUSY;
1022 if (!cpu_present(cpu))
1023 return -EINVAL;
1025 cpu_hotplug_begin();
1027 cpuhp_tasks_frozen = tasks_frozen;
1029 prev_state = st->state;
1030 st->target = target;
1032 * If the current CPU state is in the range of the AP hotplug thread,
1033 * then we need to kick the thread.
1035 if (st->state > CPUHP_TEARDOWN_CPU) {
1036 ret = cpuhp_kick_ap_work(cpu);
1038 * The AP side has done the error rollback already. Just
1039 * return the error code..
1041 if (ret)
1042 goto out;
1045 * We might have stopped still in the range of the AP hotplug
1046 * thread. Nothing to do anymore.
1048 if (st->state > CPUHP_TEARDOWN_CPU)
1049 goto out;
1052 * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need
1053 * to do the further cleanups.
1055 ret = cpuhp_down_callbacks(cpu, st, target);
1056 if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state) {
1057 st->target = prev_state;
1058 st->rollback = true;
1059 cpuhp_kick_ap_work(cpu);
1062 hasdied = prev_state != st->state && st->state == CPUHP_OFFLINE;
1063 out:
1064 cpu_hotplug_done();
1065 /* This post dead nonsense must die */
1066 if (!ret && hasdied)
1067 cpu_notify_nofail(CPU_POST_DEAD, cpu);
1068 arch_smt_update();
1069 return ret;
1072 static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target)
1074 if (cpu_hotplug_disabled)
1075 return -EBUSY;
1076 return _cpu_down(cpu, 0, target);
1079 static int do_cpu_down(unsigned int cpu, enum cpuhp_state target)
1081 int err;
1083 cpu_maps_update_begin();
1084 err = cpu_down_maps_locked(cpu, target);
1085 cpu_maps_update_done();
1086 return err;
1088 int cpu_down(unsigned int cpu)
1090 return do_cpu_down(cpu, CPUHP_OFFLINE);
1092 EXPORT_SYMBOL(cpu_down);
1093 #endif /*CONFIG_HOTPLUG_CPU*/
1096 * notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU
1097 * @cpu: cpu that just started
1099 * It must be called by the arch code on the new cpu, before the new cpu
1100 * enables interrupts and before the "boot" cpu returns from __cpu_up().
1102 void notify_cpu_starting(unsigned int cpu)
1104 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1105 enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);
1107 rcu_cpu_starting(cpu); /* Enables RCU usage on this CPU. */
1108 st->booted_once = true;
1109 while (st->state < target) {
1110 st->state++;
1111 cpuhp_invoke_callback(cpu, st->state, true, NULL);
1116 * Called from the idle task. Wake up the controlling task which brings the
1117 * hotplug thread of the upcoming CPU up and then delegates the rest of the
1118 * online bringup to the hotplug thread.
1120 void cpuhp_online_idle(enum cpuhp_state state)
1122 struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
1124 /* Happens for the boot cpu */
1125 if (state != CPUHP_AP_ONLINE_IDLE)
1126 return;
1129 * Unpark the stopper thread before we start the idle loop (and start
1130 * scheduling); this ensures the stopper task is always available.
1132 stop_machine_unpark(smp_processor_id());
1134 st->state = CPUHP_AP_ONLINE_IDLE;
1135 complete(&st->done);
1138 /* Requires cpu_add_remove_lock to be held */
1139 static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
1141 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1142 struct task_struct *idle;
1143 int ret = 0;
1145 cpu_hotplug_begin();
1147 if (!cpu_present(cpu)) {
1148 ret = -EINVAL;
1149 goto out;
1153 * The caller of do_cpu_up might have raced with another
1154 * caller. Ignore it for now.
1156 if (st->state >= target)
1157 goto out;
1159 if (st->state == CPUHP_OFFLINE) {
1160 /* Let it fail before we try to bring the cpu up */
1161 idle = idle_thread_get(cpu);
1162 if (IS_ERR(idle)) {
1163 ret = PTR_ERR(idle);
1164 goto out;
1168 cpuhp_tasks_frozen = tasks_frozen;
1170 st->target = target;
1172 * If the current CPU state is in the range of the AP hotplug thread,
1173 * then we need to kick the thread once more.
1175 if (st->state > CPUHP_BRINGUP_CPU) {
1176 ret = cpuhp_kick_ap_work(cpu);
1178 * The AP side has done the error rollback already. Just
1179 * return the error code..
1181 if (ret)
1182 goto out;
1186 * Try to reach the target state. We max out on the BP at
1187 * CPUHP_BRINGUP_CPU. After that the AP hotplug thread is
1188 * responsible for bringing it up to the target state.
1190 target = min((int)target, CPUHP_BRINGUP_CPU);
1191 ret = cpuhp_up_callbacks(cpu, st, target);
1192 out:
1193 cpu_hotplug_done();
1194 arch_smt_update();
1195 return ret;
1198 static int do_cpu_up(unsigned int cpu, enum cpuhp_state target)
1200 int err = 0;
1202 if (!cpu_possible(cpu)) {
1203 pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
1204 cpu);
1205 #if defined(CONFIG_IA64)
1206 pr_err("please check additional_cpus= boot parameter\n");
1207 #endif
1208 return -EINVAL;
1211 err = try_online_node(cpu_to_node(cpu));
1212 if (err)
1213 return err;
1215 cpu_maps_update_begin();
1217 if (cpu_hotplug_disabled) {
1218 err = -EBUSY;
1219 goto out;
1221 if (!cpu_smt_allowed(cpu)) {
1222 err = -EPERM;
1223 goto out;
1226 err = _cpu_up(cpu, 0, target);
1227 out:
1228 cpu_maps_update_done();
1229 return err;
1232 int cpu_up(unsigned int cpu)
1234 return do_cpu_up(cpu, CPUHP_ONLINE);
1236 EXPORT_SYMBOL_GPL(cpu_up);
1238 #ifdef CONFIG_PM_SLEEP_SMP
1239 static cpumask_var_t frozen_cpus;
1241 int freeze_secondary_cpus(int primary)
1243 int cpu, error = 0;
1245 cpu_maps_update_begin();
1246 if (!cpu_online(primary))
1247 primary = cpumask_first(cpu_online_mask);
1249 * We take down all of the non-boot CPUs in one shot to avoid races
1250 * with userspace trying to use CPU hotplug at the same time
1252 cpumask_clear(frozen_cpus);
1254 pr_info("Disabling non-boot CPUs ...\n");
1255 for_each_online_cpu(cpu) {
1256 if (cpu == primary)
1257 continue;
1258 trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
1259 error = _cpu_down(cpu, 1, CPUHP_OFFLINE);
1260 trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
1261 if (!error)
1262 cpumask_set_cpu(cpu, frozen_cpus);
1263 else {
1264 pr_err("Error taking CPU%d down: %d\n", cpu, error);
1265 break;
1269 if (!error)
1270 BUG_ON(num_online_cpus() > 1);
1271 else
1272 pr_err("Non-boot CPUs are not disabled\n");
1275 * Make sure the CPUs won't be enabled by someone else. We need to do
1276 * this even in case of failure as all disable_nonboot_cpus() users are
1277 * supposed to do enable_nonboot_cpus() on the failure path.
1279 cpu_hotplug_disabled++;
1281 cpu_maps_update_done();
1282 return error;
1285 void __weak arch_enable_nonboot_cpus_begin(void)
1289 void __weak arch_enable_nonboot_cpus_end(void)
1293 void enable_nonboot_cpus(void)
1295 int cpu, error;
1297 /* Allow everyone to use the CPU hotplug again */
1298 cpu_maps_update_begin();
1299 __cpu_hotplug_enable();
1300 if (cpumask_empty(frozen_cpus))
1301 goto out;
1303 pr_info("Enabling non-boot CPUs ...\n");
1305 arch_enable_nonboot_cpus_begin();
1307 for_each_cpu(cpu, frozen_cpus) {
1308 trace_suspend_resume(TPS("CPU_ON"), cpu, true);
1309 error = _cpu_up(cpu, 1, CPUHP_ONLINE);
1310 trace_suspend_resume(TPS("CPU_ON"), cpu, false);
1311 if (!error) {
1312 pr_info("CPU%d is up\n", cpu);
1313 continue;
1315 pr_warn("Error taking CPU%d up: %d\n", cpu, error);
1318 arch_enable_nonboot_cpus_end();
1320 cpumask_clear(frozen_cpus);
1321 out:
1322 cpu_maps_update_done();
1325 static int __init alloc_frozen_cpus(void)
1327 if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
1328 return -ENOMEM;
1329 return 0;
1331 core_initcall(alloc_frozen_cpus);
1334 * When callbacks for CPU hotplug notifications are being executed, we must
1335 * ensure that the state of the system with respect to the tasks being frozen
1336 * or not, as reported by the notification, remains unchanged *throughout the
1337 * duration* of the execution of the callbacks.
1338 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
1340 * This synchronization is implemented by mutually excluding regular CPU
1341 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
1342 * Hibernate notifications.
1344 static int
1345 cpu_hotplug_pm_callback(struct notifier_block *nb,
1346 unsigned long action, void *ptr)
1348 switch (action) {
1350 case PM_SUSPEND_PREPARE:
1351 case PM_HIBERNATION_PREPARE:
1352 cpu_hotplug_disable();
1353 break;
1355 case PM_POST_SUSPEND:
1356 case PM_POST_HIBERNATION:
1357 cpu_hotplug_enable();
1358 break;
1360 default:
1361 return NOTIFY_DONE;
1364 return NOTIFY_OK;
1368 static int __init cpu_hotplug_pm_sync_init(void)
1371 * cpu_hotplug_pm_callback has higher priority than x86
1372 * bsp_pm_callback which depends on cpu_hotplug_pm_callback
1373 * to disable cpu hotplug to avoid cpu hotplug race.
1375 pm_notifier(cpu_hotplug_pm_callback, 0);
1376 return 0;
1378 core_initcall(cpu_hotplug_pm_sync_init);
1380 #endif /* CONFIG_PM_SLEEP_SMP */
1382 #endif /* CONFIG_SMP */
1384 /* Boot processor state steps */
1385 static struct cpuhp_step cpuhp_bp_states[] = {
1386 [CPUHP_OFFLINE] = {
1387 .name = "offline",
1388 .startup.single = NULL,
1389 .teardown.single = NULL,
1391 #ifdef CONFIG_SMP
1392 [CPUHP_CREATE_THREADS]= {
1393 .name = "threads:prepare",
1394 .startup.single = smpboot_create_threads,
1395 .teardown.single = NULL,
1396 .cant_stop = true,
1398 [CPUHP_PERF_PREPARE] = {
1399 .name = "perf:prepare",
1400 .startup.single = perf_event_init_cpu,
1401 .teardown.single = perf_event_exit_cpu,
1403 [CPUHP_WORKQUEUE_PREP] = {
1404 .name = "workqueue:prepare",
1405 .startup.single = workqueue_prepare_cpu,
1406 .teardown.single = NULL,
1408 [CPUHP_HRTIMERS_PREPARE] = {
1409 .name = "hrtimers:prepare",
1410 .startup.single = hrtimers_prepare_cpu,
1411 .teardown.single = hrtimers_dead_cpu,
1413 [CPUHP_SMPCFD_PREPARE] = {
1414 .name = "smpcfd:prepare",
1415 .startup.single = smpcfd_prepare_cpu,
1416 .teardown.single = smpcfd_dead_cpu,
1418 [CPUHP_RELAY_PREPARE] = {
1419 .name = "relay:prepare",
1420 .startup.single = relay_prepare_cpu,
1421 .teardown.single = NULL,
1423 [CPUHP_SLAB_PREPARE] = {
1424 .name = "slab:prepare",
1425 .startup.single = slab_prepare_cpu,
1426 .teardown.single = slab_dead_cpu,
1428 [CPUHP_RCUTREE_PREP] = {
1429 .name = "RCU/tree:prepare",
1430 .startup.single = rcutree_prepare_cpu,
1431 .teardown.single = rcutree_dead_cpu,
1434 * Preparatory and dead notifiers. Will be replaced once the notifiers
1435 * are converted to states.
1437 [CPUHP_NOTIFY_PREPARE] = {
1438 .name = "notify:prepare",
1439 .startup.single = notify_prepare,
1440 .teardown.single = notify_dead,
1441 .skip_onerr = true,
1442 .cant_stop = true,
1445 * On the tear-down path, timers_dead_cpu() must be invoked
1446 * before blk_mq_queue_reinit_notify() from notify_dead(),
1447 * otherwise an RCU stall occurs.
1449 [CPUHP_TIMERS_PREPARE] = {
1450 .name = "timers:dead",
1451 .startup.single = timers_prepare_cpu,
1452 .teardown.single = timers_dead_cpu,
1454 /* Kicks the plugged cpu into life */
1455 [CPUHP_BRINGUP_CPU] = {
1456 .name = "cpu:bringup",
1457 .startup.single = bringup_cpu,
1458 .teardown.single = NULL,
1459 .cant_stop = true,
1462 * Handled on the control processor until the plugged processor manages
1463 * this itself.
1465 [CPUHP_TEARDOWN_CPU] = {
1466 .name = "cpu:teardown",
1467 .startup.single = NULL,
1468 .teardown.single = takedown_cpu,
1469 .cant_stop = true,
1471 #else
1472 [CPUHP_BRINGUP_CPU] = { },
1473 #endif
1476 /* Application processor state steps */
1477 static struct cpuhp_step cpuhp_ap_states[] = {
1478 #ifdef CONFIG_SMP
1479 /* Final state before CPU kills itself */
1480 [CPUHP_AP_IDLE_DEAD] = {
1481 .name = "idle:dead",
1484 * Last state before CPU enters the idle loop to die. Transient state
1485 * for synchronization.
1487 [CPUHP_AP_OFFLINE] = {
1488 .name = "ap:offline",
1489 .cant_stop = true,
1491 /* First state is scheduler control. Interrupts are disabled */
1492 [CPUHP_AP_SCHED_STARTING] = {
1493 .name = "sched:starting",
1494 .startup.single = sched_cpu_starting,
1495 .teardown.single = sched_cpu_dying,
1497 [CPUHP_AP_RCUTREE_DYING] = {
1498 .name = "RCU/tree:dying",
1499 .startup.single = NULL,
1500 .teardown.single = rcutree_dying_cpu,
1502 [CPUHP_AP_SMPCFD_DYING] = {
1503 .name = "smpcfd:dying",
1504 .startup.single = NULL,
1505 .teardown.single = smpcfd_dying_cpu,
1507 /* Entry state on starting. Interrupts enabled from here on. Transient
1508 * state for synchronization */
1509 [CPUHP_AP_ONLINE] = {
1510 .name = "ap:online",
1512 /* Handle smpboot threads park/unpark */
1513 [CPUHP_AP_SMPBOOT_THREADS] = {
1514 .name = "smpboot/threads:online",
1515 .startup.single = smpboot_unpark_threads,
1516 .teardown.single = smpboot_park_threads,
1518 [CPUHP_AP_PERF_ONLINE] = {
1519 .name = "perf:online",
1520 .startup.single = perf_event_init_cpu,
1521 .teardown.single = perf_event_exit_cpu,
1523 [CPUHP_AP_WORKQUEUE_ONLINE] = {
1524 .name = "workqueue:online",
1525 .startup.single = workqueue_online_cpu,
1526 .teardown.single = workqueue_offline_cpu,
1528 [CPUHP_AP_RCUTREE_ONLINE] = {
1529 .name = "RCU/tree:online",
1530 .startup.single = rcutree_online_cpu,
1531 .teardown.single = rcutree_offline_cpu,
1535 * Online/down_prepare notifiers. Will be removed once the notifiers
1536 * are converted to states.
1538 [CPUHP_AP_NOTIFY_ONLINE] = {
1539 .name = "notify:online",
1540 .startup.single = notify_online,
1541 .teardown.single = notify_down_prepare,
1542 .skip_onerr = true,
1544 #endif
1546 * The dynamically registered state space is here
1549 #ifdef CONFIG_SMP
1550 /* Last state is scheduler control setting the cpu active */
1551 [CPUHP_AP_ACTIVE] = {
1552 .name = "sched:active",
1553 .startup.single = sched_cpu_activate,
1554 .teardown.single = sched_cpu_deactivate,
1556 #endif
1558 /* CPU is fully up and running. */
1559 [CPUHP_ONLINE] = {
1560 .name = "online",
1561 .startup.single = NULL,
1562 .teardown.single = NULL,
1566 /* Sanity check for callbacks */
1567 static int cpuhp_cb_check(enum cpuhp_state state)
1569 if (state <= CPUHP_OFFLINE || state >= CPUHP_ONLINE)
1570 return -EINVAL;
1571 return 0;
1574 static void cpuhp_store_callbacks(enum cpuhp_state state,
1575 const char *name,
1576 int (*startup)(unsigned int cpu),
1577 int (*teardown)(unsigned int cpu),
1578 bool multi_instance)
1580 /* (Un)Install the callbacks for further cpu hotplug operations */
1581 struct cpuhp_step *sp;
1583 sp = cpuhp_get_step(state);
1584 sp->startup.single = startup;
1585 sp->teardown.single = teardown;
1586 sp->name = name;
1587 sp->multi_instance = multi_instance;
1588 INIT_HLIST_HEAD(&sp->list);
1591 static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
1593 return cpuhp_get_step(state)->teardown.single;
1597 * Call the startup/teardown function for a step either on the AP or
1598 * on the current CPU.
1600 static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup,
1601 struct hlist_node *node)
1603 struct cpuhp_step *sp = cpuhp_get_step(state);
1604 int ret;
1606 if ((bringup && !sp->startup.single) ||
1607 (!bringup && !sp->teardown.single))
1608 return 0;
1610 * The non-AP-bound callbacks can fail on bringup. On teardown,
1611 * e.g. module removal, we crash for now.
1613 #ifdef CONFIG_SMP
1614 if (cpuhp_is_ap_state(state))
1615 ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node);
1616 else
1617 ret = cpuhp_invoke_callback(cpu, state, bringup, node);
1618 #else
1619 ret = cpuhp_invoke_callback(cpu, state, bringup, node);
1620 #endif
1621 BUG_ON(ret && !bringup);
1622 return ret;
1626 * Called from __cpuhp_setup_state on a recoverable failure.
1628 * Note: The teardown callbacks for rollback are not allowed to fail!
1630 static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
1631 struct hlist_node *node)
1633 int cpu;
1635 /* Roll back the already executed steps on the other cpus */
1636 for_each_present_cpu(cpu) {
1637 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1638 int cpustate = st->state;
1640 if (cpu >= failedcpu)
1641 break;
1643 /* Did we invoke the startup call on that cpu ? */
1644 if (cpustate >= state)
1645 cpuhp_issue_call(cpu, state, false, node);
1650 * Returns a free slot for dynamic assignment in the online state space. The states
1651 * are protected by the cpuhp_state_mutex and an empty slot is identified
1652 * by having no name assigned.
1654 static int cpuhp_reserve_state(enum cpuhp_state state)
1656 enum cpuhp_state i;
1658 for (i = CPUHP_AP_ONLINE_DYN; i <= CPUHP_AP_ONLINE_DYN_END; i++) {
1659 if (cpuhp_ap_states[i].name)
1660 continue;
1662 cpuhp_ap_states[i].name = "Reserved";
1663 return i;
1665 WARN(1, "No more dynamic states available for CPU hotplug\n");
1666 return -ENOSPC;
1669 int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
1670 bool invoke)
1672 struct cpuhp_step *sp;
1673 int cpu;
1674 int ret;
1676 sp = cpuhp_get_step(state);
1677 if (sp->multi_instance == false)
1678 return -EINVAL;
1680 get_online_cpus();
1681 mutex_lock(&cpuhp_state_mutex);
1683 if (!invoke || !sp->startup.multi)
1684 goto add_node;
1687 * Try to call the startup callback for each present cpu
1688 * depending on the hotplug state of the cpu.
1690 for_each_present_cpu(cpu) {
1691 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1692 int cpustate = st->state;
1694 if (cpustate < state)
1695 continue;
1697 ret = cpuhp_issue_call(cpu, state, true, node);
1698 if (ret) {
1699 if (sp->teardown.multi)
1700 cpuhp_rollback_install(cpu, state, node);
1701 goto err;
1704 add_node:
1705 ret = 0;
1706 hlist_add_head(node, &sp->list);
1708 err:
1709 mutex_unlock(&cpuhp_state_mutex);
1710 put_online_cpus();
1711 return ret;
1713 EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
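/*
 * Note (not in the original source): a multi-instance state embeds a
 * struct hlist_node in each per-device structure and registers it via
 * the cpuhp_state_add_instance() wrapper, which ends up here. The
 * startup.multi/teardown.multi callbacks are then invoked once per
 * registered instance on every CPU that crosses the state.
 */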
1716 * __cpuhp_setup_state - Set up the callbacks for a hotplug machine state
1717 * @state: The state to setup
1718 * @invoke: If true, the startup function is invoked for cpus where
1719 * cpu state >= @state
1720 * @startup: startup callback function
1721 * @teardown: teardown callback function
1723 * Returns 0 if successful, otherwise a proper error code
1725 int __cpuhp_setup_state(enum cpuhp_state state,
1726 const char *name, bool invoke,
1727 int (*startup)(unsigned int cpu),
1728 int (*teardown)(unsigned int cpu),
1729 bool multi_instance)
1731 int cpu, ret = 0;
1732 int dyn_state = 0;
1734 if (cpuhp_cb_check(state) || !name)
1735 return -EINVAL;
1737 get_online_cpus();
1738 mutex_lock(&cpuhp_state_mutex);
1740 /* currently, dynamic assignments are only possible for the ONLINE state */
1741 if (state == CPUHP_AP_ONLINE_DYN) {
1742 dyn_state = 1;
1743 ret = cpuhp_reserve_state(state);
1744 if (ret < 0)
1745 goto out;
1746 state = ret;
1749 cpuhp_store_callbacks(state, name, startup, teardown, multi_instance);
1751 if (!invoke || !startup)
1752 goto out;
1755 * Try to call the startup callback for each present cpu
1756 * depending on the hotplug state of the cpu.
1758 for_each_present_cpu(cpu) {
1759 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1760 int cpustate = st->state;
1762 if (cpustate < state)
1763 continue;
1765 ret = cpuhp_issue_call(cpu, state, true, NULL);
1766 if (ret) {
1767 if (teardown)
1768 cpuhp_rollback_install(cpu, state, NULL);
1769 cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
1770 goto out;
1773 out:
1774 mutex_unlock(&cpuhp_state_mutex);
1776 put_online_cpus();
1777 if (!ret && dyn_state)
1778 return state;
1779 return ret;
1781 EXPORT_SYMBOL(__cpuhp_setup_state);
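/*
 * Illustrative sketch (not part of the original file): typical usage
 * through the cpuhp_setup_state() wrapper from <linux/cpuhotplug.h>,
 * which calls __cpuhp_setup_state() with invoke == true. Passing
 * CPUHP_AP_ONLINE_DYN asks cpuhp_reserve_state() above for a free
 * dynamic slot; the allocated state number is returned so it can be
 * handed to cpuhp_remove_state() on module exit.
 */
#if 0	/* example only */
static int example_cpu_online(unsigned int cpu)
{
	pr_info("example: CPU%u reached the online state\n", cpu);
	return 0;
}

static int example_cpu_offline(unsigned int cpu)
{
	pr_info("example: CPU%u is going down\n", cpu);
	return 0;
}

static int __init example_hp_init(void)
{
	int state;

	state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "example:online",
				  example_cpu_online, example_cpu_offline);
	if (state < 0)
		return state;

	/* ... later: cpuhp_remove_state(state); */
	return 0;
}
#endif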
1783 int __cpuhp_state_remove_instance(enum cpuhp_state state,
1784 struct hlist_node *node, bool invoke)
1786 struct cpuhp_step *sp = cpuhp_get_step(state);
1787 int cpu;
1789 BUG_ON(cpuhp_cb_check(state));
1791 if (!sp->multi_instance)
1792 return -EINVAL;
1794 get_online_cpus();
1795 mutex_lock(&cpuhp_state_mutex);
1797 if (!invoke || !cpuhp_get_teardown_cb(state))
1798 goto remove;
1800 * Call the teardown callback for each present cpu depending
1801 * on the hotplug state of the cpu. This function is not
1802 * allowed to fail currently!
1804 for_each_present_cpu(cpu) {
1805 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1806 int cpustate = st->state;
1808 if (cpustate >= state)
1809 cpuhp_issue_call(cpu, state, false, node);
1812 remove:
1813 hlist_del(node);
1814 mutex_unlock(&cpuhp_state_mutex);
1815 put_online_cpus();
1817 return 0;
1819 EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance);
1821 * __cpuhp_remove_state - Remove the callbacks for a hotplug machine state
1822 * @state: The state to remove
1823 * @invoke: If true, the teardown function is invoked for cpus where
1824 * cpu state >= @state
1826 * The teardown callback is currently not allowed to fail. Think
1827 * about module removal!
1829 void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
1831 struct cpuhp_step *sp = cpuhp_get_step(state);
1832 int cpu;
1834 BUG_ON(cpuhp_cb_check(state));
1836 get_online_cpus();
1837 mutex_lock(&cpuhp_state_mutex);
1839 if (sp->multi_instance) {
1840 WARN(!hlist_empty(&sp->list),
1841 "Error: Removing state %d which has instances left.\n",
1842 state);
1843 goto remove;
1846 if (!invoke || !cpuhp_get_teardown_cb(state))
1847 goto remove;
1850 * Call the teardown callback for each present cpu depending
1851 * on the hotplug state of the cpu. This function is not
1852 * allowed to fail currently!
1854 for_each_present_cpu(cpu) {
1855 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1856 int cpustate = st->state;
1858 if (cpustate >= state)
1859 cpuhp_issue_call(cpu, state, false, NULL);
1861 remove:
1862 cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
1863 mutex_unlock(&cpuhp_state_mutex);
1864 put_online_cpus();
1866 EXPORT_SYMBOL(__cpuhp_remove_state);
1868 #if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)
1869 static ssize_t show_cpuhp_state(struct device *dev,
1870 struct device_attribute *attr, char *buf)
1872 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
1874 return sprintf(buf, "%d\n", st->state);
1876 static DEVICE_ATTR(state, 0444, show_cpuhp_state, NULL);
1878 static ssize_t write_cpuhp_target(struct device *dev,
1879 struct device_attribute *attr,
1880 const char *buf, size_t count)
1882 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
1883 struct cpuhp_step *sp;
1884 int target, ret;
1886 ret = kstrtoint(buf, 10, &target);
1887 if (ret)
1888 return ret;
1890 #ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL
1891 if (target < CPUHP_OFFLINE || target > CPUHP_ONLINE)
1892 return -EINVAL;
1893 #else
1894 if (target != CPUHP_OFFLINE && target != CPUHP_ONLINE)
1895 return -EINVAL;
1896 #endif
1898 ret = lock_device_hotplug_sysfs();
1899 if (ret)
1900 return ret;
1902 mutex_lock(&cpuhp_state_mutex);
1903 sp = cpuhp_get_step(target);
1904 ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
1905 mutex_unlock(&cpuhp_state_mutex);
1906 if (ret)
1907 goto out;
1909 if (st->state < target)
1910 ret = do_cpu_up(dev->id, target);
1911 else
1912 ret = do_cpu_down(dev->id, target);
1913 out:
1914 unlock_device_hotplug();
1915 return ret ? ret : count;
1918 static ssize_t show_cpuhp_target(struct device *dev,
1919 struct device_attribute *attr, char *buf)
1921 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
1923 return sprintf(buf, "%d\n", st->target);
1925 static DEVICE_ATTR(target, 0644, show_cpuhp_target, write_cpuhp_target);
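/*
 * Note (not in the original source): with CONFIG_CPU_HOTPLUG_STATE_CONTROL
 * the "target" attribute accepts any state number, which lets a CPU be
 * driven to an intermediate hotplug state for testing; otherwise only
 * CPUHP_OFFLINE and CPUHP_ONLINE are accepted, so a write is equivalent
 * to a plain offline/online request.
 */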
1927 static struct attribute *cpuhp_cpu_attrs[] = {
1928 &dev_attr_state.attr,
1929 &dev_attr_target.attr,
1930 NULL
1933 static struct attribute_group cpuhp_cpu_attr_group = {
1934 .attrs = cpuhp_cpu_attrs,
1935 .name = "hotplug",
1936 NULL
1939 static ssize_t show_cpuhp_states(struct device *dev,
1940 struct device_attribute *attr, char *buf)
1942 ssize_t cur, res = 0;
1943 int i;
1945 mutex_lock(&cpuhp_state_mutex);
1946 for (i = CPUHP_OFFLINE; i <= CPUHP_ONLINE; i++) {
1947 struct cpuhp_step *sp = cpuhp_get_step(i);
1949 if (sp->name) {
1950 cur = sprintf(buf, "%3d: %s\n", i, sp->name);
1951 buf += cur;
1952 res += cur;
1955 mutex_unlock(&cpuhp_state_mutex);
1956 return res;
1958 static DEVICE_ATTR(states, 0444, show_cpuhp_states, NULL);
1960 static struct attribute *cpuhp_cpu_root_attrs[] = {
1961 &dev_attr_states.attr,
1962 NULL
1965 static struct attribute_group cpuhp_cpu_root_attr_group = {
1966 .attrs = cpuhp_cpu_root_attrs,
1967 .name = "hotplug",
1968 NULL
1971 #ifdef CONFIG_HOTPLUG_SMT
1973 static const char *smt_states[] = {
1974 [CPU_SMT_ENABLED] = "on",
1975 [CPU_SMT_DISABLED] = "off",
1976 [CPU_SMT_FORCE_DISABLED] = "forceoff",
1977 [CPU_SMT_NOT_SUPPORTED] = "notsupported",
1980 static ssize_t
1981 show_smt_control(struct device *dev, struct device_attribute *attr, char *buf)
1983 return snprintf(buf, PAGE_SIZE - 2, "%s\n", smt_states[cpu_smt_control]);
1986 static void cpuhp_offline_cpu_device(unsigned int cpu)
1988 struct device *dev = get_cpu_device(cpu);
1990 dev->offline = true;
1991 /* Tell user space about the state change */
1992 kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
1995 static void cpuhp_online_cpu_device(unsigned int cpu)
1997 struct device *dev = get_cpu_device(cpu);
1999 dev->offline = false;
2000 /* Tell user space about the state change */
2001 kobject_uevent(&dev->kobj, KOBJ_ONLINE);
2004 int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
2006 int cpu, ret = 0;
2008 cpu_maps_update_begin();
2009 for_each_online_cpu(cpu) {
2010 if (topology_is_primary_thread(cpu))
2011 continue;
2012 ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
2013 if (ret)
2014 break;
2016 * As this needs to hold the cpu maps lock it's impossible
2017 * to call device_offline() because that ends up calling
2018 * cpu_down() which takes cpu maps lock. cpu maps lock
2019 * needs to be held as this might race against in kernel
2020 * abusers of the hotplug machinery (thermal management).
2022 * So nothing would update device:offline state. That would
2023 * leave the sysfs entry stale and prevent onlining after
2024 * smt control has been changed to 'off' again. This is
2025 * called under the sysfs hotplug lock, so it is properly
2026 * serialized against the regular offline usage.
2028 cpuhp_offline_cpu_device(cpu);
2030 if (!ret) {
2031 cpu_smt_control = ctrlval;
2032 arch_smt_update();
2034 cpu_maps_update_done();
2035 return ret;
2038 int cpuhp_smt_enable(void)
2040 int cpu, ret = 0;
2042 cpu_maps_update_begin();
2043 cpu_smt_control = CPU_SMT_ENABLED;
2044 arch_smt_update();
2045 for_each_present_cpu(cpu) {
2046 /* Skip online CPUs and CPUs on offline nodes */
2047 if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
2048 continue;
2049 ret = _cpu_up(cpu, 0, CPUHP_ONLINE);
2050 if (ret)
2051 break;
2052 /* See comment in cpuhp_smt_disable() */
2053 cpuhp_online_cpu_device(cpu);
2055 cpu_maps_update_done();
2056 return ret;
2059 static ssize_t
2060 store_smt_control(struct device *dev, struct device_attribute *attr,
2061 const char *buf, size_t count)
2063 int ctrlval, ret;
2065 if (sysfs_streq(buf, "on"))
2066 ctrlval = CPU_SMT_ENABLED;
2067 else if (sysfs_streq(buf, "off"))
2068 ctrlval = CPU_SMT_DISABLED;
2069 else if (sysfs_streq(buf, "forceoff"))
2070 ctrlval = CPU_SMT_FORCE_DISABLED;
2071 else
2072 return -EINVAL;
2074 if (cpu_smt_control == CPU_SMT_FORCE_DISABLED)
2075 return -EPERM;
2077 if (cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
2078 return -ENODEV;
2080 ret = lock_device_hotplug_sysfs();
2081 if (ret)
2082 return ret;
2084 if (ctrlval != cpu_smt_control) {
2085 switch (ctrlval) {
2086 case CPU_SMT_ENABLED:
2087 ret = cpuhp_smt_enable();
2088 break;
2089 case CPU_SMT_DISABLED:
2090 case CPU_SMT_FORCE_DISABLED:
2091 ret = cpuhp_smt_disable(ctrlval);
2092 break;
2096 unlock_device_hotplug();
2097 return ret ? ret : count;
2099 static DEVICE_ATTR(control, 0644, show_smt_control, store_smt_control);
2101 static ssize_t
2102 show_smt_active(struct device *dev, struct device_attribute *attr, char *buf)
2104 bool active = topology_max_smt_threads() > 1;
2106 return snprintf(buf, PAGE_SIZE - 2, "%d\n", active);
2108 static DEVICE_ATTR(active, 0444, show_smt_active, NULL);
2110 static struct attribute *cpuhp_smt_attrs[] = {
2111 &dev_attr_control.attr,
2112 &dev_attr_active.attr,
2113 NULL
2116 static const struct attribute_group cpuhp_smt_attr_group = {
2117 .attrs = cpuhp_smt_attrs,
2118 .name = "smt",
2119 NULL
2122 static int __init cpu_smt_state_init(void)
2124 return sysfs_create_group(&cpu_subsys.dev_root->kobj,
2125 &cpuhp_smt_attr_group);
2128 #else
2129 static inline int cpu_smt_state_init(void) { return 0; }
2130 #endif
2132 static int __init cpuhp_sysfs_init(void)
2134 int cpu, ret;
2136 ret = cpu_smt_state_init();
2137 if (ret)
2138 return ret;
2140 ret = sysfs_create_group(&cpu_subsys.dev_root->kobj,
2141 &cpuhp_cpu_root_attr_group);
2142 if (ret)
2143 return ret;
2145 for_each_possible_cpu(cpu) {
2146 struct device *dev = get_cpu_device(cpu);
2148 if (!dev)
2149 continue;
2150 ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group);
2151 if (ret)
2152 return ret;
2154 return 0;
2156 device_initcall(cpuhp_sysfs_init);
2157 #endif
2160 * cpu_bit_bitmap[] is a special, "compressed" data structure that
2161 * represents, for every nr, the NR_CPUS-bit value 1<<nr.
2163 * It is used by cpumask_of() to get a constant address to a CPU
2164 * mask value that has a single bit set only.
2167 /* cpu_bit_bitmap[0] is empty - so we can back into it */
2168 #define MASK_DECLARE_1(x) [x+1][0] = (1UL << (x))
2169 #define MASK_DECLARE_2(x) MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
2170 #define MASK_DECLARE_4(x) MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
2171 #define MASK_DECLARE_8(x) MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)
2173 const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {
2175 MASK_DECLARE_8(0), MASK_DECLARE_8(8),
2176 MASK_DECLARE_8(16), MASK_DECLARE_8(24),
2177 #if BITS_PER_LONG > 32
2178 MASK_DECLARE_8(32), MASK_DECLARE_8(40),
2179 MASK_DECLARE_8(48), MASK_DECLARE_8(56),
2180 #endif
2182 EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
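/*
 * Note (not in the original source): cpumask_of(cpu) resolves to
 * &cpu_bit_bitmap[1 + cpu % BITS_PER_LONG] minus cpu / BITS_PER_LONG
 * longs, so the word containing the single set bit lands at the word
 * offset where that CPU's bit belongs. The empty row [0] provides the
 * slack needed to step backwards safely.
 */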
2184 const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
2185 EXPORT_SYMBOL(cpu_all_bits);
2187 #ifdef CONFIG_INIT_ALL_POSSIBLE
2188 struct cpumask __cpu_possible_mask __read_mostly
2189 = {CPU_BITS_ALL};
2190 #else
2191 struct cpumask __cpu_possible_mask __read_mostly;
2192 #endif
2193 EXPORT_SYMBOL(__cpu_possible_mask);
2195 struct cpumask __cpu_online_mask __read_mostly;
2196 EXPORT_SYMBOL(__cpu_online_mask);
2198 struct cpumask __cpu_present_mask __read_mostly;
2199 EXPORT_SYMBOL(__cpu_present_mask);
2201 struct cpumask __cpu_active_mask __read_mostly;
2202 EXPORT_SYMBOL(__cpu_active_mask);
2204 void init_cpu_present(const struct cpumask *src)
2206 cpumask_copy(&__cpu_present_mask, src);
2209 void init_cpu_possible(const struct cpumask *src)
2211 cpumask_copy(&__cpu_possible_mask, src);
2214 void init_cpu_online(const struct cpumask *src)
2216 cpumask_copy(&__cpu_online_mask, src);
2220 * Activate the first processor.
2222 void __init boot_cpu_init(void)
2224 int cpu = smp_processor_id();
2226 /* Mark the boot cpu "present", "online" etc for SMP and UP case */
2227 set_cpu_online(cpu, true);
2228 set_cpu_active(cpu, true);
2229 set_cpu_present(cpu, true);
2230 set_cpu_possible(cpu, true);
2234 * Must be called _AFTER_ setting up the per_cpu areas
2236 void __init boot_cpu_hotplug_init(void)
2238 #ifdef CONFIG_SMP
2239 this_cpu_write(cpuhp_state.booted_once, true);
2240 #endif
2241 this_cpu_write(cpuhp_state.state, CPUHP_ONLINE);
2245 * These are used for a global "mitigations=" cmdline option for toggling
2246 * optional CPU mitigations.
2248 enum cpu_mitigations {
2249 CPU_MITIGATIONS_OFF,
2250 CPU_MITIGATIONS_AUTO,
2251 CPU_MITIGATIONS_AUTO_NOSMT,
2254 static enum cpu_mitigations cpu_mitigations __ro_after_init =
2255 CPU_MITIGATIONS_AUTO;
2257 static int __init mitigations_parse_cmdline(char *arg)
2259 if (!strcmp(arg, "off"))
2260 cpu_mitigations = CPU_MITIGATIONS_OFF;
2261 else if (!strcmp(arg, "auto"))
2262 cpu_mitigations = CPU_MITIGATIONS_AUTO;
2263 else if (!strcmp(arg, "auto,nosmt"))
2264 cpu_mitigations = CPU_MITIGATIONS_AUTO_NOSMT;
2265 else
2266 pr_crit("Unsupported mitigations=%s, system may still be vulnerable\n",
2267 arg);
2269 return 0;
2271 early_param("mitigations", mitigations_parse_cmdline);
2273 /* mitigations=off */
2274 bool cpu_mitigations_off(void)
2276 return cpu_mitigations == CPU_MITIGATIONS_OFF;
2278 EXPORT_SYMBOL_GPL(cpu_mitigations_off);
2280 /* mitigations=auto,nosmt */
2281 bool cpu_mitigations_auto_nosmt(void)
2283 return cpu_mitigations == CPU_MITIGATIONS_AUTO_NOSMT;
2285 EXPORT_SYMBOL_GPL(cpu_mitigations_auto_nosmt);