[linux/fpc-iii.git] / kernel / cpu.c
blob 94bbe4695232cd2fa2e9c0def32de7fa27644971
/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licenced under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <linux/tick.h>
#include <trace/events/power.h>

#include "smpboot.h"

#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);

/*
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 * The APIs cpu_notifier_register_begin/done() must be used to protect CPU
 * hotplug callback (un)registration performed using __register_cpu_notifier()
 * or __unregister_cpu_notifier().
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}
/* cpu_notifier_register_begin/done are #defined to these two functions in
 * <linux/cpu.h>, hence the export names below. */
EXPORT_SYMBOL(cpu_notifier_register_begin);

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_done);

static RAW_NOTIFIER_HEAD(cpu_chain);

/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

static struct {
	struct task_struct *active_writer;
	/* wait queue to wake up the active_writer */
	wait_queue_head_t wq;
	/* verifies that no writer will get active while readers are active */
	struct mutex lock;
	/*
	 * Also blocks the new readers during
	 * an ongoing cpu hotplug operation.
	 */
	atomic_t refcount;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} cpu_hotplug = {
	.active_writer = NULL,
	.wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	.dep_map = {.name = "cpu_hotplug.lock" },
#endif
};

/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
#define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire_tryread() \
				  lock_map_acquire_tryread(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire()      lock_map_acquire(&cpu_hotplug.dep_map)
#define cpuhp_lock_release()      lock_map_release(&cpu_hotplug.dep_map)

void get_online_cpus(void)
{
	might_sleep();
	if (cpu_hotplug.active_writer == current)
		return;
	cpuhp_lock_acquire_read();
	mutex_lock(&cpu_hotplug.lock);
	atomic_inc(&cpu_hotplug.refcount);
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);

bool try_get_online_cpus(void)
{
	if (cpu_hotplug.active_writer == current)
		return true;
	if (!mutex_trylock(&cpu_hotplug.lock))
		return false;
	cpuhp_lock_acquire_tryread();
	atomic_inc(&cpu_hotplug.refcount);
	mutex_unlock(&cpu_hotplug.lock);
	return true;
}
EXPORT_SYMBOL_GPL(try_get_online_cpus);

void put_online_cpus(void)
{
	int refcount;

	if (cpu_hotplug.active_writer == current)
		return;

	refcount = atomic_dec_return(&cpu_hotplug.refcount);
	if (WARN_ON(refcount < 0)) /* try to fix things up */
		atomic_inc(&cpu_hotplug.refcount);

	if (refcount <= 0 && waitqueue_active(&cpu_hotplug.wq))
		wake_up(&cpu_hotplug.wq);

	cpuhp_lock_release();
}
EXPORT_SYMBOL_GPL(put_online_cpus);
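
/*
 * Illustrative usage sketch (not part of this file): a typical reader
 * pins the set of online CPUs around a traversal.  The helper name is
 * hypothetical.
 *
 *	get_online_cpus();
 *	for_each_online_cpu(cpu)
 *		example_touch_percpu_data(cpu);	// hypothetical helper
 *	put_online_cpus();
 *
 * Callers that must not sleep on the mutex can use the trylock variant,
 * which fails instead of blocking:
 *
 *	if (!try_get_online_cpus())
 *		return -EBUSY;
 *	...
 *	put_online_cpus();
 */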

/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock.
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non-zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an API which is called all that often.
 */
void cpu_hotplug_begin(void)
{
	DEFINE_WAIT(wait);

	cpu_hotplug.active_writer = current;
	cpuhp_lock_acquire();

	for (;;) {
		mutex_lock(&cpu_hotplug.lock);
		prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE);
		if (likely(!atomic_read(&cpu_hotplug.refcount)))
			break;
		mutex_unlock(&cpu_hotplug.lock);
		schedule();
	}
	finish_wait(&cpu_hotplug.wq, &wait);
}

void cpu_hotplug_done(void)
{
	cpu_hotplug.active_writer = NULL;
	mutex_unlock(&cpu_hotplug.lock);
	cpuhp_lock_release();
}
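
/*
 * Illustrative writer-side sketch (not part of this file): the hotplug
 * operations below bracket their work like so, which is why at most one
 * writer can ever be active:
 *
 *	cpu_maps_update_begin();
 *	cpu_hotplug_begin();	// waits for the refcount to drop to zero
 *	... modify cpu_online_mask ...
 *	cpu_hotplug_done();
 *	cpu_maps_update_done();
 */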

/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 1;
	cpu_maps_update_done();
}

void cpu_hotplug_enable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	cpu_maps_update_done();
}
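
/*
 * Illustrative sketch (not part of this file): the PM core uses this
 * pair to fence off hotplug across a suspend/resume cycle, as
 * cpu_hotplug_pm_callback() later in this file does:
 *
 *	cpu_hotplug_disable();	// cpu_up()/cpu_down() now return -EBUSY
 *	... suspend and resume the system ...
 *	cpu_hotplug_enable();
 */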

#endif	/* CONFIG_HOTPLUG_CPU */

/* Need to know about CPUs going up/down? */
int __ref register_cpu_notifier(struct notifier_block *nb)
{
	int ret;
	cpu_maps_update_begin();
	ret = raw_notifier_chain_register(&cpu_chain, nb);
	cpu_maps_update_done();
	return ret;
}

int __ref __register_cpu_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&cpu_chain, nb);
}

static int __cpu_notify(unsigned long val, void *v, int nr_to_call,
			int *nr_calls)
{
	int ret;

	ret = __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call,
					nr_calls);

	return notifier_to_errno(ret);
}

static int cpu_notify(unsigned long val, void *v)
{
	return __cpu_notify(val, v, -1, NULL);
}

#ifdef CONFIG_HOTPLUG_CPU

static void cpu_notify_nofail(unsigned long val, void *v)
{
	BUG_ON(cpu_notify(val, v));
}
EXPORT_SYMBOL(register_cpu_notifier);
EXPORT_SYMBOL(__register_cpu_notifier);

void __ref unregister_cpu_notifier(struct notifier_block *nb)
{
	cpu_maps_update_begin();
	raw_notifier_chain_unregister(&cpu_chain, nb);
	cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);

void __ref __unregister_cpu_notifier(struct notifier_block *nb)
{
	raw_notifier_chain_unregister(&cpu_chain, nb);
}
EXPORT_SYMBOL(__unregister_cpu_notifier);
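
/*
 * Illustrative sketch (not part of this file): a minimal hotplug
 * notifier.  The names are hypothetical; the pattern mirrors
 * smpboot_thread_call() later in this file.
 *
 *	static int example_cpu_callback(struct notifier_block *nfb,
 *					unsigned long action, void *hcpu)
 *	{
 *		unsigned int cpu = (unsigned long)hcpu;
 *
 *		switch (action & ~CPU_TASKS_FROZEN) {
 *		case CPU_ONLINE:
 *			// cpu has just come online
 *			break;
 *		case CPU_DEAD:
 *			// cpu is gone; tear down per-cpu state
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block example_cpu_notifier = {
 *		.notifier_call = example_cpu_callback,
 *	};
 *
 *	register_cpu_notifier(&example_cpu_notifier);
 */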

/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask.  While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
	struct task_struct *p;

	/*
	 * This function is called after the cpu is taken down and marked
	 * offline, so it's not like new tasks will ever get this cpu set in
	 * their mm mask. -- Peter Zijlstra
	 * Thus, we may use rcu_read_lock() here, instead of grabbing
	 * full-fledged tasklist_lock.
	 */
	WARN_ON(cpu_online(cpu));
	rcu_read_lock();
	for_each_process(p) {
		struct task_struct *t;

		/*
		 * Main thread might exit, but other threads may still have
		 * a valid mm. Find one.
		 */
		t = find_lock_task_mm(p);
		if (!t)
			continue;
		cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
		task_unlock(t);
	}
	rcu_read_unlock();
}

static inline void check_for_tasks(int dead_cpu)
{
	struct task_struct *g, *p;

	read_lock_irq(&tasklist_lock);
	do_each_thread(g, p) {
		if (!p->on_rq)
			continue;
		/*
		 * We do the check with an unlocked task_rq(p)->lock.
		 * Order the reads so that we do not warn about a task
		 * which was running on this cpu in the past and has
		 * just been woken on another cpu.
		 */
		rmb();
		if (task_cpu(p) != dead_cpu)
			continue;

		pr_warn("Task %s (pid=%d) is on cpu %d (state=%ld, flags=%x)\n",
			p->comm, task_pid_nr(p), dead_cpu, p->state, p->flags);
	} while_each_thread(g, p);
	read_unlock_irq(&tasklist_lock);
}

struct take_cpu_down_param {
	unsigned long mod;
	void *hcpu;
};

/* Take this CPU down. */
static int __ref take_cpu_down(void *_param)
{
	struct take_cpu_down_param *param = _param;
	int err;

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	cpu_notify(CPU_DYING | param->mod, param->hcpu);
	/* Give up timekeeping duties */
	tick_handover_do_timer();
	/* Park the stopper thread */
	kthread_park(current);
	return 0;
}

/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
	int err, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct take_cpu_down_param tcd_param = {
		.mod = mod,
		.hcpu = hcpu,
	};

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_online(cpu))
		return -EINVAL;

	cpu_hotplug_begin();

	err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
	if (err) {
		nr_calls--;
		__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
		pr_warn("%s: attempt to take down CPU %u failed\n",
			__func__, cpu);
		goto out_release;
	}

	/*
	 * By now we've cleared cpu_active_mask, wait for all preempt-disabled
	 * and RCU users of this state to go away such that all new such users
	 * will observe it.
	 *
	 * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
	 * not imply sync_sched(), so explicitly call both.
	 *
	 * Do the sync before parking the smpboot threads, to take care of
	 * the RCU boost case.
	 */
#ifdef CONFIG_PREEMPT
	synchronize_sched();
#endif
	synchronize_rcu();

	smpboot_park_threads(cpu);

	/*
	 * So now all preempt/rcu users must observe !cpu_active().
	 */

	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
	if (err) {
		/* CPU didn't die: tell everyone.  Can't complain. */
		smpboot_unpark_threads(cpu);
		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
		goto out_release;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The migration_call() CPU_DYING callback will have removed all
	 * runnable tasks from the cpu, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	while (!per_cpu(cpu_dead_idle, cpu))
		cpu_relax();
	smp_mb(); /* Read from cpu_dead_idle before __cpu_die(). */
	per_cpu(cpu_dead_idle, cpu) = false;

	hotplug_cpu__broadcast_tick_pull(cpu);
	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* CPU is completely dead: tell everyone.  Too late to complain. */
	tick_cleanup_dead_cpu(cpu);
	cpu_notify_nofail(CPU_DEAD | mod, hcpu);

	check_for_tasks(cpu);

out_release:
	cpu_hotplug_done();
	if (!err)
		cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
	return err;
}

int __ref cpu_down(unsigned int cpu)
{
	int err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_down(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL(cpu_down);
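
/*
 * Illustrative note (not part of this file): the usual way to reach
 * cpu_down()/cpu_up() from userspace is the sysfs "online" attribute
 * wired up in drivers/base/cpu.c, e.g.:
 *
 *	echo 0 > /sys/devices/system/cpu/cpu1/online	# cpu_down(1)
 *	echo 1 > /sys/devices/system/cpu/cpu1/online	# cpu_up(1)
 */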
#endif /*CONFIG_HOTPLUG_CPU*/

/*
 * Unpark per-CPU smpboot kthreads at CPU-online time.
 */
static int smpboot_thread_call(struct notifier_block *nfb,
			       unsigned long action, void *hcpu)
{
	int cpu = (long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {

	case CPU_ONLINE:
		smpboot_unpark_threads(cpu);
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block smpboot_thread_notifier = {
	.notifier_call = smpboot_thread_call,
	.priority = CPU_PRI_SMPBOOT,
};

void smpboot_thread_init(void)
{
	register_cpu_notifier(&smpboot_thread_notifier);
}
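
/*
 * Illustrative sketch (not part of this file): the per-CPU threads
 * parked and unparked above are registered through the smpboot API,
 * roughly as ksoftirqd does in kernel/softirq.c:
 *
 *	static struct smp_hotplug_thread softirq_threads = {
 *		.store			= &ksoftirqd,
 *		.thread_should_run	= ksoftirqd_should_run,
 *		.thread_fn		= run_ksoftirqd,
 *		.thread_comm		= "ksoftirqd/%u",
 *	};
 *
 *	smpboot_register_percpu_thread(&softirq_threads);
 */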

/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen)
{
	int ret, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct task_struct *idle;

	cpu_hotplug_begin();

	if (cpu_online(cpu) || !cpu_present(cpu)) {
		ret = -EINVAL;
		goto out;
	}

	idle = idle_thread_get(cpu);
	if (IS_ERR(idle)) {
		ret = PTR_ERR(idle);
		goto out;
	}

	ret = smpboot_create_threads(cpu);
	if (ret)
		goto out;

	ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
	if (ret) {
		nr_calls--;
		pr_warn("%s: attempt to bring up CPU %u failed\n",
			__func__, cpu);
		goto out_notify;
	}

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu, idle);
	if (ret != 0)
		goto out_notify;
	BUG_ON(!cpu_online(cpu));

	/* Now call notifier in preparation. */
	cpu_notify(CPU_ONLINE | mod, hcpu);

out_notify:
	if (ret != 0)
		__cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
out:
	cpu_hotplug_done();

	return ret;
}

int cpu_up(unsigned int cpu)
{
	int err = 0;

	if (!cpu_possible(cpu)) {
		pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
		       cpu);
#if defined(CONFIG_IA64)
		pr_err("please check additional_cpus= boot parameter\n");
#endif
		return -EINVAL;
	}

	err = try_online_node(cpu_to_node(cpu));
	if (err)
		return err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_up(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL_GPL(cpu_up);

#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int disable_nonboot_cpus(void)
{
	int cpu, first_cpu, error = 0;

	cpu_maps_update_begin();
	first_cpu = cpumask_first(cpu_online_mask);
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with the userspace trying to use the CPU hotplug at the same time
	 */
	cpumask_clear(frozen_cpus);

	pr_info("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == first_cpu)
			continue;
		trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
		error = _cpu_down(cpu, 1);
		trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			pr_err("Error taking CPU%d down: %d\n", cpu, error);
			break;
		}
	}

	if (!error) {
		BUG_ON(num_online_cpus() > 1);
		/* Make sure the CPUs won't be enabled by someone else */
		cpu_hotplug_disabled = 1;
	} else {
		pr_err("Non-boot CPUs are not disabled\n");
	}
	cpu_maps_update_done();
	return error;
}

void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void __ref enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	if (cpumask_empty(frozen_cpus))
		goto out;

	pr_info("Enabling non-boot CPUs ...\n");

	arch_enable_nonboot_cpus_begin();

	for_each_cpu(cpu, frozen_cpus) {
		trace_suspend_resume(TPS("CPU_ON"), cpu, true);
		error = _cpu_up(cpu, 1);
		trace_suspend_resume(TPS("CPU_ON"), cpu, false);
		if (!error) {
			pr_info("CPU%d is up\n", cpu);
			continue;
		}
		pr_warn("Error taking CPU%d up: %d\n", cpu, error);
	}

	arch_enable_nonboot_cpus_end();

	cpumask_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}

static int __init alloc_frozen_cpus(void)
{
	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
		return -ENOMEM;
	return 0;
}
core_initcall(alloc_frozen_cpus);

/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
			unsigned long action, void *ptr)
{
	switch (action) {

	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		cpu_hotplug_disable();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		cpu_hotplug_enable();
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

static int __init cpu_hotplug_pm_sync_init(void)
{
	/*
	 * cpu_hotplug_pm_callback has higher priority than x86
	 * bsp_pm_callback which depends on cpu_hotplug_pm_callback
	 * to disable cpu hotplug to avoid cpu hotplug race.
	 */
	pm_notifier(cpu_hotplug_pm_callback, 0);
	return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */

/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
	unsigned long val = CPU_STARTING;

#ifdef CONFIG_PM_SLEEP_SMP
	if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
		val = CPU_STARTING_FROZEN;
#endif /* CONFIG_PM_SLEEP_SMP */
	cpu_notify(val, (void *)(long)cpu);
}

#endif /* CONFIG_SMP */

/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents all NR_CPUS bits binary values of 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
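
/*
 * Illustrative sketch (not part of this file): cpumask_of() "backs
 * into" the table above.  get_cpu_mask() in <linux/cpumask.h> is
 * essentially:
 *
 *	const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
 *	p -= cpu / BITS_PER_LONG;
 *	return to_cpumask(p);
 *
 * Each row has a single bit set in its first word; backing the pointer
 * up by cpu / BITS_PER_LONG words shifts that bit to position cpu in
 * the mask the caller sees.  Row 0 is left empty so the pointer can
 * back into it safely.
 */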

const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly
	= CPU_BITS_ALL;
#else
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly;
#endif
const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits);
EXPORT_SYMBOL(cpu_possible_mask);

static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits);
EXPORT_SYMBOL(cpu_online_mask);

static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits);
EXPORT_SYMBOL(cpu_present_mask);

static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits);
EXPORT_SYMBOL(cpu_active_mask);

void set_cpu_possible(unsigned int cpu, bool possible)
{
	if (possible)
		cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits));
}

void set_cpu_present(unsigned int cpu, bool present)
{
	if (present)
		cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits));
}

void set_cpu_online(unsigned int cpu, bool online)
{
	if (online) {
		cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
		cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
	} else {
		cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
	}
}

void set_cpu_active(unsigned int cpu, bool active)
{
	if (active)
		cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits));
}

void init_cpu_present(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_present_bits), src);
}

void init_cpu_possible(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_possible_bits), src);
}

void init_cpu_online(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_online_bits), src);
}
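
/*
 * Illustrative sketch (not part of this file): architecture setup code
 * seeds these masks during early boot.  For example, 32-bit ARM does
 * the following in its smp_prepare_cpus():
 *
 *	init_cpu_present(cpu_possible_mask);
 */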