/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licenced under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>

#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);

/*
 * The following two APIs must be used when attempting
 * to serialize the updates to cpu_online_mask, cpu_present_mask.
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}

static RAW_NOTIFIER_HEAD(cpu_chain);

/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

static struct {
	struct task_struct *active_writer;
	struct mutex lock; /* Synchronizes accesses to refcount, */
	/*
	 * Also blocks the new readers during
	 * an ongoing cpu hotplug operation.
	 */
	int refcount;
} cpu_hotplug = {
	.active_writer = NULL,
	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
};

void get_online_cpus(void)
{
	might_sleep();
	if (cpu_hotplug.active_writer == current)
		return;
	mutex_lock(&cpu_hotplug.lock);
	cpu_hotplug.refcount++;
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);

void put_online_cpus(void)
{
	if (cpu_hotplug.active_writer == current)
		return;
	mutex_lock(&cpu_hotplug.lock);
	if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
		wake_up_process(cpu_hotplug.active_writer);
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(put_online_cpus);
/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock.
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an API which is called all that often.
 */
static void cpu_hotplug_begin(void)
{
	cpu_hotplug.active_writer = current;

	for (;;) {
		mutex_lock(&cpu_hotplug.lock);
		if (likely(!cpu_hotplug.refcount))
			break;
		__set_current_state(TASK_UNINTERRUPTIBLE);
		mutex_unlock(&cpu_hotplug.lock);
		schedule();
	}
}

static void cpu_hotplug_done(void)
{
	cpu_hotplug.active_writer = NULL;
	mutex_unlock(&cpu_hotplug.lock);
}

#else /* #if CONFIG_HOTPLUG_CPU */
static void cpu_hotplug_begin(void) {}
static void cpu_hotplug_done(void) {}
#endif	/* #else #if CONFIG_HOTPLUG_CPU */

/* Need to know about CPUs going up/down? */
int __ref register_cpu_notifier(struct notifier_block *nb)
{
	int ret;

	cpu_maps_update_begin();
	ret = raw_notifier_chain_register(&cpu_chain, nb);
	cpu_maps_update_done();
	return ret;
}

static int __cpu_notify(unsigned long val, void *v, int nr_to_call,
			int *nr_calls)
{
	int ret;

	ret = __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call,
					nr_calls);

	return notifier_to_errno(ret);
}

static int cpu_notify(unsigned long val, void *v)
{
	return __cpu_notify(val, v, -1, NULL);
}

#ifdef CONFIG_HOTPLUG_CPU

static void cpu_notify_nofail(unsigned long val, void *v)
{
	BUG_ON(cpu_notify(val, v));
}
EXPORT_SYMBOL(register_cpu_notifier);

void __ref unregister_cpu_notifier(struct notifier_block *nb)
{
	cpu_maps_update_begin();
	raw_notifier_chain_unregister(&cpu_chain, nb);
	cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);
static inline void check_for_tasks(int cpu)
{
	struct task_struct *p;

	write_lock_irq(&tasklist_lock);
	for_each_process(p) {
		if (task_cpu(p) == cpu && p->state == TASK_RUNNING &&
		    (!cputime_eq(p->utime, cputime_zero) ||
		     !cputime_eq(p->stime, cputime_zero)))
			printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d "
				"(state = %ld, flags = %x)\n",
				p->comm, task_pid_nr(p), cpu,
				p->state, p->flags);
	}
	write_unlock_irq(&tasklist_lock);
}

struct take_cpu_down_param {
	unsigned long mod;
	void *hcpu;
};

/* Take this CPU down. */
static int __ref take_cpu_down(void *_param)
{
	struct take_cpu_down_param *param = _param;
	int err;

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	cpu_notify(CPU_DYING | param->mod, param->hcpu);
	return 0;
}

/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
	int err, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct take_cpu_down_param tcd_param = {
		.mod = mod,
		.hcpu = hcpu,
	};

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_online(cpu))
		return -EINVAL;

	cpu_hotplug_begin();

	err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
	if (err) {
		nr_calls--;
		__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
		printk("%s: attempt to take down CPU %u failed\n",
				__func__, cpu);
		goto out_release;
	}

	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
	if (err) {
		/* CPU didn't die: tell everyone.  Can't complain. */
		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
		goto out_release;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The migration_call() CPU_DYING callback will have removed all
	 * runnable tasks from the cpu, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	while (!idle_cpu(cpu))
		cpu_relax();

	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* CPU is completely dead: tell everyone.  Too late to complain. */
	cpu_notify_nofail(CPU_DEAD | mod, hcpu);

	check_for_tasks(cpu);

out_release:
	cpu_hotplug_done();
	if (!err)
		cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
	return err;
}

int __ref cpu_down(unsigned int cpu)
{
	int err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_down(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL(cpu_down);
#endif /*CONFIG_HOTPLUG_CPU*/

/* Requires cpu_add_remove_lock to be held */
static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
{
	int ret, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;

	if (cpu_online(cpu) || !cpu_present(cpu))
		return -EINVAL;

	cpu_hotplug_begin();
	ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
	if (ret) {
		nr_calls--;
		printk(KERN_WARNING "%s: attempt to bring up CPU %u failed\n",
				__func__, cpu);
		goto out_notify;
	}

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu);
	if (ret != 0)
		goto out_notify;
	BUG_ON(!cpu_online(cpu));

	/* Now call notifier in preparation. */
	cpu_notify(CPU_ONLINE | mod, hcpu);

out_notify:
	if (ret != 0)
		__cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
	cpu_hotplug_done();

	return ret;
}

int __cpuinit cpu_up(unsigned int cpu)
{
	int err = 0;

#ifdef	CONFIG_MEMORY_HOTPLUG
	int nid;
	pg_data_t *pgdat;
#endif

	if (!cpu_possible(cpu)) {
		printk(KERN_ERR "can't online cpu %d because it is not "
			"configured as may-hotadd at boot time\n", cpu);
#if defined(CONFIG_IA64)
		printk(KERN_ERR "please check additional_cpus= boot "
				"parameter\n");
#endif
		return -EINVAL;
	}

#ifdef	CONFIG_MEMORY_HOTPLUG
	nid = cpu_to_node(cpu);
	if (!node_online(nid)) {
		err = mem_online_node(nid);
		if (err)
			return err;
	}

	pgdat = NODE_DATA(nid);
	if (!pgdat) {
		printk(KERN_ERR
			"Can't online cpu %d due to NULL pgdat\n", cpu);
		return -ENOMEM;
	}

	if (pgdat->node_zonelists->_zonerefs->zone == NULL) {
		mutex_lock(&zonelists_mutex);
		build_all_zonelists(NULL);
		mutex_unlock(&zonelists_mutex);
	}
#endif

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_up(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}

#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

void __weak arch_disable_nonboot_cpus_begin(void)
{
}

void __weak arch_disable_nonboot_cpus_end(void)
{
}

int disable_nonboot_cpus(void)
{
	int cpu, first_cpu, error = 0;

	cpu_maps_update_begin();
	first_cpu = cpumask_first(cpu_online_mask);
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with the userspace trying to use the CPU hotplug at the same time.
	 */
	cpumask_clear(frozen_cpus);
	arch_disable_nonboot_cpus_begin();

	printk("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == first_cpu)
			continue;
		error = _cpu_down(cpu, 1);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			printk(KERN_ERR "Error taking CPU%d down: %d\n",
				cpu, error);
			break;
		}
	}

	arch_disable_nonboot_cpus_end();

	if (!error) {
		BUG_ON(num_online_cpus() > 1);
		/* Make sure the CPUs won't be enabled by someone else */
		cpu_hotplug_disabled = 1;
	} else {
		printk(KERN_ERR "Non-boot CPUs are not disabled\n");
	}
	cpu_maps_update_done();
	return error;
}

void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void __ref enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	if (cpumask_empty(frozen_cpus))
		goto out;

	printk(KERN_INFO "Enabling non-boot CPUs ...\n");

	arch_enable_nonboot_cpus_begin();

	for_each_cpu(cpu, frozen_cpus) {
		error = _cpu_up(cpu, 1);
		if (!error) {
			printk(KERN_INFO "CPU%d is up\n", cpu);
			continue;
		}
		printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error);
	}

	arch_enable_nonboot_cpus_end();

	cpumask_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}

static int alloc_frozen_cpus(void)
{
	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
		return -ENOMEM;
	return 0;
}
core_initcall(alloc_frozen_cpus);
#endif /* CONFIG_PM_SLEEP_SMP */
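
/*
 * Usage note: disable_nonboot_cpus()/enable_nonboot_cpus() above are
 * intended for the suspend/resume and hibernation core, which must
 * quiesce every CPU but the boot CPU before entering a sleep state and
 * bring the frozen ones back afterwards; they are not a general-purpose
 * hotplug interface.
 */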
/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void __cpuinit notify_cpu_starting(unsigned int cpu)
{
	unsigned long val = CPU_STARTING;

#ifdef CONFIG_PM_SLEEP_SMP
	if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
		val = CPU_STARTING_FROZEN;
#endif /* CONFIG_PM_SLEEP_SMP */
	cpu_notify(val, (void *)(long)cpu);
}

#endif /* CONFIG_SMP */

/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents the NR_CPUS-bit binary values of 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
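
/*
 * Worked example (illustrative, assuming BITS_PER_LONG == 64 and
 * NR_CPUS == 128, so each row above is two longs): for cpu 70,
 * get_cpu_mask() in <linux/cpumask.h> computes
 *
 *	p  = cpu_bit_bitmap[1 + 70 % 64];	// row 7: word 0 == 1UL << 6
 *	p -= 70 / 64;				// back up one long
 *
 * The returned mask reads its word 0 from the tail of row 6 (all zero)
 * and its word 1 from the head of row 7 (1UL << 6), i.e. a constant
 * cpumask with only bit 70 set, without declaring NR_CPUS full masks.
 */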
const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly
	= CPU_BITS_ALL;
#else
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly;
#endif
const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits);
EXPORT_SYMBOL(cpu_possible_mask);

static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits);
EXPORT_SYMBOL(cpu_online_mask);

static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits);
EXPORT_SYMBOL(cpu_present_mask);

static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits);
EXPORT_SYMBOL(cpu_active_mask);

void set_cpu_possible(unsigned int cpu, bool possible)
{
	if (possible)
		cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits));
}

void set_cpu_present(unsigned int cpu, bool present)
{
	if (present)
		cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits));
}

void set_cpu_online(unsigned int cpu, bool online)
{
	if (online)
		cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
}

void set_cpu_active(unsigned int cpu, bool active)
{
	if (active)
		cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits));
}

void init_cpu_present(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_present_bits), src);
}

void init_cpu_possible(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_possible_bits), src);
}

void init_cpu_online(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_online_bits), src);
}
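
/*
 * Illustrative sketch (hypothetical arch code, not part of this file):
 * architecture setup code seeds these masks before SMP bringup, along
 * the lines of:
 *
 *	void __init example_smp_prepare(unsigned int max_cpus)
 *	{
 *		unsigned int cpu;
 *
 *		for (cpu = 0; cpu < max_cpus; cpu++) {
 *			set_cpu_possible(cpu, true);
 *			set_cpu_present(cpu, true);
 *		}
 *	}
 *
 * set_cpu_online() is then flipped by the bringup/teardown paths above.
 */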