/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licensed under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <asm/semaphore.h>

/* This protects CPUs going up and down... */
DECLARE_MUTEX(cpucontrol);
EXPORT_SYMBOL_GPL(cpucontrol);

static struct notifier_block *cpu_chain;

/*
 * Used by callers to check whether they need to acquire cpucontrol
 * to protect a cpu from being removed. It is sometimes necessary to
 * call these functions both for normal operations and in response to
 * a cpu being added or removed. If the call is made from the same
 * thread context as a CPU hotplug thread, we don't need to take the
 * lock, since it is already protected.
 * See drivers/cpufreq/cpufreq.c for its usage - Ashok Raj
 */
int current_in_cpu_hotplug(void)
{
        return (current->flags & PF_HOTPLUG_CPU);
}
EXPORT_SYMBOL_GPL(current_in_cpu_hotplug);
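
/*
 * Illustrative sketch, not part of this file: how a caller (following
 * the pattern referenced above from drivers/cpufreq/cpufreq.c) can
 * consult current_in_cpu_hotplug() to avoid taking cpucontrol twice
 * when it is invoked from the hotplug path itself. The function name
 * my_driver_touch_cpu is hypothetical.
 */
#if 0
static void my_driver_touch_cpu(unsigned int cpu)
{
        int locked = 0;

        /*
         * Take the hotplug lock only when we are not already inside a
         * hotplug operation; the hotplug path already holds cpucontrol
         * and has set PF_HOTPLUG_CPU in current->flags.
         */
        if (!current_in_cpu_hotplug()) {
                lock_cpu_hotplug();
                locked = 1;
        }

        /* ... the cpu cannot be removed while the lock is held ... */

        if (locked)
                unlock_cpu_hotplug();
}
#endif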

/* Need to know about CPUs going up/down? */
int register_cpu_notifier(struct notifier_block *nb)
{
        int ret;

        if ((ret = down_interruptible(&cpucontrol)) != 0)
                return ret;
        ret = notifier_chain_register(&cpu_chain, nb);
        up(&cpucontrol);
        return ret;
}
EXPORT_SYMBOL(register_cpu_notifier);

void unregister_cpu_notifier(struct notifier_block *nb)
{
        down(&cpucontrol);
        notifier_chain_unregister(&cpu_chain, nb);
        up(&cpucontrol);
}
EXPORT_SYMBOL(unregister_cpu_notifier);
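
/*
 * Illustrative sketch, not part of this file: a minimal subscriber to
 * the chain above. my_cpu_callback and my_cpu_notifier are hypothetical
 * names; the cpu number arrives cast through the void *hcpu argument,
 * mirroring the notifier_call_chain() calls in cpu_up()/cpu_down().
 */
#if 0
static int my_cpu_callback(struct notifier_block *nb,
                           unsigned long action, void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;

        switch (action) {
        case CPU_ONLINE:
                /* The cpu is up: set up per-cpu state for it. */
                break;
        case CPU_DEAD:
                /* The cpu is gone: tear the per-cpu state down. */
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block my_cpu_notifier = {
        .notifier_call = my_cpu_callback,
};

/* At init time: register_cpu_notifier(&my_cpu_notifier); */
#endif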

#ifdef CONFIG_HOTPLUG_CPU
static inline void check_for_tasks(int cpu)
{
        struct task_struct *p;

        write_lock_irq(&tasklist_lock);
        for_each_process(p) {
                if (task_cpu(p) == cpu &&
                    (!cputime_eq(p->utime, cputime_zero) ||
                     !cputime_eq(p->stime, cputime_zero)))
                        printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d "
                               "(state = %ld, flags = %lx)\n",
                               p->comm, p->pid, cpu, p->state, p->flags);
        }
        write_unlock_irq(&tasklist_lock);
}

/* Take this CPU down. */
static int take_cpu_down(void *unused)
{
        int err;

        /* Ensure this CPU doesn't handle any more interrupts. */
        err = __cpu_disable();
        if (err < 0)
                return err;

        /* Force idle task to run as soon as we yield: it should
           immediately notice cpu is offline and die quickly. */
        sched_idle_next();
        return 0;
}

int cpu_down(unsigned int cpu)
{
        int err;
        struct task_struct *p;
        cpumask_t old_allowed, tmp;

        if ((err = lock_cpu_hotplug_interruptible()) != 0)
                return err;

        if (num_online_cpus() == 1) {
                err = -EBUSY;
                goto out;
        }

        if (!cpu_online(cpu)) {
                err = -EINVAL;
                goto out;
        }

        /*
         * Leave a trace in current->flags indicating that we are already
         * in the process of performing CPU hotplug. Callers can check
         * whether cpucontrol is already held by the current thread and,
         * if so, avoid a deadlock by not acquiring the lock again.
         */
        current->flags |= PF_HOTPLUG_CPU;
        err = notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE,
                                                (void *)(long)cpu);
        if (err == NOTIFY_BAD) {
                printk("%s: attempt to take down CPU %u failed\n",
                                __FUNCTION__, cpu);
                err = -EINVAL;
                goto out;
        }

        /* Ensure that we are not runnable on the dying cpu */
        old_allowed = current->cpus_allowed;
        tmp = CPU_MASK_ALL;
        cpu_clear(cpu, tmp);
        set_cpus_allowed(current, tmp);

        p = __stop_machine_run(take_cpu_down, NULL, cpu);
        if (IS_ERR(p)) {
                /* CPU didn't die: tell everyone.  Can't complain. */
                if (notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED,
                                (void *)(long)cpu) == NOTIFY_BAD)
                        BUG();

                err = PTR_ERR(p);
                goto out_allowed;
        }

        if (cpu_online(cpu))
                goto out_thread;

        /* Wait for it to sleep (leaving idle task). */
        while (!idle_cpu(cpu))
                yield();

        /* This actually kills the CPU. */
        __cpu_die(cpu);

        /* Move it here so it can run. */
        kthread_bind(p, get_cpu());
        put_cpu();

        /* CPU is completely dead: tell everyone.  Too late to complain. */
        if (notifier_call_chain(&cpu_chain, CPU_DEAD, (void *)(long)cpu)
            == NOTIFY_BAD)
                BUG();

        check_for_tasks(cpu);

out_thread:
        err = kthread_stop(p);
out_allowed:
        set_cpus_allowed(current, old_allowed);
out:
        current->flags &= ~PF_HOTPLUG_CPU;
        unlock_cpu_hotplug();
        return err;
}
#endif /* CONFIG_HOTPLUG_CPU */
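
/*
 * Illustrative sketch, not part of this file: returning NOTIFY_BAD from
 * CPU_DOWN_PREPARE makes cpu_down() above fail with -EINVAL before the
 * cpu is touched; CPU_DOWN_FAILED is sent only when the later
 * stop_machine step fails, and CPU_DEAD once the cpu is truly gone.
 * my_down_callback and my_busy_on are hypothetical names.
 */
#if 0
static int my_down_callback(struct notifier_block *nb,
                            unsigned long action, void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;

        switch (action) {
        case CPU_DOWN_PREPARE:
                if (my_busy_on(cpu))
                        return NOTIFY_BAD;      /* veto the offline */
                break;
        case CPU_DOWN_FAILED:
                /* The offline was aborted: undo any PREPARE work. */
                break;
        case CPU_DEAD:
                /* The cpu is really gone now. */
                break;
        }
        return NOTIFY_OK;
}
#endif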

int __devinit cpu_up(unsigned int cpu)
{
        int ret;
        void *hcpu = (void *)(long)cpu;

        if ((ret = down_interruptible(&cpucontrol)) != 0)
                return ret;

        if (cpu_online(cpu) || !cpu_present(cpu)) {
                ret = -EINVAL;
                goto out;
        }

        /*
         * Leave a trace in current->flags indicating that we are already
         * in the process of performing CPU hotplug.
         */
        current->flags |= PF_HOTPLUG_CPU;
        ret = notifier_call_chain(&cpu_chain, CPU_UP_PREPARE, hcpu);
        if (ret == NOTIFY_BAD) {
                printk("%s: attempt to bring up CPU %u failed\n",
                                __FUNCTION__, cpu);
                ret = -EINVAL;
                goto out_notify;
        }

        /* Arch-specific enabling code. */
        ret = __cpu_up(cpu);
        if (ret != 0)
                goto out_notify;
        if (!cpu_online(cpu))
                BUG();

        /* The cpu is now online: tell everyone. */
        notifier_call_chain(&cpu_chain, CPU_ONLINE, hcpu);

out_notify:
        if (ret != 0)
                notifier_call_chain(&cpu_chain, CPU_UP_CANCELED, hcpu);
out:
        current->flags &= ~PF_HOTPLUG_CPU;
        up(&cpucontrol);
        return ret;
}
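
/*
 * Illustrative sketch, not part of this file: boot-time bring-up simply
 * walks the present map and calls cpu_up() for each secondary, much as
 * init/main.c:smp_init() does. bring_up_secondaries is a hypothetical
 * name.
 */
#if 0
static void __init bring_up_secondaries(void)
{
        unsigned int cpu;

        for_each_present_cpu(cpu)
                if (!cpu_online(cpu))
                        cpu_up(cpu);
}
#endif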