/*
 * SMP Support
 *
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 * Copyright (C) 1999, 2001, 2003 David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * Lots of stuff stolen from arch/alpha/kernel/smp.c
 *
 * 01/05/16 Rohit Seth <rohit.seth@intel.com>	IA64-SMP functions. Reorganized
 * the existing code (on the lines of x86 port).
 * 00/09/11 David Mosberger <davidm@hpl.hp.com> Do loops_per_jiffy
 * calibration on each CPU.
 * 00/08/23 Asit Mallick <asit.k.mallick@intel.com> fixed logical processor id
 * 00/03/31 Rohit Seth <rohit.seth@intel.com>	Fixes for Bootstrap Processor
 * & cpu_online_map now gets done here (instead of setup.c)
 * 99/10/05 davidm	Update to bring it in sync with new command-line processing
 *  scheme.
 * 10/13/00 Goutham Rao <goutham.rao@intel.com> Updated smp_call_function and
 *		smp_call_function_single to resend IPI on timeouts
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/efi.h>
#include <linux/bitops.h>
#include <linux/kexec.h>

#include <asm/atomic.h>
#include <asm/current.h>
#include <asm/delay.h>
#include <asm/machvec.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/sal.h>
#include <asm/system.h>
#include <asm/tlbflush.h>
#include <asm/unistd.h>
#include <asm/mca.h>
/*
 * Structure and data for smp_call_function(). This is designed to minimise static memory
 * requirements. It also looks cleaner.
 */
static __cacheline_aligned DEFINE_SPINLOCK(call_lock);

struct call_data_struct {
	void (*func) (void *info);
	void *info;
	long wait;
	atomic_t started;
	atomic_t finished;
};

static volatile struct call_data_struct *call_data;

#define IPI_CALL_FUNC		0
#define IPI_CPU_STOP		1
#define IPI_KDUMP_CPU_STOP	3

/* This needs to be cacheline aligned because it is written to by *other* CPUs.  */
static DEFINE_PER_CPU(u64, ipi_operation) ____cacheline_aligned;
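
/*
 * The call_data pointer is a single-slot mailbox serialized by call_lock:
 * a sender fills in a call_data_struct (typically on its own stack), points
 * call_data at it and posts IPI_CALL_FUNC.  Each receiving CPU copies
 * func/info/wait out of the struct, increments 'started', runs the function
 * and, only when the sender asked to wait, also increments 'finished'.  The
 * sender must keep the struct alive at least until 'started' reaches the
 * expected CPU count.
 */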
extern void cpu_halt (void);

void
lock_ipi_calllock(void)
{
	spin_lock_irq(&call_lock);
}

void
unlock_ipi_calllock(void)
{
	spin_unlock_irq(&call_lock);
}
static void
stop_this_cpu (void)
{
	/*
	 * Remove this CPU:
	 */
	cpu_clear(smp_processor_id(), cpu_online_map);
	max_xtp();
	local_irq_disable();
	cpu_halt();
}

void
cpu_die(void)
{
	max_xtp();
	local_irq_disable();
	cpu_halt();
	/* Should never be here */
	BUG();
	for (;;);
}
irqreturn_t
handle_IPI (int irq, void *dev_id)
{
	int this_cpu = get_cpu();
	unsigned long *pending_ipis = &__ia64_per_cpu_var(ipi_operation);
	unsigned long ops;

	mb();	/* Order interrupt and bit testing. */
	while ((ops = xchg(pending_ipis, 0)) != 0) {
		mb();	/* Order bit clearing and data access. */
		do {
			unsigned long which;

			which = ffz(~ops);
			ops &= ~(1 << which);

			switch (which) {
			case IPI_CALL_FUNC:
			{
				struct call_data_struct *data;
				void (*func)(void *info);
				void *info;
				int wait;

				/* release the 'pointer lock' */
				data = (struct call_data_struct *) call_data;
				func = data->func;
				info = data->info;
				wait = data->wait;

				mb();
				atomic_inc(&data->started);
				/*
				 * At this point the structure may be gone unless
				 * wait is true.
				 */
				(*func)(info);

				/* Notify the sending CPU that the task is done. */
				mb();
				if (wait)
					atomic_inc(&data->finished);
			}
			break;

			case IPI_CPU_STOP:
				stop_this_cpu();
				break;
#ifdef CONFIG_KEXEC
			case IPI_KDUMP_CPU_STOP:
				unw_init_running(kdump_cpu_freeze, NULL);
				break;
#endif
			default:
				printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n", this_cpu, which);
				break;
			}
		} while (ops);
		mb();	/* Order data access and bit testing. */
	}
	put_cpu();
	return IRQ_HANDLED;
}
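
/*
 * Dispatch note for handle_IPI() above: ipi_operation is a per-CPU bitmask
 * of pending operations.  Senders set a bit remotely (see send_IPI_single()
 * below) and raise IA64_IPI_VECTOR; the handler claims the whole mask
 * atomically with xchg(..., 0), services each set bit (ffz(~ops) yields the
 * lowest one) and loops in case new bits were posted in the meantime.
 */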
/*
 * Called with preemption disabled.
 */
static inline void
send_IPI_single (int dest_cpu, int op)
{
	set_bit(op, &per_cpu(ipi_operation, dest_cpu));
	platform_send_ipi(dest_cpu, IA64_IPI_VECTOR, IA64_IPI_DM_INT, 0);
}

/*
 * Called with preemption disabled.
 */
static inline void
send_IPI_allbutself (int op)
{
	unsigned int i;

	for_each_online_cpu(i) {
		if (i != smp_processor_id())
			send_IPI_single(i, op);
	}
}

/*
 * Called with preemption disabled.
 */
static inline void
send_IPI_all (int op)
{
	int i;

	for_each_online_cpu(i) {
		send_IPI_single(i, op);
	}
}

/*
 * Called with preemption disabled.
 */
static inline void
send_IPI_self (int op)
{
	send_IPI_single(smp_processor_id(), op);
}
#ifdef CONFIG_KEXEC
void
kdump_smp_send_stop(void)
{
	send_IPI_allbutself(IPI_KDUMP_CPU_STOP);
}

void
kdump_smp_send_init(void)
{
	unsigned int cpu, self_cpu;

	self_cpu = smp_processor_id();
	for_each_online_cpu(cpu) {
		if (cpu != self_cpu) {
			if (kdump_status[cpu] == 0)
				platform_send_ipi(cpu, 0, IA64_IPI_DM_INIT, 0);
		}
	}
}
#endif
/*
 * Called with preemption disabled.
 */
void
smp_send_reschedule (int cpu)
{
	platform_send_ipi(cpu, IA64_IPI_RESCHEDULE, IA64_IPI_DM_INT, 0);
}
void
smp_flush_tlb_all (void)
{
	on_each_cpu((void (*)(void *))local_flush_tlb_all, NULL, 1, 1);
}
void
smp_flush_tlb_mm (struct mm_struct *mm)
{
	preempt_disable();
	/* this happens for the common case of a single-threaded fork(): */
	if (likely(mm == current->active_mm && atomic_read(&mm->mm_users) == 1)) {
		local_finish_flush_tlb_mm(mm);
		preempt_enable();
		return;
	}

	preempt_enable();
	/*
	 * We could optimize this further by using mm->cpu_vm_mask to track which CPUs
	 * have been running in the address space.  It's not clear that this is worth the
	 * trouble though: to avoid races, we have to raise the IPI on the target CPU
	 * anyhow, and once a CPU is interrupted, the cost of local_flush_tlb_all() is
	 * rather trivial.
	 */
	on_each_cpu((void (*)(void *))local_finish_flush_tlb_mm, mm, 1, 1);
}
/*
 * Run a function on another CPU
 *  <func>	The function to run. This must be fast and non-blocking.
 *  <info>	An arbitrary pointer to pass to the function.
 *  <nonatomic>	Currently unused.
 *  <wait>	If true, wait until function has completed on other CPUs.
 *  [RETURNS]	0 on success, else a negative status code.
 *
 * Does not return until the remote CPU is nearly ready to execute <func>
 * or is or has executed.
 */
int
smp_call_function_single (int cpuid, void (*func) (void *info), void *info, int nonatomic,
			  int wait)
{
	struct call_data_struct data;
	int cpus = 1;
	int me = get_cpu(); /* prevent preemption and reschedule on another processor */

	if (cpuid == me) {
		printk(KERN_INFO "%s: trying to call self\n", __FUNCTION__);
		put_cpu();
		return -EBUSY;
	}

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	spin_lock_bh(&call_lock);

	call_data = &data;
	mb();	/* ensure store to call_data precedes setting of IPI_CALL_FUNC */
	send_IPI_single(cpuid, IPI_CALL_FUNC);

	/* Wait for response */
	while (atomic_read(&data.started) != cpus)
		cpu_relax();

	if (wait)
		while (atomic_read(&data.finished) != cpus)
			cpu_relax();
	call_data = NULL;

	spin_unlock_bh(&call_lock);
	put_cpu();
	return 0;
}
EXPORT_SYMBOL(smp_call_function_single);
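
/*
 * Usage sketch (illustrative only; read_remote_itc() is a hypothetical
 * handler, not part of this file): the handler must be fast and
 * non-blocking, and passing wait=1 keeps the stack-based result buffer
 * valid until the remote CPU has finished writing it.
 *
 *	static void read_remote_itc(void *info)
 *	{
 *		*(unsigned long *)info = ia64_get_itc();
 *	}
 *
 *	unsigned long itc;
 *	if (smp_call_function_single(cpu, read_remote_itc, &itc, 0, 1) == 0)
 *		printk(KERN_DEBUG "cpu %d itc=%lu\n", cpu, itc);
 */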
/*
 * this function sends a 'generic call function' IPI to all other CPUs
 * in the system.
 */

/*
 *  [SUMMARY]	Run a function on all other CPUs.
 *  <func>	The function to run. This must be fast and non-blocking.
 *  <info>	An arbitrary pointer to pass to the function.
 *  <nonatomic>	currently unused.
 *  <wait>	If true, wait (atomically) until function has completed on other CPUs.
 *  [RETURNS]	0 on success, else a negative status code.
 *
 * Does not return until remote CPUs are nearly ready to execute <func> or are or have
 * executed.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int
smp_call_function (void (*func) (void *info), void *info, int nonatomic, int wait)
{
	struct call_data_struct data;
	int cpus;

	spin_lock(&call_lock);
	cpus = num_online_cpus() - 1;
	if (!cpus) {
		spin_unlock(&call_lock);
		return 0;
	}

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	call_data = &data;
	mb();	/* ensure store to call_data precedes setting of IPI_CALL_FUNC */
	send_IPI_allbutself(IPI_CALL_FUNC);

	/* Wait for response */
	while (atomic_read(&data.started) != cpus)
		cpu_relax();

	if (wait)
		while (atomic_read(&data.finished) != cpus)
			cpu_relax();
	call_data = NULL;

	spin_unlock(&call_lock);
	return 0;
}
EXPORT_SYMBOL(smp_call_function);
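
/*
 * Usage sketch (illustrative only; bump_count() and 'responded' are
 * hypothetical, not part of this file): the handler runs on every other
 * online CPU, so any shared state it touches must be updated atomically;
 * wait=1 keeps 'responded' live until all remote CPUs have incremented it.
 *
 *	static atomic_t responded = ATOMIC_INIT(0);
 *
 *	static void bump_count(void *info)
 *	{
 *		atomic_inc((atomic_t *)info);
 *	}
 *
 *	if (smp_call_function(bump_count, &responded, 0, 1) == 0)
 *		printk(KERN_DEBUG "%d other cpus responded\n",
 *		       atomic_read(&responded));
 */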
/*
 * this function calls the 'stop' function on all other CPUs in the system.
 */
void
smp_send_stop (void)
{
	send_IPI_allbutself(IPI_CPU_STOP);
}

int __init
setup_profiling_timer (unsigned int multiplier)
{
	return -EINVAL;
}