/*
 * arch/mips/kernel/smp.c
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2000, 2001 Kanoj Sarcar
 * Copyright (C) 2000, 2001 Ralf Baechle
 * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2003 Broadcom Corporation
 */

#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/module.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>

#include <asm/atomic.h>
#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>

#ifdef CONFIG_MIPS_MT_SMTC
#include <asm/mipsmtregs.h>
#endif /* CONFIG_MIPS_MT_SMTC */

cpumask_t phys_cpu_present_map;		/* Bitmask of available CPUs */
volatile cpumask_t cpu_callin_map;	/* Bitmask of started secondaries */
cpumask_t cpu_online_map;		/* Bitmask of currently online CPUs */
int __cpu_number_map[NR_CPUS];		/* Map physical to logical */
int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */

EXPORT_SYMBOL(phys_cpu_present_map);
EXPORT_SYMBOL(cpu_online_map);

/* This happens early in bootup, can't really do it better */
static void smp_tune_scheduling (void)
{
	struct cache_desc *cd = &current_cpu_data.scache;
	unsigned long cachesize = cd->linesz * cd->sets * cd->ways;
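
	/*
	 * Note (added comment): max_cache_size is consulted by the
	 * scheduler when it estimates the cost of migrating a task
	 * between CPUs, so keep the largest cache size seen so far.
	 */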
	if (cachesize > max_cache_size)
		max_cache_size = cachesize;
}

extern void __init calibrate_delay(void);
extern ATTRIB_NORET void cpu_idle(void);

/*
 * First C code run on the secondary CPUs after being started up by
 * the master.
 */
asmlinkage void start_secondary(void)
{
	unsigned int cpu;

#ifdef CONFIG_MIPS_MT_SMTC
	/* Only do cpu_probe for first TC of CPU */
	if ((read_c0_tcbind() & TCBIND_CURTC) == 0)
#endif /* CONFIG_MIPS_MT_SMTC */
	cpu_probe();
	cpu_report();
	per_cpu_trap_init();
	prom_init_secondary();

	/*
	 * XXX parity protection should be folded in here when it's converted
	 * to an option instead of something based on .cputype
	 */

	calibrate_delay();
	preempt_disable();
	cpu = smp_processor_id();
	cpu_data[cpu].udelay_val = loops_per_jiffy;

	prom_smp_finish();

	cpu_set(cpu, cpu_callin_map);

	cpu_idle();
}

DEFINE_SPINLOCK(smp_call_lock);

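/*
 * Set by smp_call_function() while holding smp_call_lock; read by the
 * remote CPUs from smp_call_function_interrupt().
 */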
struct call_data_struct *call_data;

/*
 * Run a function on all other CPUs.
 *  <func>      The function to run. This must be fast and non-blocking.
 *  <info>      An arbitrary pointer to pass to the function.
 *  <retry>     If true, keep retrying until ready.
 *  <wait>      If true, wait until function has completed on other CPUs.
 *  [RETURNS]   0 on success, else a negative status code.
 *
 * Does not return until remote CPUs are nearly ready to execute <func>
 * or have already executed it.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler, or the
 * following scenario can deadlock:
 *
 * CPU A                               CPU B
 * Disable interrupts
 *                                     smp_call_function()
 *                                     Take call_lock
 *                                     Send IPIs
 *                                     Wait for all cpus to acknowledge IPI
 *                                     CPU A has not responded, spin waiting
 *                                     for cpu A to respond, holding call_lock
 * smp_call_function()
 * Spin waiting for call_lock
 * Deadlock                            Deadlock
 */
int smp_call_function (void (*func) (void *info), void *info, int retry,
		       int wait)
{
	struct call_data_struct data;
	int i, cpus = num_online_cpus() - 1;
	int cpu = smp_processor_id();

	/*
	 * Can die spectacularly if this CPU isn't yet marked online
	 */
	BUG_ON(!cpu_online(cpu));

	if (!cpus)
		return 0;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	spin_lock(&smp_call_lock);
	call_data = &data;
	smp_mb();

	/* Send a message to all other CPUs and wait for them to respond */
	for_each_online_cpu(i)
		if (i != cpu)
			core_send_ipi(i, SMP_CALL_FUNCTION);

	/* Wait for response */
	/* FIXME: lock-up detection, backtrace on lock-up */
	while (atomic_read(&data.started) != cpus)
		barrier();

	if (wait)
		while (atomic_read(&data.finished) != cpus)
			barrier();
	call_data = NULL;
	spin_unlock(&smp_call_lock);

	return 0;
}
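
/*
 * Illustrative usage sketch, not part of the original file: run a short,
 * non-blocking handler on every online CPU.  The handler and counter
 * below are hypothetical; the #if 0 keeps the sketch out of the build.
 */
#if 0
static atomic_t example_acks = ATOMIC_INIT(0);

static void example_handler(void *info)
{
	/* Runs in IPI context on the remote CPUs: must not block or sleep. */
	atomic_inc((atomic_t *) info);
}

static void example_caller(void)
{
	/* retry=1, wait=1: spin until all remote CPUs have finished. */
	smp_call_function(example_handler, &example_acks, 1, 1);
	example_handler(&example_acks);		/* cover the local CPU too */
}
#endif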

void smp_call_function_interrupt(void)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;
	int wait = call_data->wait;

	/*
	 * Notify initiating CPU that I've grabbed the data and am
	 * about to execute the function.
	 */
	smp_mb();
	atomic_inc(&call_data->started);

	/*
	 * At this point the info structure may be out of scope unless wait==1.
	 */
	irq_enter();
	(*func)(info);
	irq_exit();

	if (wait) {
		smp_mb();
		atomic_inc(&call_data->finished);
	}
}

static void stop_this_cpu(void *dummy)
{
	/*
	 * Remove this CPU:
	 */
	cpu_clear(smp_processor_id(), cpu_online_map);
	local_irq_enable();	/* May need to service _machine_restart IPI */
	for (;;);		/* Wait if available. */
}

void smp_send_stop(void)
{
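	/*
	 * wait=0: stop_this_cpu() never returns, so the stopped CPUs can
	 * never signal completion; waiting for them would hang here.
	 */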
	smp_call_function(stop_this_cpu, NULL, 1, 0);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	prom_cpus_done();
}

/* called from main before smp_init() */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	init_new_context(current, &init_mm);
	current_thread_info()->cpu = 0;
	smp_tune_scheduling();
	plat_prepare_cpus(max_cpus);
#ifndef CONFIG_HOTPLUG_CPU
	cpu_present_map = cpu_possible_map;
#endif
}

/* preload SMP state for boot cpu */
void __devinit smp_prepare_boot_cpu(void)
{
	/*
	 * This assumes that bootup is always handled by the processor
	 * with the logical and physical number 0.
	 */
	__cpu_number_map[0] = 0;
	__cpu_logical_map[0] = 0;
	cpu_set(0, phys_cpu_present_map);
	cpu_set(0, cpu_online_map);
	cpu_set(0, cpu_callin_map);
}

/*
 * Called once for each "cpu_possible(cpu)".  Needs to spin up the cpu
 * and keep control until "cpu_online(cpu)" is set.  Note: cpu is
 * physical, not logical.
 */
int __cpuinit __cpu_up(unsigned int cpu)
{
	struct task_struct *idle;

	/*
	 * The processor goes to start_secondary() and signals us via
	 * cpu_callin_map.  The idle task below is created purely so that
	 * Linux can schedule processes on this slave CPU.
	 */
	idle = fork_idle(cpu);
	if (IS_ERR(idle))
		panic(KERN_ERR "Fork failed for CPU %d", cpu);

	prom_boot_secondary(cpu, idle);

	/*
	 * Trust is futile.  We should really have timeouts ...
	 */
	while (!cpu_isset(cpu, cpu_callin_map))
		udelay(100);

	cpu_set(cpu, cpu_online_map);

	return 0;
}
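
/*
 * Illustrative sketch, not part of the original file: the generic boot
 * code (smp_init() in init/main.c) reaches __cpu_up() via cpu_up() for
 * each present CPU, roughly as below.  Details vary by kernel version.
 */
#if 0
static void example_bring_up_secondaries(void)
{
	unsigned int cpu;

	for_each_present_cpu(cpu) {
		if (!cpu_online(cpu))
			cpu_up(cpu);	/* ends up calling __cpu_up(cpu) */
	}
}
#endif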

/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

static void flush_tlb_all_ipi(void *info)
{
	local_flush_tlb_all();
}

void flush_tlb_all(void)
{
	on_each_cpu(flush_tlb_all_ipi, NULL, 1, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
	local_flush_tlb_mm((struct mm_struct *)mm);
}

/*
 * Special Variant of smp_call_function for use by TLB functions:
 *
 *  o No return value
 *  o collapses to normal function call on UP kernels
 *  o collapses to normal function call on systems with a single shared
 *    primary cache.
 *  o CONFIG_MIPS_MT_SMTC currently implies there is only one physical core.
 *
 * Callers that also need the handler run on the local CPU should use
 * smp_on_each_tlb() below.
 */
static inline void smp_on_other_tlbs(void (*func) (void *info), void *info)
{
#ifndef CONFIG_MIPS_MT_SMTC
	smp_call_function(func, info, 1, 1);
#endif
}

static inline void smp_on_each_tlb(void (*func) (void *info), void *info)
{
	preempt_disable();

	smp_on_other_tlbs(func, info);
	func(info);

	preempt_enable();
}

/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing.  For single threaded
 * address spaces, a new context is obtained on the current cpu, and tlb
 * contexts on other cpus are invalidated to force a new context allocation
 * at switch_mm time, should the mm ever be used on other cpus.  For
 * multithreaded address spaces, intercpu interrupts have to be sent.
 * Another case where intercpu interrupts are required is when the target
 * mm might be active on another cpu (eg debuggers doing the flushes on
 * behalf of debuggees, kswapd stealing pages from another process etc).
 * Kanoj 07/00.
 */

void flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		smp_on_other_tlbs(flush_tlb_mm_ipi, (void *)mm);
	} else {
		int i;

		for (i = 0; i < num_online_cpus(); i++)
			if (smp_processor_id() != i)
				cpu_context(i, mm) = 0;
	}
	local_flush_tlb_mm(mm);

	preempt_enable();
}

struct flush_tlb_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();
	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		struct flush_tlb_data fd;

		fd.vma = vma;
		fd.addr1 = start;
		fd.addr2 = end;
		smp_on_other_tlbs(flush_tlb_range_ipi, (void *)&fd);
	} else {
		int i;

		for (i = 0; i < num_online_cpus(); i++)
			if (smp_processor_id() != i)
				cpu_context(i, mm) = 0;
	}
	local_flush_tlb_range(vma, start, end);
	preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct flush_tlb_data fd;

	fd.addr1 = start;
	fd.addr2 = end;
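	/*
	 * fd lives on this stack frame, so pass wait=1 (last argument):
	 * every CPU must finish with it before we return.
	 */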
	on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1, 1);
}

static void flush_tlb_page_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	preempt_disable();
	if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) {
		struct flush_tlb_data fd;

		fd.vma = vma;
		fd.addr1 = page;
		smp_on_other_tlbs(flush_tlb_page_ipi, (void *)&fd);
	} else {
		int i;

		for (i = 0; i < num_online_cpus(); i++)
			if (smp_processor_id() != i)
				cpu_context(i, vma->vm_mm) = 0;
	}
	local_flush_tlb_page(vma, page);
	preempt_enable();
}

static void flush_tlb_one_ipi(void *info)
{
	unsigned long vaddr = (unsigned long) info;

	local_flush_tlb_one(vaddr);
}

void flush_tlb_one(unsigned long vaddr)
{
	smp_on_each_tlb(flush_tlb_one_ipi, (void *) vaddr);
}

EXPORT_SYMBOL(flush_tlb_page);
EXPORT_SYMBOL(flush_tlb_one);