/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2000, 2001 Kanoj Sarcar
 * Copyright (C) 2000, 2001 Ralf Baechle
 * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2003 Broadcom Corporation
 */
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/module.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/ftrace.h>

#include <asm/atomic.h>
#include <asm/processor.h>
#include <asm/r4k-timer.h>
#include <asm/system.h>
#include <asm/mmu_context.h>

#ifdef CONFIG_MIPS_MT_SMTC
#include <asm/mipsmtregs.h>
#endif /* CONFIG_MIPS_MT_SMTC */

volatile cpumask_t cpu_callin_map;	/* Bitmask of started secondaries */

int __cpu_number_map[NR_CPUS];		/* Map physical to logical */
EXPORT_SYMBOL(__cpu_number_map);

int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */
EXPORT_SYMBOL(__cpu_logical_map);

/* Number of TCs (or siblings in Intel speak) per CPU core */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* representing the TCs (or siblings in Intel speak) of each logical CPU */
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_sibling_map);

/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;
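
/*
 * Update the sibling maps as @cpu comes up: every CPU already in
 * cpu_sibling_setup_map that reports the same physical core becomes a
 * sibling of @cpu, in both directions.  A CPU without hardware siblings
 * ends up as the sole member of its own map.
 */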
static inline void set_cpu_sibling_map(int cpu)
{
	int i;

	cpu_set(cpu, cpu_sibling_setup_map);

	if (smp_num_siblings > 1) {
		for_each_cpu_mask(i, cpu_sibling_setup_map) {
			if (cpu_data[cpu].core == cpu_data[i].core) {
				cpu_set(i, cpu_sibling_map[cpu]);
				cpu_set(cpu, cpu_sibling_map[i]);
			}
		}
	} else
		cpu_set(cpu, cpu_sibling_map[cpu]);
}
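
/*
 * mp_ops collects the platform-specific SMP hooks (prepare_cpus,
 * boot_secondary, init_secondary, smp_finish, cpus_done) that the
 * generic code below calls through.  Platform setup code registers
 * them once, early; e.g. (the ops name here is made up, purely for
 * illustration):
 *
 *	register_smp_ops(&myboard_smp_ops);
 */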
struct plat_smp_ops *mp_ops;

__cpuinit void register_smp_ops(struct plat_smp_ops *ops)
{
	if (mp_ops)
		printk(KERN_WARNING "Overriding previously set SMP ops\n");

	mp_ops = ops;
}

/*
 * First C code run on the secondary CPUs after being started up by
 * the master.
 */
asmlinkage __cpuinit void start_secondary(void)
{
	unsigned int cpu;

#ifdef CONFIG_MIPS_MT_SMTC
	/* Only do cpu_probe for first TC of CPU */
	if ((read_c0_tcbind() & TCBIND_CURTC) == 0)
#endif /* CONFIG_MIPS_MT_SMTC */
	cpu_probe();
	cpu_report();
	per_cpu_trap_init();
	mips_clockevent_init();
	mp_ops->init_secondary();

	/*
	 * XXX parity protection should be folded in here when it's converted
	 * to an option instead of something based on .cputype
	 */

	calibrate_delay();
	preempt_disable();
	cpu = smp_processor_id();
	cpu_data[cpu].udelay_val = loops_per_jiffy;

	notify_cpu_starting(cpu);

	mp_ops->smp_finish();
	set_cpu_sibling_map(cpu);

	cpu_set(cpu, cpu_callin_map);

	synchronise_count_slave();

	cpu_idle();
}
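
/*
 * The cpu_callin_map bit set at the end of start_secondary() is the
 * secondary's "alive" signal: __cpu_up() below busy-waits on exactly
 * that bit before marking the CPU online.
 */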

/*
 * Call into both interrupt handlers, as we share the IPI for them
 */
void __irq_entry smp_call_function_interrupt(void)
{
	irq_enter();
	generic_smp_call_function_single_interrupt();
	generic_smp_call_function_interrupt();
	irq_exit();
}

static void stop_this_cpu(void *dummy)
{
	/*
	 * Remove this CPU:
	 */
	cpu_clear(smp_processor_id(), cpu_online_map);
	for (;;) {
		if (cpu_wait)
			(*cpu_wait)();		/* Wait if available. */
	}
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}
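
/*
 * Note the wait argument of 0 above: stop_this_cpu() never returns, so
 * waiting for the handler to complete on the other CPUs would hang.
 */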

void __init smp_cpus_done(unsigned int max_cpus)
{
	mp_ops->cpus_done();
	synchronise_count_master();
}

/* called from main before smp_init() */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	init_new_context(current, &init_mm);
	current_thread_info()->cpu = 0;
	mp_ops->prepare_cpus(max_cpus);
	set_cpu_sibling_map(0);
#ifndef CONFIG_HOTPLUG_CPU
	init_cpu_present(&cpu_possible_map);
#endif
}

/* preload SMP state for boot cpu */
void __devinit smp_prepare_boot_cpu(void)
{
	set_cpu_possible(0, true);
	set_cpu_online(0, true);
	cpu_set(0, cpu_callin_map);
}

/*
 * Called once for each "cpu_possible(cpu)".  Needs to spin up the cpu
 * and keep control until "cpu_online(cpu)" is set.  Note: cpu is
 * physical, not logical.
 */
static struct task_struct *cpu_idle_thread[NR_CPUS];
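
/*
 * Idle threads are created once and cached in the array above rather
 * than re-forked, so a CPU that goes offline and comes back (CPU
 * hotplug) reuses its original idle task; see the !cpu_idle_thread[cpu]
 * test in __cpu_up() below.
 */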

struct create_idle {
	struct work_struct work;
	struct task_struct *idle;
	struct completion done;
	int cpu;
};

static void __cpuinit do_fork_idle(struct work_struct *work)
{
	struct create_idle *c_idle =
		container_of(work, struct create_idle, work);

	c_idle->idle = fork_idle(c_idle->cpu);
	complete(&c_idle->done);
}

int __cpuinit __cpu_up(unsigned int cpu)
{
	struct task_struct *idle;

	/*
	 * Processor goes to start_secondary(), sets online flag
	 * The following code is purely to make sure
	 * Linux can schedule processes on this slave.
	 */
	if (!cpu_idle_thread[cpu]) {
		/*
		 * Schedule work item to avoid forking user task
		 * Ported from arch/x86/kernel/smpboot.c
		 */
		struct create_idle c_idle = {
			.cpu    = cpu,
			.done   = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
		};

		INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle);
		schedule_work(&c_idle.work);
		wait_for_completion(&c_idle.done);
		idle = cpu_idle_thread[cpu] = c_idle.idle;

		if (IS_ERR(idle))
			panic(KERN_ERR "Fork failed for CPU %d", cpu);
	} else {
		idle = cpu_idle_thread[cpu];
		init_idle(idle, cpu);
	}

	mp_ops->boot_secondary(cpu, idle);

	/*
	 * Trust is futile.  We should really have timeouts ...
	 */
	while (!cpu_isset(cpu, cpu_callin_map))
		udelay(100);

	cpu_set(cpu, cpu_online_map);

	return 0;
}

/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

static void flush_tlb_all_ipi(void *info)
{
	local_flush_tlb_all();
}

void flush_tlb_all(void)
{
	on_each_cpu(flush_tlb_all_ipi, NULL, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
	local_flush_tlb_mm((struct mm_struct *)mm);
}

/*
 * Special Variant of smp_call_function for use by TLB functions:
 *
 *  o No return value
 *  o collapses to normal function call on UP kernels
 *  o collapses to normal function call on systems with a single shared
 *    primary cache.
 *  o CONFIG_MIPS_MT_SMTC currently implies there is only one physical core.
 */
static inline void smp_on_other_tlbs(void (*func) (void *info), void *info)
{
#ifndef CONFIG_MIPS_MT_SMTC
	smp_call_function(func, info, 1);
#endif
}

static inline void smp_on_each_tlb(void (*func) (void *info), void *info)
{
	preempt_disable();

	smp_on_other_tlbs(func, info);
	func(info);

	preempt_enable();
}

/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing. For single threaded
 * address spaces, a new context is obtained on the current cpu, and tlb
 * context on other cpus are invalidated to force a new context allocation
 * at switch_mm time, should the mm ever be used on other cpus. For
 * multithreaded address spaces, intercpu interrupts have to be sent.
 * Another case where intercpu interrupts are required is when the target
 * mm might be active on another cpu (eg debuggers doing the flushes on
 * behalf of debugees, kswapd stealing pages from another process etc).
 */
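
/*
 * Concretely, for the single-threaded case sketched above: if a task
 * that is mm's only user calls flush_tlb_mm(mm) on CPU0 while mm still
 * has a stale context on CPU1, we just zero cpu_context(1, mm).  CPU1
 * then flushes lazily: the next switch_mm() to mm there sees context 0
 * and allocates a fresh ASID, so no IPI is needed for such mms.
 */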

void flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		smp_on_other_tlbs(flush_tlb_mm_ipi, mm);
	} else {
		cpumask_t mask = cpu_online_map;
		unsigned int cpu;

		cpu_clear(smp_processor_id(), mask);
		for_each_cpu_mask(cpu, mask)
			if (cpu_context(cpu, mm))
				cpu_context(cpu, mm) = 0;
	}
	local_flush_tlb_mm(mm);

	preempt_enable();
}
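
/*
 * An IPI handler receives a single void * argument, so the ranged
 * flushes below bundle their parameters in a flush_tlb_data on the
 * caller's stack.  This is safe because the senders pass wait=1: the
 * structure stays live until every handler has finished with it.
 */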
struct flush_tlb_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();
	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		struct flush_tlb_data fd = {
			.vma = vma,
			.addr1 = start,
			.addr2 = end,
		};

		smp_on_other_tlbs(flush_tlb_range_ipi, &fd);
	} else {
		cpumask_t mask = cpu_online_map;
		unsigned int cpu;

		cpu_clear(smp_processor_id(), mask);
		for_each_cpu_mask(cpu, mask)
			if (cpu_context(cpu, mm))
				cpu_context(cpu, mm) = 0;
	}
	local_flush_tlb_range(vma, start, end);
	preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct flush_tlb_data fd = {
		.addr1 = start,
		.addr2 = end,
	};

	on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1);
}

static void flush_tlb_page_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	preempt_disable();
	if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) {
		struct flush_tlb_data fd = {
			.vma = vma,
			.addr1 = page,
		};

		smp_on_other_tlbs(flush_tlb_page_ipi, &fd);
	} else {
		cpumask_t mask = cpu_online_map;
		unsigned int cpu;

		cpu_clear(smp_processor_id(), mask);
		for_each_cpu_mask(cpu, mask)
			if (cpu_context(cpu, vma->vm_mm))
				cpu_context(cpu, vma->vm_mm) = 0;
	}
	local_flush_tlb_page(vma, page);
	preempt_enable();
}

static void flush_tlb_one_ipi(void *info)
{
	unsigned long vaddr = (unsigned long) info;

	local_flush_tlb_one(vaddr);
}
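
/*
 * Note that flush_tlb_one() goes through smp_on_each_tlb(), so the IPI
 * handler also runs on the local CPU; the mm-based flushes above only
 * IPI the other CPUs and do the local flush directly.
 */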

void flush_tlb_one(unsigned long vaddr)
{
	smp_on_each_tlb(flush_tlb_one_ipi, (void *) vaddr);
}

EXPORT_SYMBOL(flush_tlb_page);
EXPORT_SYMBOL(flush_tlb_one);