/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2000, 2001 Kanoj Sarcar
 * Copyright (C) 2000, 2001 Ralf Baechle
 * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2003 Broadcom Corporation
 */
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/module.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/ftrace.h>

#include <linux/atomic.h>
#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/idle.h>
#include <asm/r4k-timer.h>
#include <asm/mmu_context.h>
#include <asm/time.h>
#include <asm/setup.h>
cpumask_t cpu_callin_map;		/* Bitmask of started secondaries */

int __cpu_number_map[NR_CPUS];		/* Map physical to logical */
EXPORT_SYMBOL(__cpu_number_map);

int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */
EXPORT_SYMBOL(__cpu_logical_map);
/* Number of TCs (or siblings in Intel speak) per CPU core */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* representing the TCs (or siblings in Intel speak) of each logical CPU */
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_sibling_map);

/* representing the core map of multi-core chips of each logical CPU */
cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_core_map);
/*
 * A logical cpu mask containing only one VPE per core to
 * reduce the number of IPIs on large MT systems.
 */
cpumask_t cpu_foreign_map __read_mostly;
EXPORT_SYMBOL(cpu_foreign_map);
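/*
 * To illustrate the intent (a sketch, not a call site in this file): an
 * IPI broadcast that honours cpu_foreign_map touches one VPE per core
 * instead of every sibling. my_ipi_foreign() is a hypothetical helper.
 *
 *	static void my_ipi_foreign(unsigned int action)
 *	{
 *		int cpu;
 *
 *		for_each_cpu_and(cpu, &cpu_foreign_map, cpu_online_mask)
 *			mp_ops->send_ipi_single(cpu, action);
 *	}
 */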
/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;

/* representing cpus for which core maps can be computed */
static cpumask_t cpu_core_setup_map;

cpumask_t cpu_coherent_mask;
static inline void set_cpu_sibling_map(int cpu)
{
	int i;

	cpumask_set_cpu(cpu, &cpu_sibling_setup_map);

	if (smp_num_siblings > 1) {
		for_each_cpu(i, &cpu_sibling_setup_map) {
			if (cpu_data[cpu].package == cpu_data[i].package &&
			    cpu_data[cpu].core == cpu_data[i].core) {
				cpumask_set_cpu(i, &cpu_sibling_map[cpu]);
				cpumask_set_cpu(cpu, &cpu_sibling_map[i]);
			}
		}
	} else
		cpumask_set_cpu(cpu, &cpu_sibling_map[cpu]);
}
static inline void set_cpu_core_map(int cpu)
{
	int i;

	cpumask_set_cpu(cpu, &cpu_core_setup_map);

	for_each_cpu(i, &cpu_core_setup_map) {
		if (cpu_data[cpu].package == cpu_data[i].package) {
			cpumask_set_cpu(i, &cpu_core_map[cpu]);
			cpumask_set_cpu(cpu, &cpu_core_map[i]);
		}
	}
}
/*
 * Calculate a new cpu_foreign_map mask whenever a
 * new cpu appears or disappears.
 */
static inline void calculate_cpu_foreign_map(void)
{
	int i, k, core_present;
	cpumask_t temp_foreign_map;

	/* Re-calculate the mask */
	cpumask_clear(&temp_foreign_map);
	for_each_online_cpu(i) {
		core_present = 0;
		for_each_cpu(k, &temp_foreign_map)
			if (cpu_data[i].package == cpu_data[k].package &&
			    cpu_data[i].core == cpu_data[k].core)
				core_present = 1;
		if (!core_present)
			cpumask_set_cpu(i, &temp_foreign_map);
	}

	cpumask_copy(&cpu_foreign_map, &temp_foreign_map);
}
struct plat_smp_ops *mp_ops;
EXPORT_SYMBOL(mp_ops);

void register_smp_ops(struct plat_smp_ops *ops)
{
	if (mp_ops)
		printk(KERN_WARNING "Overriding previously set SMP ops\n");

	mp_ops = ops;
}
/*
 * First C code run on the secondary CPUs after being started up by
 * the master.
 */
asmlinkage void start_secondary(void)
{
	unsigned int cpu;

	cpu_probe();
	per_cpu_trap_init(false);
	mips_clockevent_init();
	mp_ops->init_secondary();
	cpu_report();

	/*
	 * XXX parity protection should be folded in here when it's converted
	 * to an option instead of something based on .cputype
	 */

	calibrate_delay();
	preempt_disable();
	cpu = smp_processor_id();
	cpu_data[cpu].udelay_val = loops_per_jiffy;

	cpumask_set_cpu(cpu, &cpu_coherent_mask);
	notify_cpu_starting(cpu);

	set_cpu_online(cpu, true);

	set_cpu_sibling_map(cpu);
	set_cpu_core_map(cpu);

	calculate_cpu_foreign_map();

	cpumask_set_cpu(cpu, &cpu_callin_map);

	synchronise_count_slave(cpu);

	/*
	 * irq will be enabled in ->smp_finish(), enabling it too early
	 * is dangerous.
	 */
	WARN_ON_ONCE(!irqs_disabled());
	mp_ops->smp_finish();

	cpu_startup_entry(CPUHP_ONLINE);
}
static void stop_this_cpu(void *dummy)
{
	/*
	 * Remove this CPU. Be a bit slow here and
	 * set the bits for every online CPU so we don't miss
	 * any IPI whilst taking this VPE down.
	 */
	cpumask_copy(&cpu_foreign_map, cpu_online_mask);

	/* Make it visible to every other CPU */
	smp_mb();

	set_cpu_online(smp_processor_id(), false);
	calculate_cpu_foreign_map();
	local_irq_disable();
	while (1);
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}
void __init smp_cpus_done(unsigned int max_cpus)
{
}

/* called from main before smp_init() */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	init_new_context(current, &init_mm);
	current_thread_info()->cpu = 0;
	mp_ops->prepare_cpus(max_cpus);
	set_cpu_sibling_map(0);
	set_cpu_core_map(0);
	calculate_cpu_foreign_map();
#ifndef CONFIG_HOTPLUG_CPU
	init_cpu_present(cpu_possible_mask);
#endif
	cpumask_copy(&cpu_coherent_mask, cpu_possible_mask);
}
/* preload SMP state for boot cpu */
void smp_prepare_boot_cpu(void)
{
	set_cpu_possible(0, true);
	set_cpu_online(0, true);
	cpumask_set_cpu(0, &cpu_callin_map);
}
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	mp_ops->boot_secondary(cpu, tidle);

	/*
	 * Trust is futile.  We should really have timeouts ...
	 */
	while (!cpumask_test_cpu(cpu, &cpu_callin_map)) {
		udelay(100);
		schedule();
	}

	synchronise_count_master(cpu);
	return 0;
}
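/*
 * Note: __cpu_up() is invoked by the generic hotplug core (from smp_init()
 * at boot, or when a cpu is onlined through sysfs); the loop above simply
 * spins until start_secondary() on the new cpu has set its bit in
 * cpu_callin_map.
 */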
/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

static void flush_tlb_all_ipi(void *info)
{
	local_flush_tlb_all();
}

void flush_tlb_all(void)
{
	on_each_cpu(flush_tlb_all_ipi, NULL, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
	local_flush_tlb_mm((struct mm_struct *)mm);
}
/*
 * Special Variant of smp_call_function for use by TLB functions:
 *
 * o No return value
 * o collapses to normal function call on UP kernels
 * o collapses to normal function call on systems with a single shared
 *   primary cache.
 */
static inline void smp_on_other_tlbs(void (*func) (void *info), void *info)
{
	smp_call_function(func, info, 1);
}

static inline void smp_on_each_tlb(void (*func) (void *info), void *info)
{
	preempt_disable();

	smp_on_other_tlbs(func, info);
	func(info);

	preempt_enable();
}
/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing. For single threaded
 * address spaces, a new context is obtained on the current cpu, and tlb
 * context on other cpus are invalidated to force a new context allocation
 * at switch_mm time, should the mm ever be used on other cpus. For
 * multithreaded address spaces, intercpu interrupts have to be sent.
 * Another case where intercpu interrupts are required is when the target
 * mm might be active on another cpu (eg debuggers doing the flushes on
 * behalf of debuggees, kswapd stealing pages from another process etc).
 * Kanoj 07/00.
 */

void flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		/* The mm may be in use on other cpus: interrupt them. */
		smp_on_other_tlbs(flush_tlb_mm_ipi, mm);
	} else {
		unsigned int cpu;

		/*
		 * Single user: just invalidate the stale contexts so the
		 * other cpus allocate a new one at switch_mm time.
		 */
		for_each_online_cpu(cpu) {
			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
				cpu_context(cpu, mm) = 0;
		}
	}
	local_flush_tlb_mm(mm);

	preempt_enable();
}
struct flush_tlb_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();
	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		struct flush_tlb_data fd = {
			.vma = vma,
			.addr1 = start,
			.addr2 = end,
		};

		smp_on_other_tlbs(flush_tlb_range_ipi, &fd);
	} else {
		unsigned int cpu;

		for_each_online_cpu(cpu) {
			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
				cpu_context(cpu, mm) = 0;
		}
	}
	local_flush_tlb_range(vma, start, end);
	preempt_enable();
}
static void flush_tlb_kernel_range_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct flush_tlb_data fd = {
		.addr1 = start,
		.addr2 = end,
	};

	on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1);
}
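/*
 * Kernel mappings have no per-mm context to lazily invalidate, so the
 * range is flushed on every online cpu via on_each_cpu(). A sketch of a
 * typical caller (my_unmap_kernel_buf() is a hypothetical helper):
 *
 *	static void my_unmap_kernel_buf(unsigned long addr, unsigned long size)
 *	{
 *		unmap_kernel_range_noflush(addr, size);
 *		flush_tlb_kernel_range(addr, addr + size);
 *	}
 */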
static void flush_tlb_page_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_page(fd->vma, fd->addr1);
}
void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	preempt_disable();
	if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) {
		struct flush_tlb_data fd = {
			.vma = vma,
			.addr1 = page,
		};

		smp_on_other_tlbs(flush_tlb_page_ipi, &fd);
	} else {
		unsigned int cpu;

		for_each_online_cpu(cpu) {
			if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
				cpu_context(cpu, vma->vm_mm) = 0;
		}
	}
	local_flush_tlb_page(vma, page);
	preempt_enable();
}
static void flush_tlb_one_ipi(void *info)
{
	unsigned long vaddr = (unsigned long) info;

	local_flush_tlb_one(vaddr);
}

void flush_tlb_one(unsigned long vaddr)
{
	smp_on_each_tlb(flush_tlb_one_ipi, (void *) vaddr);
}

EXPORT_SYMBOL(flush_tlb_page);
EXPORT_SYMBOL(flush_tlb_one);
#if defined(CONFIG_KEXEC)
void (*dump_ipi_function_ptr)(void *) = NULL;
void dump_send_ipi(void (*dump_ipi_callback)(void *))
{
	int i;
	int cpu = smp_processor_id();

	dump_ipi_function_ptr = dump_ipi_callback;
	smp_mb();
	for_each_online_cpu(i)
		if (i != cpu)
			mp_ops->send_ipi_single(i, SMP_DUMP);
}
EXPORT_SYMBOL(dump_send_ipi);
#endif
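/*
 * A sketch of how a crash/kexec path might use dump_send_ipi(): park every
 * other cpu in a quiet loop before dumping. my_crash_stop_cpu() is a
 * hypothetical callback.
 *
 *	static void my_crash_stop_cpu(void *arg)
 *	{
 *		local_irq_disable();
 *		while (1)
 *			cpu_relax();
 *	}
 *
 *	dump_send_ipi(my_crash_stop_cpu);
 */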
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST

static DEFINE_PER_CPU(atomic_t, tick_broadcast_count);
static DEFINE_PER_CPU(struct call_single_data, tick_broadcast_csd);

void tick_broadcast(const struct cpumask *mask)
{
	atomic_t *count;
	struct call_single_data *csd;
	int cpu;

	for_each_cpu(cpu, mask) {
		count = &per_cpu(tick_broadcast_count, cpu);
		csd = &per_cpu(tick_broadcast_csd, cpu);

		/* Only enqueue the csd once until the callee has run. */
		if (atomic_inc_return(count) == 1)
			smp_call_function_single_async(cpu, csd);
	}
}

static void tick_broadcast_callee(void *info)
{
	int cpu = smp_processor_id();
	tick_receive_broadcast();
	atomic_set(&per_cpu(tick_broadcast_count, cpu), 0);
}

static int __init tick_broadcast_init(void)
{
	struct call_single_data *csd;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		csd = &per_cpu(tick_broadcast_csd, cpu);
		csd->func = tick_broadcast_callee;
	}

	return 0;
}
early_initcall(tick_broadcast_init);
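/*
 * The per-cpu atomic coalesces broadcasts: a call_single_data must not be
 * re-enqueued before its previous run completes, so only the 0 -> 1
 * transition of tick_broadcast_count hands the csd to
 * smp_call_function_single_async(), and tick_broadcast_callee() resets the
 * count once the target cpu has received the broadcast. An illustrative
 * sequence for a single target cpu:
 *
 *	tick_broadcast(mask)		count 0 -> 1	csd enqueued
 *	tick_broadcast(mask)		count 1 -> 2	no enqueue (pending)
 *	tick_broadcast_callee()		count -> 0	next broadcast enqueues
 */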
#endif /* CONFIG_GENERIC_CLOCKEVENTS_BROADCAST */