/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2000, 2001 Kanoj Sarcar
 * Copyright (C) 2000, 2001 Ralf Baechle
 * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2003 Broadcom Corporation
 */
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/module.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/ftrace.h>

#include <linux/atomic.h>

#include <asm/processor.h>
#include <asm/r4k-timer.h>
#include <asm/mmu_context.h>
#include <asm/setup.h>
cpumask_t cpu_callin_map;		/* Bitmask of started secondaries */

int __cpu_number_map[NR_CPUS];		/* Map physical to logical */
EXPORT_SYMBOL(__cpu_number_map);

int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */
EXPORT_SYMBOL(__cpu_logical_map);
/* Number of TCs (or siblings in Intel speak) per CPU core */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* representing the TCs (or siblings in Intel speak) of each logical CPU */
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_sibling_map);

/* representing the core map of multi-core chips of each logical CPU */
cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_core_map);
/*
 * A logical cpu mask containing only one VPE per core to
 * reduce the number of IPIs on large MT systems.
 */
cpumask_t cpu_foreign_map __read_mostly;
EXPORT_SYMBOL(cpu_foreign_map);
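/*
 * For example (illustrative, numbers not from this file): on a system
 * with two cores running two VPEs each (logical CPUs 0-3), this mask
 * would hold one CPU per core, e.g. {0, 2}, halving the number of IPIs
 * needed for a broadcast.
 */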
/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;

/* representing cpus for which core maps can be computed */
static cpumask_t cpu_core_setup_map;

cpumask_t cpu_coherent_mask;
static inline void set_cpu_sibling_map(int cpu)
{
	int i;

	cpumask_set_cpu(cpu, &cpu_sibling_setup_map);

	if (smp_num_siblings > 1) {
		for_each_cpu(i, &cpu_sibling_setup_map) {
			if (cpu_data[cpu].package == cpu_data[i].package &&
			    cpu_data[cpu].core == cpu_data[i].core) {
				cpumask_set_cpu(i, &cpu_sibling_map[cpu]);
				cpumask_set_cpu(cpu, &cpu_sibling_map[i]);
			}
		}
	} else
		cpumask_set_cpu(cpu, &cpu_sibling_map[cpu]);
}
static inline void set_cpu_core_map(int cpu)
{
	int i;

	cpumask_set_cpu(cpu, &cpu_core_setup_map);

	for_each_cpu(i, &cpu_core_setup_map) {
		if (cpu_data[cpu].package == cpu_data[i].package) {
			cpumask_set_cpu(i, &cpu_core_map[cpu]);
			cpumask_set_cpu(cpu, &cpu_core_map[i]);
		}
	}
}
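/*
 * Together these give the scheduler its topology view: cpu_sibling_map
 * links VPEs that share both a package and a core, while cpu_core_map
 * links all CPUs within the same package.
 */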
/*
 * Calculate a new cpu_foreign_map mask whenever a
 * new cpu appears or disappears.
 */
static inline void calculate_cpu_foreign_map(void)
{
	int i, k, core_present;
	cpumask_t temp_foreign_map;

	/* Re-calculate the mask */
	cpumask_clear(&temp_foreign_map);
	for_each_online_cpu(i) {
		core_present = 0;
		for_each_cpu(k, &temp_foreign_map)
			if (cpu_data[i].package == cpu_data[k].package &&
			    cpu_data[i].core == cpu_data[k].core)
				core_present = 1;
		/* Keep only the first online VPE found on each core. */
		if (!core_present)
			cpumask_set_cpu(i, &temp_foreign_map);
	}

	cpumask_copy(&cpu_foreign_map, &temp_foreign_map);
}
struct plat_smp_ops *mp_ops;
EXPORT_SYMBOL(mp_ops);

void register_smp_ops(struct plat_smp_ops *ops)
{
	if (mp_ops)
		printk(KERN_WARNING "Overriding previously set SMP ops\n");

	mp_ops = ops;
}
/*
 * First C code run on the secondary CPUs after being started up by
 * the master.
 */
asmlinkage void start_secondary(void)
{
	unsigned int cpu;

	per_cpu_trap_init(false);
	mips_clockevent_init();
	mp_ops->init_secondary();

	/*
	 * XXX parity protection should be folded in here when it's converted
	 * to an option instead of something based on .cputype
	 */

	cpu = smp_processor_id();
	cpu_data[cpu].udelay_val = loops_per_jiffy;

	cpumask_set_cpu(cpu, &cpu_coherent_mask);
	notify_cpu_starting(cpu);

	set_cpu_online(cpu, true);

	set_cpu_sibling_map(cpu);
	set_cpu_core_map(cpu);

	calculate_cpu_foreign_map();

	/* Tell __cpu_up() that this CPU has reached the callin point. */
	cpumask_set_cpu(cpu, &cpu_callin_map);

	synchronise_count_slave(cpu);

	/*
	 * irq will be enabled in ->smp_finish(), enabling it too early
	 * is dangerous.
	 */
	WARN_ON_ONCE(!irqs_disabled());
	mp_ops->smp_finish();

	cpu_startup_entry(CPUHP_ONLINE);
}
static void stop_this_cpu(void *dummy)
{
	/*
	 * Remove this CPU. Be a bit slow here and
	 * set the bits for every online CPU so we don't miss
	 * any IPI whilst taking this VPE down.
	 */
	cpumask_copy(&cpu_foreign_map, cpu_online_mask);

	/* Make it visible to every other CPU */
	smp_mb();

	set_cpu_online(smp_processor_id(), false);
	calculate_cpu_foreign_map();
	local_irq_disable();
	while (1);
}
void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}
void __init smp_cpus_done(unsigned int max_cpus)
{
}
/* called from main before smp_init() */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	init_new_context(current, &init_mm);
	current_thread_info()->cpu = 0;
	mp_ops->prepare_cpus(max_cpus);
	set_cpu_sibling_map(0);
	set_cpu_core_map(0);
	calculate_cpu_foreign_map();
#ifndef CONFIG_HOTPLUG_CPU
	init_cpu_present(cpu_possible_mask);
#endif
	cpumask_copy(&cpu_coherent_mask, cpu_possible_mask);
}
/* preload SMP state for boot cpu */
void smp_prepare_boot_cpu(void)
{
	set_cpu_possible(0, true);
	set_cpu_online(0, true);
	cpumask_set_cpu(0, &cpu_callin_map);
}
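/*
 * Bring one secondary CPU into play: ask the platform to start it, then
 * spin until it announces itself via cpu_callin_map (set at the end of
 * start_secondary()) before synchronising the count registers.
 */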
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	mp_ops->boot_secondary(cpu, tidle);

	/*
	 * Trust is futile.  We should really have timeouts ...
	 */
	while (!cpumask_test_cpu(cpu, &cpu_callin_map)) {
		udelay(100);
		schedule();
	}

	synchronise_count_master(cpu);
	return 0;
}
/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}
static void flush_tlb_all_ipi(void *info)
{
	local_flush_tlb_all();
}

void flush_tlb_all(void)
{
	on_each_cpu(flush_tlb_all_ipi, NULL, 1);
}
static void flush_tlb_mm_ipi(void *mm)
{
	local_flush_tlb_mm((struct mm_struct *)mm);
}
/*
 * Special variant of smp_call_function for use by TLB functions:
 *
 *  o collapses to normal function call on UP kernels
 *  o collapses to normal function call on systems with a single shared
 *    primary cache.
 */
static inline void smp_on_other_tlbs(void (*func) (void *info), void *info)
{
	smp_call_function(func, info, 1);
}

/* Like smp_on_other_tlbs(), but also runs func on the local CPU. */
static inline void smp_on_each_tlb(void (*func) (void *info), void *info)
{
	preempt_disable();

	smp_on_other_tlbs(func, info);
	func(info);

	preempt_enable();
}
/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing. For single threaded
 * address spaces, a new context is obtained on the current cpu, and tlb
 * context on other cpus are invalidated to force a new context allocation
 * at switch_mm time, should the mm ever be used on other cpus. For
 * multithreaded address spaces, intercpu interrupts have to be sent.
 * Another case where intercpu interrupts are required is when the target
 * mm might be active on another cpu (e.g. debuggers doing the flushes on
 * behalf of debuggees, kswapd stealing pages from another process etc).
 */
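/*
 * Concretely (illustrative): when a single-threaded process unmaps a
 * region, only the local TLB needs flushing; zeroing cpu_context() on
 * the other cpus suffices, since they allocate a fresh ASID the next
 * time they switch_mm() to this mm. If the mm may be live elsewhere
 * (multiple users, or not the current mm), an IPI cannot be avoided.
 */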
void flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		/* mm may be in use elsewhere: IPI the other cpus. */
		smp_on_other_tlbs(flush_tlb_mm_ipi, mm);
	} else {
		unsigned int cpu;

		/* Single local user: just invalidate the remote contexts. */
		for_each_online_cpu(cpu) {
			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
				cpu_context(cpu, mm) = 0;
		}
	}
	local_flush_tlb_mm(mm);

	preempt_enable();
}
struct flush_tlb_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();
	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		struct flush_tlb_data fd = {
			.vma = vma,
			.addr1 = start,
			.addr2 = end,
		};

		smp_on_other_tlbs(flush_tlb_range_ipi, &fd);
	} else {
		unsigned int cpu;

		for_each_online_cpu(cpu) {
			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
				cpu_context(cpu, mm) = 0;
		}
	}
	local_flush_tlb_range(vma, start, end);
	preempt_enable();
}
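/*
 * Kernel mappings are global and have no mm/ASID to invalidate lazily,
 * so the kernel range flush below must run on every cpu, including the
 * calling one.
 */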
static void flush_tlb_kernel_range_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct flush_tlb_data fd = {
		.addr1 = start,
		.addr2 = end,
	};

	on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1);
}
static void flush_tlb_page_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_page(fd->vma, fd->addr1);
}
void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	preempt_disable();
	if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) {
		struct flush_tlb_data fd = {
			.vma = vma,
			.addr1 = page,
		};

		smp_on_other_tlbs(flush_tlb_page_ipi, &fd);
	} else {
		unsigned int cpu;

		for_each_online_cpu(cpu) {
			if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
				cpu_context(cpu, vma->vm_mm) = 0;
		}
	}
	local_flush_tlb_page(vma, page);
	preempt_enable();
}
static void flush_tlb_one_ipi(void *info)
{
	unsigned long vaddr = (unsigned long) info;

	local_flush_tlb_one(vaddr);
}

void flush_tlb_one(unsigned long vaddr)
{
	smp_on_each_tlb(flush_tlb_one_ipi, (void *) vaddr);
}

EXPORT_SYMBOL(flush_tlb_page);
EXPORT_SYMBOL(flush_tlb_one);
#if defined(CONFIG_KEXEC)
void (*dump_ipi_function_ptr)(void *) = NULL;
void dump_send_ipi(void (*dump_ipi_callback)(void *))
{
	int i;
	int cpu = smp_processor_id();

	dump_ipi_function_ptr = dump_ipi_callback;
	smp_mb();
	for_each_online_cpu(i)
		if (i != cpu)
			mp_ops->send_ipi_single(i, SMP_DUMP);
}
EXPORT_SYMBOL(dump_send_ipi);
#endif
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST

static DEFINE_PER_CPU(atomic_t, tick_broadcast_count);
static DEFINE_PER_CPU(struct call_single_data, tick_broadcast_csd);
void tick_broadcast(const struct cpumask *mask)
{
	atomic_t *count;
	struct call_single_data *csd;
	int cpu;

	for_each_cpu(cpu, mask) {
		count = &per_cpu(tick_broadcast_count, cpu);
		csd = &per_cpu(tick_broadcast_csd, cpu);

		if (atomic_inc_return(count) == 1)
			smp_call_function_single_async(cpu, csd);
	}
}
static void tick_broadcast_callee(void *info)
{
	int cpu = smp_processor_id();

	tick_receive_broadcast();
	atomic_set(&per_cpu(tick_broadcast_count, cpu), 0);
}
static int __init tick_broadcast_init(void)
{
	struct call_single_data *csd;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		csd = &per_cpu(tick_broadcast_csd, cpu);
		csd->func = tick_broadcast_callee;
	}

	return 0;
}
early_initcall(tick_broadcast_init);
#endif /* CONFIG_GENERIC_CLOCKEVENTS_BROADCAST */