/*
 * SMP support for the SuperH processors.
 *
 * Copyright (C) 2002 - 2007 Paul Mundt
 * Copyright (C) 2006 - 2007 Akio Idehara
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/err.h>
#include <linux/cache.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <asm/atomic.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
int __cpu_number_map[NR_CPUS];          /* Map physical to logical */
int __cpu_logical_map[NR_CPUS];         /* Map logical to physical */

cpumask_t cpu_possible_map;
EXPORT_SYMBOL(cpu_possible_map);

cpumask_t cpu_online_map;
EXPORT_SYMBOL(cpu_online_map);

static atomic_t cpus_booted = ATOMIC_INIT(0);
/*
 * Run specified function on a particular processor.
 */
void __smp_call_function(unsigned int cpu);
static inline void __init smp_store_cpu_info(unsigned int cpu)
{
        struct sh_cpuinfo *c = cpu_data + cpu;

        c->loops_per_jiffy = loops_per_jiffy;
}
void __init smp_prepare_cpus(unsigned int max_cpus)
{
        unsigned int cpu = smp_processor_id();

        init_new_context(current, &init_mm);
        current_thread_info()->cpu = cpu;
        plat_prepare_cpus(max_cpus);

#ifndef CONFIG_HOTPLUG_CPU
        cpu_present_map = cpu_possible_map;
#endif
}
void __devinit smp_prepare_boot_cpu(void)
{
        unsigned int cpu = smp_processor_id();

        __cpu_number_map[0] = cpu;
        __cpu_logical_map[0] = cpu;

        cpu_set(cpu, cpu_online_map);
        cpu_set(cpu, cpu_possible_map);
}
asmlinkage void __cpuinit start_secondary(void)
{
        unsigned int cpu;
        struct mm_struct *mm = &init_mm;

        atomic_inc(&mm->mm_count);
        atomic_inc(&mm->mm_users);
        current->active_mm = mm;
        BUG_ON(current->mm);
        enter_lazy_tlb(mm, current);

        per_cpu_trap_init();

        preempt_disable();

        local_irq_enable();

        calibrate_delay();

        cpu = smp_processor_id();
        smp_store_cpu_info(cpu);

        cpu_set(cpu, cpu_online_map);

        cpu_idle();
}
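/*
 * Note on the mm accounting above: the secondary cpu comes up running the
 * idle thread, which has no user mm of its own. Taking references on
 * init_mm and installing it as active_mm lets the cpu run in lazy-TLB
 * mode until switch_mm() installs the first real task's address space.
 */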
extern struct {
        unsigned long sp;
        unsigned long bss_start;
        unsigned long bss_end;
        void *start_kernel_fn;
        void *cpu_init_fn;
        void *thread_info;
} stack_start;
int __cpuinit __cpu_up(unsigned int cpu)
{
        struct task_struct *tsk;
        unsigned long timeout;

        tsk = fork_idle(cpu);
        if (IS_ERR(tsk)) {
                printk(KERN_ERR "Failed forking idle task for cpu %d\n", cpu);
                return PTR_ERR(tsk);
        }

        /* Fill in data in head.S for secondary cpus */
        stack_start.sp = tsk->thread.sp;
        stack_start.thread_info = tsk->stack;
        stack_start.bss_start = 0; /* don't clear bss for secondary cpus */
        stack_start.start_kernel_fn = start_secondary;

        flush_cache_all();

        plat_start_cpu(cpu, (unsigned long)_stext);

        timeout = jiffies + HZ;
        while (time_before(jiffies, timeout)) {
                if (cpu_online(cpu))
                        break;

                udelay(10);
        }

        if (cpu_online(cpu))
                return 0;

        return -ENOENT;
}
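/*
 * Bring-up handshake, as implemented above: __cpu_up() parks the idle
 * task's stack and entry point in stack_start, head.S on the secondary
 * cpu picks those up once plat_start_cpu() releases it at _stext, the
 * secondary eventually marks itself in cpu_online_map from
 * start_secondary(), and __cpu_up() polls for that for up to one second
 * (HZ jiffies) before giving up with -ENOENT.
 */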
void __init smp_cpus_done(unsigned int max_cpus)
{
        unsigned long bogosum = 0;
        int cpu;

        for_each_online_cpu(cpu)
                bogosum += cpu_data[cpu].loops_per_jiffy;

        printk(KERN_INFO "SMP: Total of %d processors activated "
               "(%lu.%02lu BogoMIPS).\n", num_online_cpus(),
               bogosum / (500000/HZ),
               (bogosum / (5000/HZ)) % 100);
}
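/*
 * Worked example for the fixed-point arithmetic above: BogoMIPS is
 * loops_per_jiffy * HZ / 500000. With HZ = 100 and bogosum = 1997864,
 * bogosum / (500000/HZ) = 1997864 / 5000 = 399, and
 * (bogosum / (5000/HZ)) % 100 = (1997864 / 50) % 100 = 39957 % 100 = 57,
 * so the message reads "399.57 BogoMIPS".
 */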
void smp_send_reschedule(int cpu)
{
        plat_send_ipi(cpu, SMP_MSG_RESCHEDULE);
}
static void stop_this_cpu(void *unused)
{
        cpu_clear(smp_processor_id(), cpu_online_map);
        local_irq_disable();

        for (;;)
                cpu_relax();
}
void smp_send_stop(void)
{
        smp_call_function(stop_this_cpu, 0, 1, 0);
}
struct smp_fn_call_struct smp_fn_call = {
        .lock           = __SPIN_LOCK_UNLOCKED(smp_fn_call.lock),
        .finished       = ATOMIC_INIT(0),
};
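/*
 * The receiving half of SMP_MSG_FUNCTION lives in the platform IPI code,
 * not in this file. A minimal sketch of what a receiver is assumed to do
 * with this structure (illustrative only; the function name below is a
 * hypothetical stand-in, not the actual handler):
 */
#if 0
static void example_function_ipi(void)
{
        /* Run the requested function with its argument ... */
        smp_fn_call.fn(smp_fn_call.data);
        /* ... then signal completion so a waiting caller can proceed. */
        atomic_inc(&smp_fn_call.finished);
}
#endif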
/*
 * The caller of this wants the passed function to run on every cpu. If wait
 * is set, wait until all cpus have finished the function before returning.
 * The lock is here to protect the call structure.
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function(void (*func)(void *info), void *info, int retry, int wait)
{
        unsigned int nr_cpus = atomic_read(&cpus_booted);
        int i;

        /* Can deadlock when called with interrupts disabled */
        WARN_ON(irqs_disabled());

        spin_lock(&smp_fn_call.lock);

        atomic_set(&smp_fn_call.finished, 0);
        smp_fn_call.fn = func;
        smp_fn_call.data = info;

        for (i = 0; i < nr_cpus; i++)
                if (i != smp_processor_id())
                        plat_send_ipi(i, SMP_MSG_FUNCTION);

        if (wait)
                while (atomic_read(&smp_fn_call.finished) != (nr_cpus - 1))
                        ; /* spin until the other cpus have run func */

        spin_unlock(&smp_fn_call.lock);

        return 0;
}
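/*
 * A hypothetical caller sketch (not part of the original file): note that
 * smp_call_function() only targets the *other* cpus, so a caller that
 * wants the function run everywhere must also invoke it locally. The
 * names below are illustrative assumptions.
 */
#if 0
static void example_count_cpu(void *counter)
{
        atomic_inc((atomic_t *)counter);
}

static int example_count_online(void)
{
        atomic_t count = ATOMIC_INIT(0);

        smp_call_function(example_count_cpu, &count, 1, 1); /* other cpus */
        example_count_cpu(&count);                          /* this cpu */

        return atomic_read(&count);
}
#endif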
/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
        return 0;
}
static void flush_tlb_all_ipi(void *info)
{
        local_flush_tlb_all();
}
void flush_tlb_all(void)
{
        on_each_cpu(flush_tlb_all_ipi, 0, 1, 1);
}
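/*
 * Note the contrast with smp_call_function(): on_each_cpu() also runs the
 * handler on the calling cpu, which is why no separate
 * local_flush_tlb_all() call is needed here.
 */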
static void flush_tlb_mm_ipi(void *mm)
{
        local_flush_tlb_mm((struct mm_struct *)mm);
}
/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing. For single threaded
 * address spaces, a new context is obtained on the current cpu, and tlb
 * context on other cpus are invalidated to force a new context allocation
 * at switch_mm time, should the mm ever be used on other cpus. For
 * multithreaded address spaces, intercpu interrupts have to be sent.
 * Another case where intercpu interrupts are required is when the target
 * mm might be active on another cpu (eg debuggers doing the flushes on
 * behalf of debuggees, kswapd stealing pages from another process etc).
 */
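/*
 * Concretely, zeroing cpu_context(cpu, mm) in the functions below marks
 * the mm's ASID as stale on that cpu; the next switch_mm() there is then
 * expected to allocate a fresh context, so no cross-cpu interrupt is
 * needed for the single-threaded case.
 */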
void flush_tlb_mm(struct mm_struct *mm)
{
        preempt_disable();

        if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
                smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1, 1);
        } else {
                int i;
                for (i = 0; i < num_online_cpus(); i++)
                        if (smp_processor_id() != i)
                                cpu_context(i, mm) = 0;
        }
        local_flush_tlb_mm(mm);

        preempt_enable();
}
struct flush_tlb_data {
        struct vm_area_struct *vma;
        unsigned long addr1;
        unsigned long addr2;
};
static void flush_tlb_range_ipi(void *info)
{
        struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

        local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}
void flush_tlb_range(struct vm_area_struct *vma,
                     unsigned long start, unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;

        preempt_disable();
        if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
                struct flush_tlb_data fd;

                fd.vma = vma;
                fd.addr1 = start;
                fd.addr2 = end;
                smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1, 1);
        } else {
                int i;
                for (i = 0; i < num_online_cpus(); i++)
                        if (smp_processor_id() != i)
                                cpu_context(i, mm) = 0;
        }
        local_flush_tlb_range(vma, start, end);
        preempt_enable();
}
static void flush_tlb_kernel_range_ipi(void *info)
{
        struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

        local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        struct flush_tlb_data fd;

        fd.addr1 = start;
        fd.addr2 = end;
        on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1, 1);
}
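/*
 * This variant is for kernel-only mappings (e.g. vmalloc area changes),
 * where there is no user mm to key off of, so every cpu flushes
 * unconditionally via on_each_cpu().
 */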
static void flush_tlb_page_ipi(void *info)
{
        struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

        local_flush_tlb_page(fd->vma, fd->addr1);
}
void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
        preempt_disable();
        if ((atomic_read(&vma->vm_mm->mm_users) != 1) ||
            (current->mm != vma->vm_mm)) {
                struct flush_tlb_data fd;

                fd.vma = vma;
                fd.addr1 = page;
                smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1, 1);
        } else {
                int i;
                for (i = 0; i < num_online_cpus(); i++)
                        if (smp_processor_id() != i)
                                cpu_context(i, vma->vm_mm) = 0;
        }
        local_flush_tlb_page(vma, page);
        preempt_enable();
}
static void flush_tlb_one_ipi(void *info)
{
        struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
        local_flush_tlb_one(fd->addr1, fd->addr2);
}
void flush_tlb_one(unsigned long asid, unsigned long vaddr)
{
        struct flush_tlb_data fd;

        fd.addr1 = asid;
        fd.addr2 = vaddr;

        smp_call_function(flush_tlb_one_ipi, (void *)&fd, 1, 1);
        local_flush_tlb_one(asid, vaddr);
}