#include <linux/types.h>
#include <asm/delay.h>
#include <irq.h>

#include <hwregs/intr_vect.h>
#include <hwregs/intr_vect_defs.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <hwregs/asm/mmu_defs_asm.h>
#include <hwregs/supp_reg.h>
#include <linux/atomic.h>

#include <linux/err.h>
#include <linux/init.h>
#include <linux/timex.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#define IPI_SCHEDULE 1
#define IPI_CALL 2
#define IPI_FLUSH_TLB 4
#define IPI_BOOT 8
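
/*
 * The IPI types are bit flags in the hardware rw_ipi register:
 * send_ipi() ORs a vector into the register and the interrupt handler
 * tests each bit, so several IPI types can be pending at once. The
 * values therefore must be distinct powers of two.
 */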
/* Sentinel passed as the mm/vma argument to mean "flush everything" */
#define FLUSH_ALL (void*)0xffffffff
/* Vector of locks used for various atomic operations */
spinlock_t cris_atomic_locks[] = {
	[0 ... LOCK_COUNT - 1] = __SPIN_LOCK_UNLOCKED(cris_atomic_locks)
};
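
/*
 * CRIS v32 has no atomic read-modify-write instructions, so the
 * arch's atomic and bitop helpers fall back on this vector of
 * spinlocks (one lock is picked per operation, presumably hashed
 * from the operand address, which is why a LOCK_COUNT-sized vector
 * exists rather than a single lock).
 */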
cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
EXPORT_SYMBOL(phys_cpu_present_map);
/* Variables used during SMP boot */
volatile int cpu_now_booting = 0;
volatile struct thread_info *smp_init_current_idle_thread;
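
/*
 * Both variables are volatile on purpose: they are written by the CPU
 * doing the booting and read by the freshly started secondary in
 * smp_callin() before any ordinary synchronization is available.
 */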
/* Variables used during IPI */
static DEFINE_SPINLOCK(call_lock);
static DEFINE_SPINLOCK(tlbstate_lock);
struct call_data_struct {
	void (*func) (void *info);
	void *info;
	int wait;
};

static struct call_data_struct *call_data;
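
/*
 * call_data points at the on-stack call_data_struct of whichever CPU
 * is currently inside smp_call_function(); call_lock serializes the
 * cross-call so only one such frame is published at a time.
 */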
/* Arguments for a pending IPI_FLUSH_TLB, guarded by tlbstate_lock */
static struct mm_struct *flush_mm;
static struct vm_area_struct *flush_vma;
static unsigned long flush_addr;
/* Interrupt controller register banks, one per CPU */
static unsigned long irq_regs[NR_CPUS] = {
	regi_irq,
	regi_irq2
};
static irqreturn_t crisv32_ipi_interrupt(int irq, void *dev_id);
static int send_ipi(int vector, int wait, cpumask_t cpu_mask);
static struct irqaction irq_ipi = {
	.handler = crisv32_ipi_interrupt,
	.flags = IRQF_DISABLED,	/* run the handler with interrupts off */
	.name = "ipi"
};
extern void cris_mmu_init(void);
extern void cris_timer_init(void);
/* SMP initialization */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int i;

	/* From now on we can expect IPIs so set them up */
	setup_irq(IPI_INTR_VECT, &irq_ipi);

	/* Mark all possible CPUs as present */
	for (i = 0; i < max_cpus; i++)
		cpumask_set_cpu(i, &phys_cpu_present_map);
}
void smp_prepare_boot_cpu(void)
{
	/* PGD pointer has moved after per_cpu initialization so
	 * update the MMU.
	 */
	pgd_t **pgd;
	pgd = (pgd_t **)&per_cpu(current_pgd, smp_processor_id());

	/* Write the new PGD to both MMU support-register banks */
	SUPP_BANK_SEL(1);
	SUPP_REG_WR(RW_MM_TLB_PGD, pgd);
	SUPP_BANK_SEL(2);
	SUPP_REG_WR(RW_MM_TLB_PGD, pgd);

	set_cpu_online(0, true);
	cpumask_set_cpu(0, &phys_cpu_present_map);
	set_cpu_possible(0, true);
}
void __init smp_cpus_done(unsigned int max_cpus)
{
}
/* Bring one cpu online. */
static int __init smp_boot_one_cpu(int cpuid, struct task_struct *idle)
{
	unsigned timeout;
	cpumask_t cpu_mask;

	cpumask_clear(&cpu_mask);
	task_thread_info(idle)->cpu = cpuid;

	/* Information to the CPU that is about to boot */
	smp_init_current_idle_thread = task_thread_info(idle);
	cpu_now_booting = cpuid;

	/* Kick it awake with a boot IPI */
	set_cpu_online(cpuid, true);
	cpumask_set_cpu(cpuid, &cpu_mask);
	send_ipi(IPI_BOOT, 0, cpu_mask);
	set_cpu_online(cpuid, false);

	/* Wait for CPU to come online */
	for (timeout = 0; timeout < 10000; timeout++) {
		if (cpu_online(cpuid)) {
			cpu_now_booting = 0;
			smp_init_current_idle_thread = NULL;
			return 0; /* CPU online */
		}
		udelay(100);
		barrier();
	}

	printk(KERN_CRIT "SMP: CPU:%d is stuck.\n", cpuid);
	return -1;
}
/* Secondary CPUs start executing C code here. Here we need to set up
 * CPU-specific state such as the local timer and the MMU. */
void __init smp_callin(void)
{
	extern void cpu_idle(void);

	int cpu = cpu_now_booting;
	reg_intr_vect_rw_mask vect_mask = {0};

	/* Initialise the idle task for this CPU */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	/* Set up MMU */
	cris_mmu_init();
	__flush_tlb_all();

	/* Setup local timer. */
	cris_timer_init();

	/* Enable IRQ and idle */
	REG_WR(intr_vect, irq_regs[cpu], rw_mask, vect_mask);
	crisv32_unmask_irq(IPI_INTR_VECT);
	crisv32_unmask_irq(TIMER0_INTR_VECT);
	preempt_disable();
	notify_cpu_starting(cpu);
	local_irq_enable();

	set_cpu_online(cpu, true);
	cpu_idle();
}
/* Stop execution on this CPU. */
void stop_this_cpu(void *dummy)
{
	/* Halt with interrupts disabled so the core never wakes again */
	local_irq_disable();
	asm volatile("halt");
}
void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}
int setup_profiling_timer(unsigned int multiplier)
{
	/* Changing the profiling multiplier is not supported */
	return -EINVAL;
}
/* cache_decay_ticks is used by the scheduler to decide if a process
 * is "hot" on one CPU. A higher value means a higher penalty to move
 * a process to another CPU. Our cache is rather small so we report
 * 1 tick.
 */
unsigned long cache_decay_ticks = 1;
int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	smp_boot_one_cpu(cpu, tidle);
	return cpu_online(cpu) ? 0 : -ENOSYS;
}
void smp_send_reschedule(int cpu)
{
	cpumask_t cpu_mask;

	cpumask_clear(&cpu_mask);
	cpumask_set_cpu(cpu, &cpu_mask);
	send_ipi(IPI_SCHEDULE, 0, cpu_mask);
}
/* TLB flushing
 *
 * Flush needs to be done on the local CPU and on any other CPU that
 * may have the same mapping. The mm->cpu_vm_mask is used to keep track
 * of which CPUs a specific process has been executed on.
 */
void flush_tlb_common(struct mm_struct *mm, struct vm_area_struct *vma,
		      unsigned long addr)
{
	unsigned long flags;
	cpumask_t cpu_mask;

	spin_lock_irqsave(&tlbstate_lock, flags);
	cpu_mask = (mm == FLUSH_ALL ? cpu_all_mask : *mm_cpumask(mm));
	cpumask_clear_cpu(smp_processor_id(), &cpu_mask);

	/* Publish the arguments, then wait for the other CPUs to flush */
	flush_mm = mm;
	flush_vma = vma;
	flush_addr = addr;
	send_ipi(IPI_FLUSH_TLB, 1, cpu_mask);
	spin_unlock_irqrestore(&tlbstate_lock, flags);
}
void flush_tlb_all(void)
{
	__flush_tlb_all();
	flush_tlb_common(FLUSH_ALL, FLUSH_ALL, 0);
}
void flush_tlb_mm(struct mm_struct *mm)
{
	__flush_tlb_mm(mm);
	flush_tlb_common(mm, FLUSH_ALL, 0);
	/* No more mappings in other CPUs */
	cpumask_clear(mm_cpumask(mm));
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
}
void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	__flush_tlb_page(vma, addr);
	flush_tlb_common(vma->vm_mm, vma, addr);
}
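
/*
 * The three wrappers above are this arch's implementations of the
 * generic mm TLB-flush hooks: each performs the local flush first and
 * then uses flush_tlb_common() to broadcast an IPI_FLUSH_TLB to any
 * other CPU that may hold the same mapping.
 */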
/* Inter processor interrupts
 *
 * The IPIs are used for:
 *   * Force a schedule on a CPU
 *   * Flush TLB on other CPUs
 *   * Call a function on other CPUs
 */
int send_ipi(int vector, int wait, cpumask_t cpu_mask)
{
	int i = 0;
	int j, ret = 0;
	reg_intr_vect_rw_ipi ipi = REG_RD(intr_vect, irq_regs[i], rw_ipi);

	/* Calculate CPUs to send to. */
	cpumask_and(&cpu_mask, &cpu_mask, cpu_online_mask);

	/* Send the IPI by setting the vector bit on each target CPU. */
	for_each_cpu(i, &cpu_mask) {
		ipi.vector |= vector;
		REG_WR(intr_vect, irq_regs[i], rw_ipi, ipi);
	}

	/* Wait for IPI to finish on other CPUs */
	if (wait) {
		for_each_cpu(i, &cpu_mask) {
			for (j = 0; j < 1000; j++) {
				ipi = REG_RD(intr_vect, irq_regs[i], rw_ipi);
				if (!ipi.vector)
					break;
				udelay(100);
			}
			if (ipi.vector) {
				printk("SMP call timeout from %d to %d\n",
				       smp_processor_id(), i);
				ret = -ETIMEDOUT;
			}
		}
	}

	return ret;
}
/*
 * You must not call this function with disabled interrupts, from a
 * hardware interrupt handler, or from a bottom half handler.
 */
int smp_call_function(void (*func)(void *info), void *info, int wait)
{
	cpumask_t cpu_mask;
	struct call_data_struct data;
	int ret;

	cpumask_setall(&cpu_mask);
	cpumask_clear_cpu(smp_processor_id(), &cpu_mask);

	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	data.wait = wait;

	spin_lock(&call_lock);
	call_data = &data;
	ret = send_ipi(IPI_CALL, wait, cpu_mask);
	spin_unlock(&call_lock);

	return ret;
}
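
/*
 * Hypothetical usage sketch (not part of the original file, kept
 * compiled out): run a function on every other online CPU and wait
 * for completion. The callback runs in IPI (interrupt) context on
 * each target CPU, so it must not sleep. The example_* names below
 * are illustrative only.
 */
#if 0
static void example_note_cpu(void *info)
{
	printk("IPI handled on CPU %d\n", smp_processor_id());
}

static void example_cross_call(void)
{
	smp_call_function(example_note_cpu, NULL, 1 /* wait */);
}
#endif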
irqreturn_t crisv32_ipi_interrupt(int irq, void *dev_id)
{
	void (*func)(void *info) = call_data->func;
	void *info = call_data->info;
	reg_intr_vect_rw_ipi ipi;

	ipi = REG_RD(intr_vect, irq_regs[smp_processor_id()], rw_ipi);

	if (ipi.vector & IPI_SCHEDULE) {
		scheduler_ipi();
	}
	if (ipi.vector & IPI_CALL) {
		func(info);
	}
	if (ipi.vector & IPI_FLUSH_TLB) {
		if (flush_mm == FLUSH_ALL)
			__flush_tlb_all();
		else if (flush_vma == FLUSH_ALL)
			__flush_tlb_mm(flush_mm);
		else
			__flush_tlb_page(flush_vma, flush_addr);
	}

	/* Acknowledge: clear all pending IPI bits in the hardware register */
	ipi.vector = 0;
	REG_WR(intr_vect, irq_regs[smp_processor_id()], rw_ipi, ipi);

	return IRQ_HANDLED;
}