/*
 *	linux/arch/alpha/kernel/smp.c
 *
 *	2001-07-09 Phil Ezolt (Phillip.Ezolt@compaq.com)
 *	    Renamed modified smp_call_function to smp_call_function_on_cpu().
 *	    Created a function that conforms to the old calling convention
 *	    of smp_call_function().
 *
 *	    This is helpful for DCPI.
 */
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/bitops.h>
#include <linux/cpu.h>

#include <asm/hwrpb.h>
#include <asm/ptrace.h>
#include <asm/atomic.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

#include "proto.h"
#include "irq_impl.h"
#define DEBUG_SMP 0
#if DEBUG_SMP
#define DBGS(args)	printk args
#else
#define DBGS(args)
#endif
/* A collection of per-processor data.  */
struct cpuinfo_alpha cpu_data[NR_CPUS];
EXPORT_SYMBOL(cpu_data);
/* A collection of single bit ipi messages.  */
static struct {
	unsigned long bits ____cacheline_aligned;
} ipi_data[NR_CPUS] __cacheline_aligned;
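/* Each CPU's IPI word sits on its own cache line so that a sender
   writing another CPU's mailbox never false-shares with the receiver.
   Each message type below is one bit in that word: senders publish
   bits with set_bit() in send_ipi_message(), receivers drain them
   with xchg() in handle_ipi().  */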
enum ipi_message_type {
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CALL_FUNC_SINGLE,
	IPI_CPU_STOP,
};
/* Set to a secondary's cpuid when it comes online.  */
static int smp_secondary_alive __devinitdata = 0;
/* Which cpu ids came online.  */
cpumask_t cpu_online_map;

EXPORT_SYMBOL(cpu_online_map);
int smp_num_probed;		/* Internal processor count */
int smp_num_cpus = 1;		/* Number that came online.  */
EXPORT_SYMBOL(smp_num_cpus);
/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
static inline void __init
smp_store_cpu_info(int cpuid)
{
	cpu_data[cpuid].loops_per_jiffy = loops_per_jiffy;
	cpu_data[cpuid].last_asn = ASN_FIRST_VERSION;
	cpu_data[cpuid].need_new_asn = 0;
	cpu_data[cpuid].asn_lock = 0;
}
/*
 * Ideally sets up per-cpu profiling hooks.  Doesn't do much now...
 */
static inline void __init
smp_setup_percpu_timer(int cpuid)
{
	cpu_data[cpuid].prof_counter = 1;
	cpu_data[cpuid].prof_multiplier = 1;
}
static void __cpuinit
wait_boot_cpu_to_stop(int cpuid)
{
	unsigned long stop = jiffies + 10*HZ;

	while (time_before(jiffies, stop)) {
		if (!smp_secondary_alive)
			return;
		barrier();
	}

	printk("wait_boot_cpu_to_stop: FAILED on CPU %d, hanging now\n", cpuid);
	for (;;)
		barrier();
}
/*
 * Where secondaries begin a life of C.
 */
void __cpuinit
smp_callin(void)
{
	int cpuid = hard_smp_processor_id();

	if (cpu_test_and_set(cpuid, cpu_online_map)) {
		printk("??, cpu 0x%x already present??\n", cpuid);
		BUG();
	}

	/* Turn on machine checks.  */
	wrmces(7);

	/* Set trap vectors.  */
	trap_init();

	/* Set interrupt vector.  */
	wrent(entInt, 0);

	/* Get our local ticker going. */
	smp_setup_percpu_timer(cpuid);

	/* Call platform-specific callin, if specified */
	if (alpha_mv.smp_callin) alpha_mv.smp_callin();

	/* All kernel threads share the same mm context.  */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	/* inform the notifiers about the new cpu */
	notify_cpu_starting(cpuid);

	/* Must have completely accurate bogos.  */
	local_irq_enable();

	/* Wait for the boot CPU to stop, with irqs enabled, before
	   running calibrate_delay.  */
	wait_boot_cpu_to_stop(cpuid);
	mb();
	calibrate_delay();

	smp_store_cpu_info(cpuid);
	/* Allow the master to continue only after we've written
	   loops_per_jiffy.  */
	wmb();
	smp_secondary_alive = 1;

	DBGS(("smp_callin: commencing CPU %d current %p active_mm %p\n",
	      cpuid, current, current->active_mm));

	/* Do nothing.  */
	cpu_idle();
}
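/* Talking to a secondary's console goes through the HWRPB: we post a
   message in the target CPU's ipc_buffer and set its bit in
   hwrpb->rxrdy; a console with a message for us sets its bit in
   hwrpb->txrdy (see recv_secondary_console_msg below).  */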
/* Wait until hwrpb->txrdy is clear for cpu.  Return -1 on timeout.  */
static int __cpuinit
wait_for_txrdy (unsigned long cpumask)
{
	unsigned long timeout;

	if (!(hwrpb->txrdy & cpumask))
		return 0;

	timeout = jiffies + 10*HZ;
	while (time_before(jiffies, timeout)) {
		if (!(hwrpb->txrdy & cpumask))
			return 0;
		udelay(10);
		barrier();
	}

	return -1;
}
/*
 * Send a message to a secondary's console.  "START" is one such
 * interesting message.  ;-)
 */
static void __cpuinit
send_secondary_console_msg(char *str, int cpuid)
{
	struct percpu_struct *cpu;
	register char *cp1, *cp2;
	unsigned long cpumask;
	size_t len;

	cpu = (struct percpu_struct *)
		((char*)hwrpb
		 + hwrpb->processor_offset
		 + cpuid * hwrpb->processor_size);

	cpumask = (1UL << cpuid);
	if (wait_for_txrdy(cpumask))
		goto timeout;

	cp2 = str;
	len = strlen(cp2);
	*(unsigned int *)&cpu->ipc_buffer[0] = len;
	cp1 = (char *) &cpu->ipc_buffer[1];
	memcpy(cp1, cp2, len);

	/* atomic test and set */
	wmb();
	set_bit(cpuid, &hwrpb->rxrdy);

	if (wait_for_txrdy(cpumask))
		goto timeout;
	return;

 timeout:
	printk("Processor %x not ready\n", cpuid);
}
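/* ipc_buffer layout as used here and in recv_secondary_console_msg():
   the low 32 bits of word 0 carry an outgoing message's length and the
   high 32 bits an incoming one's; outgoing text starts at word 1,
   incoming text at word 11.  */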
/*
 * A secondary console wants to send a message.  Receive it.
 */
static void
recv_secondary_console_msg(void)
{
	int mycpu, i, cnt;
	unsigned long txrdy = hwrpb->txrdy;
	char *cp1, *cp2, buf[80];
	struct percpu_struct *cpu;

	DBGS(("recv_secondary_console_msg: TXRDY 0x%lx.\n", txrdy));

	mycpu = hard_smp_processor_id();

	for (i = 0; i < NR_CPUS; i++) {
		if (!(txrdy & (1UL << i)))
			continue;

		DBGS(("recv_secondary_console_msg: "
		      "TXRDY contains CPU %d.\n", i));

		cpu = (struct percpu_struct *)
			((char*)hwrpb
			 + hwrpb->processor_offset
			 + i * hwrpb->processor_size);

		DBGS(("recv_secondary_console_msg: on %d from %d"
		      " HALT_REASON 0x%lx FLAGS 0x%lx\n",
		      mycpu, i, cpu->halt_reason, cpu->flags));

		cnt = cpu->ipc_buffer[0] >> 32;
		if (cnt <= 0 || cnt >= 80)
			strcpy(buf, "<<< BOGUS MSG >>>");
		else {
			cp1 = (char *) &cpu->ipc_buffer[11];
			cp2 = buf;
			strcpy(cp2, cp1);

			while ((cp2 = strchr(cp2, '\r')) != 0) {
				*cp2 = ' ';
				if (cp2[1] == '\n')
					cp2[1] = ' ';
			}
		}

		DBGS((KERN_INFO "recv_secondary_console_msg: on %d "
		      "message is '%s'\n", mycpu, buf));
	}

	hwrpb->txrdy = 0;
}
/*
 * Convince the console to have a secondary cpu begin execution.
 */
static int __cpuinit
secondary_cpu_start(int cpuid, struct task_struct *idle)
{
	struct percpu_struct *cpu;
	struct pcb_struct *hwpcb, *ipcb;
	unsigned long timeout;

	cpu = (struct percpu_struct *)
		((char*)hwrpb
		 + hwrpb->processor_offset
		 + cpuid * hwrpb->processor_size);
	hwpcb = (struct pcb_struct *) cpu->hwpcb;
	ipcb = &task_thread_info(idle)->pcb;

	/* Initialize the CPU's HWPCB to something just good enough for
	   us to get started.  Immediately after starting, we'll swpctx
	   to the target idle task's pcb.  Reuse the stack in the
	   meantime.  Precalculate the target PCBB.  */
	hwpcb->ksp = (unsigned long)ipcb + sizeof(union thread_union) - 16;
	hwpcb->usp = 0;
	hwpcb->ptbr = ipcb->ptbr;
	hwpcb->pcc = 0;
	hwpcb->asn = 0;
	hwpcb->unique = virt_to_phys(ipcb);
	hwpcb->flags = ipcb->flags;
	hwpcb->res1 = hwpcb->res2 = 0;

	DBGS(("KSP 0x%lx PTBR 0x%lx VPTBR 0x%lx UNIQUE 0x%lx\n",
	      hwpcb->ksp, hwpcb->ptbr, hwrpb->vptb, hwpcb->unique));
	DBGS(("Starting secondary cpu %d: state 0x%lx pal_flags 0x%lx\n",
	      cpuid, idle->state, ipcb->flags));

	/* Setup HWRPB fields that SRM uses to activate secondary CPU */
	hwrpb->CPU_restart = __smp_callin;
	hwrpb->CPU_restart_data = (unsigned long) __smp_callin;

	/* Recalculate and update the HWRPB checksum */
	hwrpb_update_checksum(hwrpb);

	/*
	 * Send a "start" command to the specified processor.
	 */

	/* SRM III 3.4.1.3 */
	cpu->flags |= 0x22;	/* turn on Context Valid and Restart Capable */
	cpu->flags &= ~1;	/* turn off Bootstrap In Progress */
	wmb();

	send_secondary_console_msg("START\r\n", cpuid);

	/* Wait 10 seconds for an ACK from the console.  */
	timeout = jiffies + 10*HZ;
	while (time_before(jiffies, timeout)) {
		if (cpu->flags & 1)
			goto started;
		udelay(10);
		barrier();
	}
	printk(KERN_ERR "SMP: Processor %d failed to start.\n", cpuid);
	return -1;

 started:
	DBGS(("secondary_cpu_start: SUCCESS for CPU %d!!!\n", cpuid));
	return 0;
}
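/* Note on the ack: Bootstrap In Progress (bit 0) is cleared before
   START is sent, and the polling loop above treats its reappearance
   as the console's acknowledgement that the slave is bootstrapping. */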
/*
 * Bring one cpu online.
 */
static int __cpuinit
smp_boot_one_cpu(int cpuid)
{
	struct task_struct *idle;
	unsigned long timeout;

	/* Cook up an idler for this guy.  Note that the address we
	   give to kernel_thread is irrelevant -- it's going to start
	   where HWRPB.CPU_restart says to start.  But this gets all
	   the other task-y sort of data structures set up like we
	   wish.  We can't use kernel_thread since we must avoid
	   rescheduling the child.  */
	idle = fork_idle(cpuid);
	if (IS_ERR(idle))
		panic("failed fork for CPU %d", cpuid);

	DBGS(("smp_boot_one_cpu: CPU %d state 0x%lx flags 0x%lx\n",
	      cpuid, idle->state, idle->flags));

	/* Signal the secondary to wait a moment.  */
	smp_secondary_alive = -1;

	/* Whirrr, whirrr, whirrrrrrrrr... */
	if (secondary_cpu_start(cpuid, idle))
		return -1;

	/* Notify the secondary CPU it can run calibrate_delay.  */
	mb();
	smp_secondary_alive = 0;

	/* We've been acked by the console; wait one second for
	   the task to start up for real.  */
	timeout = jiffies + 1*HZ;
	while (time_before(jiffies, timeout)) {
		if (smp_secondary_alive == 1)
			goto alive;
		udelay(10);
		barrier();
	}

	/* We failed to boot the CPU.  */
	printk(KERN_ERR "SMP: Processor %d is stuck.\n", cpuid);
	return -1;

 alive:
	/* Another "Red Snapper". */
	return 0;
}
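/* smp_secondary_alive is a three-state handshake with smp_callin():
   -1 tells the freshly started secondary to hold off, 0 (set once the
   console has acked) lets it run calibrate_delay() undisturbed, and 1
   is the secondary's report that loops_per_jiffy is written and
   valid.  */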
/*
 * Called from setup_arch.  Detect an SMP system and which processors
 * are present.
 */
void __init
setup_smp(void)
{
	struct percpu_struct *cpubase, *cpu;
	int i;

	if (boot_cpuid != 0) {
		printk(KERN_WARNING "SMP: Booting off cpu %d instead of 0?\n",
		       boot_cpuid);
	}

	if (hwrpb->nr_processors > 1) {
		int boot_cpu_palrev;

		DBGS(("setup_smp: nr_processors %ld\n",
		      hwrpb->nr_processors));

		cpubase = (struct percpu_struct *)
			((char*)hwrpb + hwrpb->processor_offset);
		boot_cpu_palrev = cpubase->pal_revision;

		for (i = 0; i < hwrpb->nr_processors; i++) {
			cpu = (struct percpu_struct *)
				((char *)cpubase + i*hwrpb->processor_size);
			if ((cpu->flags & 0x1cc) == 0x1cc) {
				smp_num_probed++;
				cpu_set(i, cpu_present_map);
				cpu->pal_revision = boot_cpu_palrev;
			}

			DBGS(("setup_smp: CPU %d: flags 0x%lx type 0x%lx\n",
			      i, cpu->flags, cpu->type));
			DBGS(("setup_smp: CPU %d: PAL rev 0x%lx\n",
			      i, cpu->pal_revision));
		}
	} else {
		smp_num_probed = 1;
	}

	printk(KERN_INFO "SMP: %d CPUs probed -- cpu_present_map = %lx\n",
	       smp_num_probed, cpu_present_map.bits[0]);
}
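/* The 0x1cc mask above tests the HWRPB per-CPU flag bits that mark a
   slot usable -- Processor Available, Processor Present, and the
   PALcode valid/loaded bits, per the SRM HWRPB layout; a slot missing
   any of them is left out of cpu_present_map.  */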
/*
 * Called by smp_init to prepare the secondaries.
 */
void __init
smp_prepare_cpus(unsigned int max_cpus)
{
	/* Take care of some initial bookkeeping.  */
	memset(ipi_data, 0, sizeof(ipi_data));

	current_thread_info()->cpu = boot_cpuid;

	smp_store_cpu_info(boot_cpuid);
	smp_setup_percpu_timer(boot_cpuid);

	/* Nothing to do on a UP box, or when told not to.  */
	if (smp_num_probed == 1 || max_cpus == 0) {
		cpu_present_map = cpumask_of_cpu(boot_cpuid);
		printk(KERN_INFO "SMP mode deactivated.\n");
		return;
	}

	printk(KERN_INFO "SMP starting up secondaries.\n");

	smp_num_cpus = smp_num_probed;
}
void __devinit
smp_prepare_boot_cpu(void)
{
}
int __cpuinit
__cpu_up(unsigned int cpu)
{
	smp_boot_one_cpu(cpu);

	return cpu_online(cpu) ? 0 : -ENOSYS;
}
void __init
smp_cpus_done(unsigned int max_cpus)
{
	int cpu;
	unsigned long bogosum = 0;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu_online(cpu))
			bogosum += cpu_data[cpu].loops_per_jiffy;

	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       num_online_cpus(),
	       (bogosum + 2500) / (500000/HZ),
	       ((bogosum + 2500) / (5000/HZ)) % 100);
}
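/* BogoMIPS arithmetic above: loops_per_jiffy * HZ / 500000 is the
   conventional BogoMIPS figure, so dividing the sum by 500000/HZ gives
   the integer part, and dividing by the 100x finer 5000/HZ and taking
   the remainder mod 100 gives the two decimal places.  */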
void
smp_percpu_timer_interrupt(struct pt_regs *regs)
{
	struct pt_regs *old_regs;
	int cpu = smp_processor_id();
	unsigned long user = user_mode(regs);
	struct cpuinfo_alpha *data = &cpu_data[cpu];

	old_regs = set_irq_regs(regs);

	/* Record kernel PC.  */
	profile_tick(CPU_PROFILING);

	if (!--data->prof_counter) {
		/* We need to make like a normal interrupt -- otherwise
		   timer interrupts ignore the global interrupt lock,
		   which would be a Bad Thing.  */
		irq_enter();

		update_process_times(user);

		data->prof_counter = data->prof_multiplier;

		irq_exit();
	}
	set_irq_regs(old_regs);
}
int
setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}
static void
send_ipi_message(cpumask_t to_whom, enum ipi_message_type operation)
{
	int i;

	mb();
	for_each_cpu_mask(i, to_whom)
		set_bit(operation, &ipi_data[i].bits);

	mb();
	for_each_cpu_mask(i, to_whom)
		wripir(i);
}
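/* Two passes, each behind a full barrier: all message bits must be
   globally visible before any target can take its interrupt, so the
   bits are published first and only then are the IPIs fired via
   wripir().  */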
void
handle_ipi(struct pt_regs *regs)
{
	int this_cpu = smp_processor_id();
	unsigned long *pending_ipis = &ipi_data[this_cpu].bits;
	unsigned long ops;

	DBGS(("handle_ipi: on CPU %d ops 0x%lx PC 0x%lx\n",
	      this_cpu, *pending_ipis, regs->pc));

	mb();	/* Order interrupt and bit testing. */
	while ((ops = xchg(pending_ipis, 0)) != 0) {
		mb();	/* Order bit clearing and data access. */
		do {
			unsigned long which;

			which = ops & -ops;
			ops &= ~which;
			which = __ffs(which);

			switch (which) {
			case IPI_RESCHEDULE:
				/* Reschedule callback.  Everything to be done
				   is done by the interrupt return path.  */
				break;

			case IPI_CALL_FUNC:
				generic_smp_call_function_interrupt();
				break;

			case IPI_CALL_FUNC_SINGLE:
				generic_smp_call_function_single_interrupt();
				break;

			case IPI_CPU_STOP:
				halt();

			default:
				printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n",
				       this_cpu, which);
				break;
			}
		} while (ops);

		mb();	/* Order data access and bit testing. */
	}

	cpu_data[this_cpu].ipi_count++;

	if (hwrpb->txrdy)
		recv_secondary_console_msg();
}
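/* The xchg() claims an entire batch of pending bits atomically; the
   inner loop peels them off lowest-bit-first (ops & -ops isolates the
   lowest set bit) and the outer loop re-checks for messages that
   arrived while the batch was being processed.  */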
void
smp_send_reschedule(int cpu)
{
#ifdef DEBUG_IPI_MSG
	if (cpu == hard_smp_processor_id())
		printk(KERN_WARNING
		       "smp_send_reschedule: Sending IPI to self.\n");
#endif
	send_ipi_message(cpumask_of_cpu(cpu), IPI_RESCHEDULE);
}
void
smp_send_stop(void)
{
	cpumask_t to_whom = cpu_possible_map;
	cpu_clear(smp_processor_id(), to_whom);
#ifdef DEBUG_IPI_MSG
	if (hard_smp_processor_id() != boot_cpu_id)
		printk(KERN_WARNING "smp_send_stop: Not on boot cpu.\n");
#endif
	send_ipi_message(to_whom, IPI_CPU_STOP);
}
void arch_send_call_function_ipi(cpumask_t mask)
{
	send_ipi_message(mask, IPI_CALL_FUNC);
}
void arch_send_call_function_single_ipi(int cpu)
{
	send_ipi_message(cpumask_of_cpu(cpu), IPI_CALL_FUNC_SINGLE);
}
static void
ipi_imb(void *ignored)
{
	imb();
}

void
smp_imb(void)
{
	/* Must wait for the other processors to flush their icache
	   before continuing.  */
	if (on_each_cpu(ipi_imb, NULL, 1))
		printk(KERN_CRIT "smp_imb: timed out\n");
}
EXPORT_SYMBOL(smp_imb);
static void
ipi_flush_tlb_all(void *ignored)
{
	tbia();
}

void
flush_tlb_all(void)
{
	/* Although we don't have any data to pass, we do want to
	   synchronize with the other processors.  */
	if (on_each_cpu(ipi_flush_tlb_all, NULL, 1)) {
		printk(KERN_CRIT "flush_tlb_all: timed out\n");
	}
}
#define asn_locked() (cpu_data[smp_processor_id()].asn_lock)
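/* asn_lock is set while a CPU is switching address-space numbers.
   When it is held, the IPI handlers below leave the live context
   alone and go through flush_tlb_other(), which zeroes the mm's
   per-CPU context and so forces a fresh ASN at the next activation. */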
static void
ipi_flush_tlb_mm(void *x)
{
	struct mm_struct *mm = (struct mm_struct *) x;
	if (mm == current->active_mm && !asn_locked())
		flush_tlb_current(mm);
	else
		flush_tlb_other(mm);
}
void
flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if (mm == current->active_mm) {
		flush_tlb_current(mm);
		if (atomic_read(&mm->mm_users) <= 1) {
			int cpu, this_cpu = smp_processor_id();
			for (cpu = 0; cpu < NR_CPUS; cpu++) {
				if (!cpu_online(cpu) || cpu == this_cpu)
					continue;
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			preempt_enable();
			return;
		}
	}

	if (smp_call_function(ipi_flush_tlb_mm, mm, 1)) {
		printk(KERN_CRIT "flush_tlb_mm: timed out\n");
	}

	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_mm);
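/* Single-user fast path above: if no other thread can be running this
   mm, nothing remote needs flushing now; zeroing mm->context[cpu] is
   enough, since a zero context makes the next CPU that activates the
   mm allocate a fresh ASN.  The same pattern repeats in
   flush_tlb_page() and flush_icache_user_range() below.  */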
struct flush_tlb_page_struct {
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long addr;
};
static void
ipi_flush_tlb_page(void *x)
{
	struct flush_tlb_page_struct *data = (struct flush_tlb_page_struct *)x;
	struct mm_struct *mm = data->mm;

	if (mm == current->active_mm && !asn_locked())
		flush_tlb_current_page(mm, data->vma, data->addr);
	else
		flush_tlb_other(mm);
}
void
flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct flush_tlb_page_struct data;
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();

	if (mm == current->active_mm) {
		flush_tlb_current_page(mm, vma, addr);
		if (atomic_read(&mm->mm_users) <= 1) {
			int cpu, this_cpu = smp_processor_id();
			for (cpu = 0; cpu < NR_CPUS; cpu++) {
				if (!cpu_online(cpu) || cpu == this_cpu)
					continue;
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			preempt_enable();
			return;
		}
	}

	data.vma = vma;
	data.mm = mm;
	data.addr = addr;

	if (smp_call_function(ipi_flush_tlb_page, &data, 1)) {
		printk(KERN_CRIT "flush_tlb_page: timed out\n");
	}

	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_page);
void
flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	/* On the Alpha we always flush the whole user tlb.  */
	flush_tlb_mm(vma->vm_mm);
}
EXPORT_SYMBOL(flush_tlb_range);
static void
ipi_flush_icache_page(void *x)
{
	struct mm_struct *mm = (struct mm_struct *) x;
	if (mm == current->active_mm && !asn_locked())
		__load_new_mm_context(mm);
	else
		flush_tlb_other(mm);
}
void
flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			unsigned long addr, int len)
{
	struct mm_struct *mm = vma->vm_mm;

	if ((vma->vm_flags & VM_EXEC) == 0)
		return;

	preempt_disable();

	if (mm == current->active_mm) {
		__load_new_mm_context(mm);
		if (atomic_read(&mm->mm_users) <= 1) {
			int cpu, this_cpu = smp_processor_id();
			for (cpu = 0; cpu < NR_CPUS; cpu++) {
				if (!cpu_online(cpu) || cpu == this_cpu)
					continue;
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			preempt_enable();
			return;
		}
	}

	if (smp_call_function(ipi_flush_icache_page, mm, 1)) {
		printk(KERN_CRIT "flush_icache_page: timed out\n");
	}

	preempt_enable();
}