/*
 *	linux/arch/alpha/kernel/smp.c
 *
 *	2001-07-09 Phil Ezolt (Phillip.Ezolt@compaq.com)
 *	      Renamed modified smp_call_function to smp_call_function_on_cpu()
 *	      Created a function that conforms to the old calling convention
 *	      of smp_call_function().
 *
 *	      This is helpful for DCPI.
 */
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/bitops.h>
#include <linux/cpu.h>

#include <asm/hwrpb.h>
#include <asm/ptrace.h>
#include <asm/atomic.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

#include "proto.h"
#include "irq_impl.h"
#define DEBUG_SMP 0
#if DEBUG_SMP
#define DBGS(args)	printk args
#else
#define DBGS(args)
#endif
/* A collection of per-processor data.  */
struct cpuinfo_alpha cpu_data[NR_CPUS];
EXPORT_SYMBOL(cpu_data);
/* A collection of single bit ipi messages.  */
static struct {
	unsigned long bits ____cacheline_aligned;
} ipi_data[NR_CPUS] __cacheline_aligned;
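/* Note: each entry is ____cacheline_aligned so that one CPU's IPI word
   occupies its own cache line; a sender setting bits for one CPU then
   does not invalidate the line another CPU is polling.  */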
enum ipi_message_type {
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CALL_FUNC_SINGLE,
	IPI_CPU_STOP,
};
/* Set to a secondary's cpuid when it comes online.  */
static int smp_secondary_alive __devinitdata = 0;

int smp_num_probed;		/* Internal processor count */
int smp_num_cpus = 1;		/* Number that came online.  */
EXPORT_SYMBOL(smp_num_cpus);
/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
static inline void __init
smp_store_cpu_info(int cpuid)
{
	cpu_data[cpuid].loops_per_jiffy = loops_per_jiffy;
	cpu_data[cpuid].last_asn = ASN_FIRST_VERSION;
	cpu_data[cpuid].need_new_asn = 0;
	cpu_data[cpuid].asn_lock = 0;
}
/*
 * Ideally sets up per-cpu profiling hooks.  Doesn't do much now...
 */
static inline void __init
smp_setup_percpu_timer(int cpuid)
{
	cpu_data[cpuid].prof_counter = 1;
	cpu_data[cpuid].prof_multiplier = 1;
}
static void __init
wait_boot_cpu_to_stop(int cpuid)
{
	unsigned long stop = jiffies + 10*HZ;

	while (time_before(jiffies, stop)) {
		if (!smp_secondary_alive)
			return;
		barrier();
	}

	printk("wait_boot_cpu_to_stop: FAILED on CPU %d, hanging now\n", cpuid);
	for (;;)
		barrier();
}
/*
 * Where secondaries begin a life of C.
 */
void __cpuinit
smp_callin(void)
{
	int cpuid = hard_smp_processor_id();

	if (cpu_test_and_set(cpuid, cpu_online_map)) {
		printk("??, cpu 0x%x already present??\n", cpuid);
		BUG();
	}

	/* Turn on machine checks.  */
	wrmces(7);

	/* Set trap vectors.  */
	trap_init();

	/* Set interrupt vector.  */
	wrent(entInt, 0);

	/* Get our local ticker going. */
	smp_setup_percpu_timer(cpuid);

	/* Call platform-specific callin, if specified */
	if (alpha_mv.smp_callin) alpha_mv.smp_callin();

	/* All kernel threads share the same mm context.  */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	/* inform the notifiers about the new cpu */
	notify_cpu_starting(cpuid);

	/* Must have completely accurate bogos.  */
	local_irq_enable();

	/* Wait for the boot CPU to stop, with irqs enabled, before
	   running calibrate_delay. */
	wait_boot_cpu_to_stop(cpuid);
	mb();
	calibrate_delay();

	smp_store_cpu_info(cpuid);
	/* Allow the master to continue only after we've written
	   loops_per_jiffy.  */
	wmb();
	smp_secondary_alive = 1;

	DBGS(("smp_callin: commencing CPU %d current %p active_mm %p\n",
	      cpuid, current, current->active_mm));

	/* Do nothing.  */
	cpu_idle();
}
/* Wait until hwrpb->txrdy is clear for cpu.  Return -1 on timeout.  */
static int __cpuinit
wait_for_txrdy (unsigned long cpumask)
{
	unsigned long timeout;

	if (!(hwrpb->txrdy & cpumask))
		return 0;

	timeout = jiffies + 10*HZ;
	while (time_before(jiffies, timeout)) {
		if (!(hwrpb->txrdy & cpumask))
			return 0;
		udelay(10);
		barrier();
	}

	return -1;
}
/*
 * Send a message to a secondary's console.  "START" is one such
 * interesting message.  ;-)
 */
static void __cpuinit
send_secondary_console_msg(char *str, int cpuid)
{
	struct percpu_struct *cpu;
	register char *cp1, *cp2;
	unsigned long cpumask;
	size_t len;

	cpu = (struct percpu_struct *)
		((char*)hwrpb
		 + hwrpb->processor_offset
		 + cpuid * hwrpb->processor_size);

	cpumask = (1UL << cpuid);
	if (wait_for_txrdy(cpumask))
		goto timeout;

	cp2 = str;
	len = strlen(cp2);
	*(unsigned int *)&cpu->ipc_buffer[0] = len;
	cp1 = (char *) &cpu->ipc_buffer[1];
	memcpy(cp1, cp2, len);

	/* atomic test and set */
	wmb();
	set_bit(cpuid, &hwrpb->rxrdy);

	if (wait_for_txrdy(cpumask))
		goto timeout;
	return;

 timeout:
	printk("Processor %x not ready\n", cpuid);
}
/*
 * A secondary console wants to send a message.  Receive it.
 */
static void
recv_secondary_console_msg(void)
{
	int mycpu, i, cnt;
	unsigned long txrdy = hwrpb->txrdy;
	char *cp1, *cp2, buf[80];
	struct percpu_struct *cpu;

	DBGS(("recv_secondary_console_msg: TXRDY 0x%lx.\n", txrdy));

	mycpu = hard_smp_processor_id();

	for (i = 0; i < NR_CPUS; i++) {
		if (!(txrdy & (1UL << i)))
			continue;

		DBGS(("recv_secondary_console_msg: "
		      "TXRDY contains CPU %d.\n", i));

		cpu = (struct percpu_struct *)
		  ((char*)hwrpb
		   + hwrpb->processor_offset
		   + i * hwrpb->processor_size);

		DBGS(("recv_secondary_console_msg: on %d from %d"
		      " HALT_REASON 0x%lx FLAGS 0x%lx\n",
		      mycpu, i, cpu->halt_reason, cpu->flags));

		cnt = cpu->ipc_buffer[0] >> 32;
		if (cnt <= 0 || cnt >= 80)
			strcpy(buf, "<<< BOGUS MSG >>>");
		else {
			cp1 = (char *) &cpu->ipc_buffer[11];
			cp2 = buf;
			strcpy(cp2, cp1);

			while ((cp2 = strchr(cp2, '\r')) != 0) {
				*cp2 = ' ';
				if (cp2[1] == '\n')
					cp2[1] = ' ';
			}
		}

		DBGS((KERN_INFO "recv_secondary_console_msg: on %d "
		      "message is '%s'\n", mycpu, buf));
	}

	hwrpb->txrdy = 0;
}
/*
 * Convince the console to have a secondary cpu begin execution.
 */
static int __cpuinit
secondary_cpu_start(int cpuid, struct task_struct *idle)
{
	struct percpu_struct *cpu;
	struct pcb_struct *hwpcb, *ipcb;
	unsigned long timeout;

	cpu = (struct percpu_struct *)
		((char*)hwrpb
		 + hwrpb->processor_offset
		 + cpuid * hwrpb->processor_size);
	hwpcb = (struct pcb_struct *) cpu->hwpcb;
	ipcb = &task_thread_info(idle)->pcb;

	/* Initialize the CPU's HWPCB to something just good enough for
	   us to get started.  Immediately after starting, we'll swpctx
	   to the target idle task's pcb.  Reuse the stack in the mean
	   time.  Precalculate the target PCBB.  */
	hwpcb->ksp = (unsigned long)ipcb + sizeof(union thread_union) - 16;
	hwpcb->usp = 0;
	hwpcb->ptbr = ipcb->ptbr;
	hwpcb->pcc = 0;
	hwpcb->asn = 0;
	hwpcb->unique = virt_to_phys(ipcb);
	hwpcb->flags = ipcb->flags;
	hwpcb->res1 = hwpcb->res2 = 0;

	DBGS(("KSP 0x%lx PTBR 0x%lx VPTBR 0x%lx UNIQUE 0x%lx\n",
	      hwpcb->ksp, hwpcb->ptbr, hwrpb->vptb, hwpcb->unique));
	DBGS(("Starting secondary cpu %d: state 0x%lx pal_flags 0x%lx\n",
	      cpuid, idle->state, ipcb->flags));

	/* Setup HWRPB fields that SRM uses to activate secondary CPU */
	hwrpb->CPU_restart = __smp_callin;
	hwrpb->CPU_restart_data = (unsigned long) __smp_callin;

	/* Recalculate and update the HWRPB checksum */
	hwrpb_update_checksum(hwrpb);

	/*
	 * Send a "start" command to the specified processor.
	 */

	/* SRM III 3.4.1.3 */
	cpu->flags |= 0x22;	/* turn on Context Valid and Restart Capable */
	cpu->flags &= ~1;	/* turn off Bootstrap In Progress */
	wmb();

	send_secondary_console_msg("START\r\n", cpuid);

	/* Wait 10 seconds for an ACK from the console.  */
	timeout = jiffies + 10*HZ;
	while (time_before(jiffies, timeout)) {
		if (cpu->flags & 1)
			goto started;
		udelay(10);
		barrier();
	}
	printk(KERN_ERR "SMP: Processor %d failed to start.\n", cpuid);
	return -1;

 started:
	DBGS(("secondary_cpu_start: SUCCESS for CPU %d!!!\n", cpuid));
	return 0;
}
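/* SRM restarts the secondary at hwrpb->CPU_restart, i.e. at the
   __smp_callin trampoline, which switches onto the idle task's PCB
   prepared above and then enters smp_callin().  */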
/*
 * Bring one cpu online.
 */
static int __cpuinit
smp_boot_one_cpu(int cpuid)
{
	struct task_struct *idle;
	unsigned long timeout;

	/* Cook up an idler for this guy.  Note that the address we
	   give to kernel_thread is irrelevant -- it's going to start
	   where HWRPB.CPU_restart says to start.  But this gets all
	   the other task-y sort of data structures set up like we
	   wish.  We can't use kernel_thread since we must avoid
	   rescheduling the child.  */
	idle = fork_idle(cpuid);
	if (IS_ERR(idle))
		panic("failed fork for CPU %d", cpuid);

	DBGS(("smp_boot_one_cpu: CPU %d state 0x%lx flags 0x%lx\n",
	      cpuid, idle->state, idle->flags));

	/* Signal the secondary to wait a moment.  */
	smp_secondary_alive = -1;

	/* Whirrr, whirrr, whirrrrrrrrr... */
	if (secondary_cpu_start(cpuid, idle))
		return -1;

	/* Notify the secondary CPU it can run calibrate_delay.  */
	mb();
	smp_secondary_alive = 0;

	/* We've been acked by the console; wait one second for
	   the task to start up for real.  */
	timeout = jiffies + 1*HZ;
	while (time_before(jiffies, timeout)) {
		if (smp_secondary_alive == 1)
			goto alive;
		udelay(10);
		barrier();
	}

	/* We failed to boot the CPU.  */

	printk(KERN_ERR "SMP: Processor %d is stuck.\n", cpuid);
	return -1;

 alive:
	/* Another "Red Snapper". */
	return 0;
}
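/* Boot handshake, as encoded in smp_secondary_alive:
     -1  boot CPU still busy; secondary spins in wait_boot_cpu_to_stop()
      0  boot CPU quiescent; secondary may run calibrate_delay()
      1  secondary has published loops_per_jiffy and is fully alive  */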
/*
 * Called from setup_arch.  Detect an SMP system and which processors
 * are present.
 */
void __init
setup_smp(void)
{
	struct percpu_struct *cpubase, *cpu;
	int i;

	if (boot_cpuid != 0) {
		printk(KERN_WARNING "SMP: Booting off cpu %d instead of 0?\n",
		       boot_cpuid);
	}

	if (hwrpb->nr_processors > 1) {
		int boot_cpu_palrev;

		DBGS(("setup_smp: nr_processors %ld\n",
		      hwrpb->nr_processors));

		cpubase = (struct percpu_struct *)
			((char*)hwrpb + hwrpb->processor_offset);
		boot_cpu_palrev = cpubase->pal_revision;

		for (i = 0; i < hwrpb->nr_processors; i++) {
			cpu = (struct percpu_struct *)
				((char *)cpubase + i*hwrpb->processor_size);
			if ((cpu->flags & 0x1cc) == 0x1cc) {
				smp_num_probed++;
				cpu_set(i, cpu_possible_map);
				cpu_set(i, cpu_present_map);
				cpu->pal_revision = boot_cpu_palrev;
			}

			DBGS(("setup_smp: CPU %d: flags 0x%lx type 0x%lx\n",
			      i, cpu->flags, cpu->type));
			DBGS(("setup_smp: CPU %d: PAL rev 0x%lx\n",
			      i, cpu->pal_revision));
		}
	} else {
		smp_num_probed = 1;
	}

	printk(KERN_INFO "SMP: %d CPUs probed -- cpu_present_map = %lx\n",
	       smp_num_probed, cpu_present_map.bits[0]);
}
/*
 * Called by smp_init to prepare the secondaries.
 */
void __init
smp_prepare_cpus(unsigned int max_cpus)
{
	/* Take care of some initial bookkeeping.  */
	memset(ipi_data, 0, sizeof(ipi_data));

	current_thread_info()->cpu = boot_cpuid;

	smp_store_cpu_info(boot_cpuid);
	smp_setup_percpu_timer(boot_cpuid);

	/* Nothing to do on a UP box, or when told not to.  */
	if (smp_num_probed == 1 || max_cpus == 0) {
		cpu_possible_map = cpumask_of_cpu(boot_cpuid);
		cpu_present_map = cpumask_of_cpu(boot_cpuid);
		printk(KERN_INFO "SMP mode deactivated.\n");
		return;
	}

	printk(KERN_INFO "SMP starting up secondaries.\n");

	smp_num_cpus = smp_num_probed;
}
void __devinit
smp_prepare_boot_cpu(void)
{
}

int __cpuinit
__cpu_up(unsigned int cpu)
{
	smp_boot_one_cpu(cpu);

	return cpu_online(cpu) ? 0 : -ENOSYS;
}
void __init
smp_cpus_done(unsigned int max_cpus)
{
	int cpu;
	unsigned long bogosum = 0;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu_online(cpu))
			bogosum += cpu_data[cpu].loops_per_jiffy;

	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       num_online_cpus(),
	       (bogosum + 2500) / (500000/HZ),
	       ((bogosum + 2500) / (5000/HZ)) % 100);
}
void
smp_percpu_timer_interrupt(struct pt_regs *regs)
{
	struct pt_regs *old_regs;
	int cpu = smp_processor_id();
	unsigned long user = user_mode(regs);
	struct cpuinfo_alpha *data = &cpu_data[cpu];

	old_regs = set_irq_regs(regs);

	/* Record kernel PC.  */
	profile_tick(CPU_PROFILING);

	if (!--data->prof_counter) {
		/* We need to make like a normal interrupt -- otherwise
		   timer interrupts ignore the global interrupt lock,
		   which would be a Bad Thing.  */
		irq_enter();

		update_process_times(user);

		data->prof_counter = data->prof_multiplier;

		irq_exit();
	}
	set_irq_regs(old_regs);
}
int
setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}
static void
send_ipi_message(cpumask_t to_whom, enum ipi_message_type operation)
{
	int i;

	mb();
	for_each_cpu_mask(i, to_whom)
		set_bit(operation, &ipi_data[i].bits);

	mb();
	for_each_cpu_mask(i, to_whom)
		wripir(i);
}
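/* Delivery protocol: the sender publishes the operation bit with a full
   barrier on either side of set_bit(), then raises the target's
   interrupt with wripir().  The receiver in handle_ipi() claims all
   pending bits at once with xchg(pending_ipis, 0), so no request can be
   lost between the bit test and the clear.  */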
void
handle_ipi(struct pt_regs *regs)
{
	int this_cpu = smp_processor_id();
	unsigned long *pending_ipis = &ipi_data[this_cpu].bits;
	unsigned long ops;

	DBGS(("handle_ipi: on CPU %d ops 0x%lx PC 0x%lx\n",
	      this_cpu, *pending_ipis, regs->pc));

	mb();	/* Order interrupt and bit testing. */
	while ((ops = xchg(pending_ipis, 0)) != 0) {
		mb();	/* Order bit clearing and data access. */
		do {
			unsigned long which;

			which = ops & -ops;	/* isolate lowest set bit */
			ops &= ~which;
			which = __ffs(which);

			switch (which) {
			case IPI_RESCHEDULE:
				/* Reschedule callback.  Everything to be done
				   is done by the interrupt return path.  */
				break;

			case IPI_CALL_FUNC:
				generic_smp_call_function_interrupt();
				break;

			case IPI_CALL_FUNC_SINGLE:
				generic_smp_call_function_single_interrupt();
				break;

			case IPI_CPU_STOP:
				halt();

			default:
				printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n",
				       this_cpu, which);
				break;
			}
		} while (ops);

		mb();	/* Order data access and bit testing. */
	}

	cpu_data[this_cpu].ipi_count++;

	if (hwrpb->txrdy)
		recv_secondary_console_msg();
}
void
smp_send_reschedule(int cpu)
{
#ifdef DEBUG_IPI_MSG
	if (cpu == hard_smp_processor_id())
		printk(KERN_WARNING
		       "smp_send_reschedule: Sending IPI to self.\n");
#endif
	send_ipi_message(cpumask_of_cpu(cpu), IPI_RESCHEDULE);
}
void
smp_send_stop(void)
{
	cpumask_t to_whom = cpu_possible_map;
	cpu_clear(smp_processor_id(), to_whom);
#ifdef DEBUG_IPI_MSG
	if (hard_smp_processor_id() != boot_cpu_id)
		printk(KERN_WARNING "smp_send_stop: Not on boot cpu.\n");
#endif
	send_ipi_message(to_whom, IPI_CPU_STOP);
}
void arch_send_call_function_ipi(cpumask_t mask)
{
	send_ipi_message(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	send_ipi_message(cpumask_of_cpu(cpu), IPI_CALL_FUNC_SINGLE);
}
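/* Example (hypothetical caller, not part of this file): generic code is
   what funnels into these hooks.  A driver doing something like

	static void bump_counter(void *info)
	{
		atomic_inc((atomic_t *)info);
	}

	smp_call_function_single(2, bump_counter, &counter, 1);

   has the generic layer queue the call and invoke
   arch_send_call_function_single_ipi(2) above; the target CPU then runs
   bump_counter() from the IPI_CALL_FUNC_SINGLE case in handle_ipi() via
   generic_smp_call_function_single_interrupt().  */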
static void
ipi_imb(void *ignored)
{
	imb();
}

void
smp_imb(void)
{
	/* Must wait for other processors to flush their icache before
	   continuing.  */
	if (on_each_cpu(ipi_imb, NULL, 1))
		printk(KERN_CRIT "smp_imb: timed out\n");
}
EXPORT_SYMBOL(smp_imb);
static void
ipi_flush_tlb_all(void *ignored)
{
	tbia();
}

void
flush_tlb_all(void)
{
	/* Although we don't have any data to pass, we do want to
	   synchronize with the other processors.  */
	if (on_each_cpu(ipi_flush_tlb_all, NULL, 1)) {
		printk(KERN_CRIT "flush_tlb_all: timed out\n");
	}
}
#define asn_locked() (cpu_data[smp_processor_id()].asn_lock)
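/* asn_lock is set while the local CPU is juggling its address space
   numbers; in that window the IPI handlers below must leave the current
   context alone and use flush_tlb_other(), which simply zaps the mm's
   per-CPU context so a fresh ASN is allocated on the next activation.  */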
static void
ipi_flush_tlb_mm(void *x)
{
	struct mm_struct *mm = (struct mm_struct *) x;
	if (mm == current->active_mm && !asn_locked())
		flush_tlb_current(mm);
	else
		flush_tlb_other(mm);
}
void
flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if (mm == current->active_mm) {
		flush_tlb_current(mm);
		if (atomic_read(&mm->mm_users) <= 1) {
			int cpu, this_cpu = smp_processor_id();
			for (cpu = 0; cpu < NR_CPUS; cpu++) {
				if (!cpu_online(cpu) || cpu == this_cpu)
					continue;
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			preempt_enable();
			return;
		}
	}

	if (smp_call_function(ipi_flush_tlb_mm, mm, 1)) {
		printk(KERN_CRIT "flush_tlb_mm: timed out\n");
	}

	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_mm);
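/* The mm_users <= 1 fast path above skips the cross-call entirely: for
   a single-user mm it is enough to zero the other CPUs' context values,
   forcing a fresh ASN (and hence a clean TLB view) if that mm is ever
   activated there again.  The same pattern repeats in flush_tlb_page()
   and flush_icache_user_range() below.  */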
struct flush_tlb_page_struct {
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long addr;
};
static void
ipi_flush_tlb_page(void *x)
{
	struct flush_tlb_page_struct *data = (struct flush_tlb_page_struct *)x;
	struct mm_struct *mm = data->mm;

	if (mm == current->active_mm && !asn_locked())
		flush_tlb_current_page(mm, data->vma, data->addr);
	else
		flush_tlb_other(mm);
}
void
flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct flush_tlb_page_struct data;
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();

	if (mm == current->active_mm) {
		flush_tlb_current_page(mm, vma, addr);
		if (atomic_read(&mm->mm_users) <= 1) {
			int cpu, this_cpu = smp_processor_id();
			for (cpu = 0; cpu < NR_CPUS; cpu++) {
				if (!cpu_online(cpu) || cpu == this_cpu)
					continue;
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			preempt_enable();
			return;
		}
	}

	data.vma = vma;
	data.mm = mm;
	data.addr = addr;

	if (smp_call_function(ipi_flush_tlb_page, &data, 1)) {
		printk(KERN_CRIT "flush_tlb_page: timed out\n");
	}

	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_page);
void
flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	/* On the Alpha we always flush the whole user tlb.  */
	flush_tlb_mm(vma->vm_mm);
}
EXPORT_SYMBOL(flush_tlb_range);
static void
ipi_flush_icache_page(void *x)
{
	struct mm_struct *mm = (struct mm_struct *) x;
	if (mm == current->active_mm && !asn_locked())
		__load_new_mm_context(mm);
	else
		flush_tlb_other(mm);
}
void
flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			unsigned long addr, int len)
{
	struct mm_struct *mm = vma->vm_mm;

	if ((vma->vm_flags & VM_EXEC) == 0)
		return;

	preempt_disable();

	if (mm == current->active_mm) {
		__load_new_mm_context(mm);
		if (atomic_read(&mm->mm_users) <= 1) {
			int cpu, this_cpu = smp_processor_id();
			for (cpu = 0; cpu < NR_CPUS; cpu++) {
				if (!cpu_online(cpu) || cpu == this_cpu)
					continue;
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			preempt_enable();
			return;
		}
	}

	if (smp_call_function(ipi_flush_icache_page, mm, 1)) {
		printk(KERN_CRIT "flush_icache_page: timed out\n");
	}

	preempt_enable();
}