/*
 *	linux/arch/alpha/kernel/smp.c
 *
 *      2001-07-09 Phil Ezolt (Phillip.Ezolt@compaq.com)
 *            Renamed modified smp_call_function to smp_call_function_on_cpu()
 *            Created a function that conforms to the old calling convention
 *            of smp_call_function().
 *
 *            This is helpful for DCPI.
 */
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/err.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/bitops.h>
#include <linux/cpu.h>

#include <asm/hwrpb.h>
#include <asm/ptrace.h>
#include <linux/atomic.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#define DBGS(args)	printk args
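/* Debug tracing helper.  In the full source this definition presumably
   sits under a DEBUG_SMP conditional and compiles away in normal
   builds; only the enabled variant survives here.  */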
/* A collection of per-processor data.  */
struct cpuinfo_alpha cpu_data[NR_CPUS];
EXPORT_SYMBOL(cpu_data);
/* A collection of single bit ipi messages.  */
static struct {
	unsigned long bits ____cacheline_aligned;
} ipi_data[NR_CPUS] __cacheline_aligned;
enum ipi_message_type {
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
};
/* Set to a secondary's cpuid when it comes online.  */
static int smp_secondary_alive = 0;

int smp_num_probed;		/* Internal processor count */
int smp_num_cpus = 1;		/* Number that came online.  */
EXPORT_SYMBOL(smp_num_cpus);
/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
static inline void __init
smp_store_cpu_info(int cpuid)
{
	cpu_data[cpuid].loops_per_jiffy = loops_per_jiffy;
	cpu_data[cpuid].last_asn = ASN_FIRST_VERSION;
	cpu_data[cpuid].need_new_asn = 0;
	cpu_data[cpuid].asn_lock = 0;
}
/*
 * Ideally sets up per-cpu profiling hooks.  Doesn't do much now...
 */
static inline void __init
smp_setup_percpu_timer(int cpuid)
{
	cpu_data[cpuid].prof_counter = 1;
	cpu_data[cpuid].prof_multiplier = 1;
}
static void __init
wait_boot_cpu_to_stop(int cpuid)
{
	unsigned long stop = jiffies + 10*HZ;

	while (time_before(jiffies, stop)) {
		if (!smp_secondary_alive)
			return;
		barrier();
	}

	printk("wait_boot_cpu_to_stop: FAILED on CPU %d, hanging now\n", cpuid);
	for (;;)
		barrier();
}
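/*
 * A note on the boot handshake, as described by the comments in this
 * file: smp_boot_one_cpu() sets smp_secondary_alive to -1 before
 * starting a secondary, drops it to 0 once the console has ACKed the
 * START message (the secondary may then run calibrate_delay), and the
 * secondary finally writes 1 after storing its loops_per_jiffy,
 * releasing the boot CPU.
 */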
/*
 * Where secondaries begin a life of C.
 */
void
smp_callin(void)
{
	int cpuid = hard_smp_processor_id();

	if (cpu_online(cpuid)) {
		printk("??, cpu 0x%x already present??\n", cpuid);
		BUG();
	}
	set_cpu_online(cpuid, true);

	/* Turn on machine checks.  */
	wrmces(7);

	/* Set trap vectors.  */
	trap_init();

	/* Set interrupt vector.  */
	wrent(entInt, 0);

	/* Get our local ticker going. */
	smp_setup_percpu_timer(cpuid);

	/* Call platform-specific callin, if specified */
	if (alpha_mv.smp_callin)
		alpha_mv.smp_callin();

	/* All kernel threads share the same mm context.  */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	/* inform the notifiers about the new cpu */
	notify_cpu_starting(cpuid);

	/* Must have completely accurate bogos.  */
	local_irq_enable();

	/* Wait for the boot CPU to stop, with irqs enabled, before
	   running calibrate_delay. */
	wait_boot_cpu_to_stop(cpuid);
	mb();
	calibrate_delay();

	smp_store_cpu_info(cpuid);
	/* Allow the master to continue only after we've written
	   loops_per_jiffy.  */
	wmb();
	smp_secondary_alive = 1;

	DBGS(("smp_callin: commencing CPU %d current %p active_mm %p\n",
	      cpuid, current, current->active_mm));

	preempt_disable();
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
/* Wait until hwrpb->txrdy is clear for cpu.  Return -1 on timeout.  */
static int
wait_for_txrdy (unsigned long cpumask)
{
	unsigned long timeout;

	if (!(hwrpb->txrdy & cpumask))
		return 0;

	timeout = jiffies + 10*HZ;
	while (time_before(jiffies, timeout)) {
		if (!(hwrpb->txrdy & cpumask))
			return 0;
		udelay(10);
		barrier();
	}

	return -1;
}
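/*
 * hwrpb->txrdy appears to act as a per-CPU mailbox-busy mask: a set
 * bit means that CPU's console still has data in flight, so the
 * sender polls for the bit to clear before reusing the shared
 * ipc_buffer.
 */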
/*
 * Send a message to a secondary's console.  "START" is one such
 * interesting message.  ;-)
 */
static void
send_secondary_console_msg(char *str, int cpuid)
{
	struct percpu_struct *cpu;
	register char *cp1, *cp2;
	unsigned long cpumask;
	size_t len;

	cpu = (struct percpu_struct *)
		((char*)hwrpb
		 + hwrpb->processor_offset
		 + cpuid * hwrpb->processor_size);

	cpumask = (1UL << cpuid);
	if (wait_for_txrdy(cpumask))
		goto timeout;

	cp2 = str;
	len = strlen(cp2);
	*(unsigned int *)&cpu->ipc_buffer[0] = len;
	cp1 = (char *) &cpu->ipc_buffer[1];
	memcpy(cp1, cp2, len);

	/* atomic test and set */
	wmb();
	set_bit(cpuid, &hwrpb->rxrdy);

	if (wait_for_txrdy(cpumask))
		goto timeout;
	return;

 timeout:
	printk("Processor %x not ready\n", cpuid);
}
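/*
 * Note the asymmetric ipc_buffer framing: on send, the length is
 * stored in the low 32 bits of ipc_buffer[0] and the text starts at
 * ipc_buffer[1]; on receive (below), the count is read back out of
 * the high 32 bits (ipc_buffer[0] >> 32).
 */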
/*
 * A secondary console wants to send a message.  Receive it.
 */
static void
recv_secondary_console_msg(void)
{
	int mycpu, i, cnt;
	unsigned long txrdy = hwrpb->txrdy;
	char *cp1, *cp2, buf[80];
	struct percpu_struct *cpu;

	DBGS(("recv_secondary_console_msg: TXRDY 0x%lx.\n", txrdy));

	mycpu = hard_smp_processor_id();

	for (i = 0; i < NR_CPUS; i++) {
		if (!(txrdy & (1UL << i)))
			continue;

		DBGS(("recv_secondary_console_msg: "
		      "TXRDY contains CPU %d.\n", i));

		cpu = (struct percpu_struct *)
			((char*)hwrpb
			 + hwrpb->processor_offset
			 + i * hwrpb->processor_size);

		DBGS(("recv_secondary_console_msg: on %d from %d"
		      " HALT_REASON 0x%lx FLAGS 0x%lx\n",
		      mycpu, i, cpu->halt_reason, cpu->flags));

		cnt = cpu->ipc_buffer[0] >> 32;
		if (cnt <= 0 || cnt >= 80)
			strcpy(buf, "<<< BOGUS MSG >>>");
		else {
			cp1 = (char *) &cpu->ipc_buffer[1];
			cp2 = buf;
			memcpy(cp2, cp1, cnt);
			cp2[cnt] = '\0';

			while ((cp2 = strchr(cp2, '\r')) != 0) {
				*cp2 = ' ';
				if (cp2[1] == '\n')
					cp2[1] = ' ';
			}
		}

		DBGS((KERN_INFO "recv_secondary_console_msg: on %d "
		      "message is '%s'\n", mycpu, buf));
	}

	hwrpb->txrdy = 0;
}
/*
 * Convince the console to have a secondary cpu begin execution.
 */
static int
secondary_cpu_start(int cpuid, struct task_struct *idle)
{
	struct percpu_struct *cpu;
	struct pcb_struct *hwpcb, *ipcb;
	unsigned long timeout;

	cpu = (struct percpu_struct *)
		((char*)hwrpb
		 + hwrpb->processor_offset
		 + cpuid * hwrpb->processor_size);
	hwpcb = (struct pcb_struct *) cpu->hwpcb;
	ipcb = &task_thread_info(idle)->pcb;

	/* Initialize the CPU's HWPCB to something just good enough for
	   us to get started.  Immediately after starting, we'll swpctx
	   to the target idle task's pcb.  Reuse the stack in the mean
	   time.  Precalculate the target PCBB.  */
	hwpcb->ksp = (unsigned long)ipcb + sizeof(union thread_union) - 16;
	hwpcb->usp = 0;
	hwpcb->ptbr = ipcb->ptbr;
	hwpcb->pcc = 0;
	hwpcb->asn = 0;
	hwpcb->unique = virt_to_phys(ipcb);
	hwpcb->flags = ipcb->flags;
	hwpcb->res1 = hwpcb->res2 = 0;

	DBGS(("KSP 0x%lx PTBR 0x%lx VPTBR 0x%lx UNIQUE 0x%lx\n",
	      hwpcb->ksp, hwpcb->ptbr, hwrpb->vptb, hwpcb->unique));
	DBGS(("Starting secondary cpu %d: state 0x%lx pal_flags 0x%lx\n",
	      cpuid, idle->state, ipcb->flags));

	/* Setup HWRPB fields that SRM uses to activate secondary CPU */
	hwrpb->CPU_restart = __smp_callin;
	hwrpb->CPU_restart_data = (unsigned long) __smp_callin;

	/* Recalculate and update the HWRPB checksum */
	hwrpb_update_checksum(hwrpb);

	/*
	 * Send a "start" command to the specified processor.
	 */

	/* SRM III 3.4.1.3 */
	cpu->flags |= 0x22;	/* turn on Context Valid and Restart Capable */
	cpu->flags &= ~1;	/* turn off Bootstrap In Progress */
	wmb();

	send_secondary_console_msg("START\r\n", cpuid);

	/* Wait 10 seconds for an ACK from the console.  */
	timeout = jiffies + 10*HZ;
	while (time_before(jiffies, timeout)) {
		if (cpu->flags & 1)
			goto started;
		udelay(10);
		barrier();
	}
	printk(KERN_ERR "SMP: Processor %d failed to start.\n", cpuid);
	return -1;

 started:
	DBGS(("secondary_cpu_start: SUCCESS for CPU %d!!!\n", cpuid));
	return 0;
}
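/*
 * Recap of the activation sequence above: point the HWRPB restart
 * vector at __smp_callin, re-checksum the HWRPB so the console will
 * accept the change, mark the slot Context Valid and Restart Capable
 * (0x22) while clearing Bootstrap In Progress (bit 0), then ask the
 * console to START the CPU and poll its flags for the acknowledgment.
 */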
/*
 * Bring one cpu online.
 */
static int
smp_boot_one_cpu(int cpuid, struct task_struct *idle)
{
	unsigned long timeout;

	/* Signal the secondary to wait a moment.  */
	smp_secondary_alive = -1;

	/* Whirrr, whirrr, whirrrrrrrrr... */
	if (secondary_cpu_start(cpuid, idle))
		return -1;

	/* Notify the secondary CPU it can run calibrate_delay.  */
	mb();
	smp_secondary_alive = 0;

	/* We've been acked by the console; wait one second for
	   the task to start up for real.  */
	timeout = jiffies + 1*HZ;
	while (time_before(jiffies, timeout)) {
		if (smp_secondary_alive == 1)
			goto alive;
		udelay(10);
		barrier();
	}

	/* We failed to boot the CPU.  */

	printk(KERN_ERR "SMP: Processor %d is stuck.\n", cpuid);
	return -1;

 alive:
	/* Another "Red Snapper". */
	return 0;
}
/*
 * Called from setup_arch.  Detect an SMP system and which processors
 * are present.
 */
void __init
setup_smp(void)
{
	struct percpu_struct *cpubase, *cpu;
	unsigned long i;

	if (boot_cpuid != 0) {
		printk(KERN_WARNING "SMP: Booting off cpu %d instead of 0?\n",
		       boot_cpuid);
	}

	if (hwrpb->nr_processors > 1) {
		int boot_cpu_palrev;

		DBGS(("setup_smp: nr_processors %ld\n",
		      hwrpb->nr_processors));

		cpubase = (struct percpu_struct *)
			((char*)hwrpb + hwrpb->processor_offset);
		boot_cpu_palrev = cpubase->pal_revision;

		for (i = 0; i < hwrpb->nr_processors; i++) {
			cpu = (struct percpu_struct *)
				((char *)cpubase + i*hwrpb->processor_size);
			if ((cpu->flags & 0x1cc) == 0x1cc) {
				smp_num_probed++;
				set_cpu_possible(i, true);
				set_cpu_present(i, true);
				cpu->pal_revision = boot_cpu_palrev;
			}

			DBGS(("setup_smp: CPU %d: flags 0x%lx type 0x%lx\n",
			      i, cpu->flags, cpu->type));
			DBGS(("setup_smp: CPU %d: PAL rev 0x%lx\n",
			      i, cpu->pal_revision));
		}
	} else {
		smp_num_probed = 1;
	}

	printk(KERN_INFO "SMP: %d CPUs probed -- cpu_present_mask = %lx\n",
	       smp_num_probed, cpumask_bits(cpu_present_mask)[0]);
}
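/*
 * Our reading of the 0x1cc test above (not spelled out in this file):
 * it requires the HWRPB per-CPU flag bits for Processor Available,
 * Processor Present and the PALcode valid/loaded state all at once,
 * so only fully usable processors are marked possible and present.
 */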
/*
 * Called by smp_init to prepare the secondaries.
 */
void __init
smp_prepare_cpus(unsigned int max_cpus)
{
	/* Take care of some initial bookkeeping.  */
	memset(ipi_data, 0, sizeof(ipi_data));

	current_thread_info()->cpu = boot_cpuid;

	smp_store_cpu_info(boot_cpuid);
	smp_setup_percpu_timer(boot_cpuid);

	/* Nothing to do on a UP box, or when told not to.  */
	if (smp_num_probed == 1 || max_cpus == 0) {
		init_cpu_possible(cpumask_of(boot_cpuid));
		init_cpu_present(cpumask_of(boot_cpuid));
		printk(KERN_INFO "SMP mode deactivated.\n");
		return;
	}

	printk(KERN_INFO "SMP starting up secondaries.\n");

	smp_num_cpus = smp_num_probed;
}
void
smp_prepare_boot_cpu(void)
{
}
int
__cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	smp_boot_one_cpu(cpu, tidle);

	return cpu_online(cpu) ? 0 : -ENOSYS;
}
void __init
smp_cpus_done(unsigned int max_cpus)
{
	int cpu;
	unsigned long bogosum = 0;

	for(cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu_online(cpu))
			bogosum += cpu_data[cpu].loops_per_jiffy;

	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       num_online_cpus(),
	       (bogosum + 2500) / (500000/HZ),
	       ((bogosum + 2500) / (5000/HZ)) % 100);
}
int
setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}
static void
send_ipi_message(const struct cpumask *to_whom, enum ipi_message_type operation)
{
	int i;

	mb();
	for_each_cpu(i, to_whom)
		set_bit(operation, &ipi_data[i].bits);

	mb();
	for_each_cpu(i, to_whom)
		wripir(i);
}
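/*
 * The two mb()s above carry the IPI protocol: the first orders the
 * sender's prior stores against setting the request bit, the second
 * orders the bit against the wripir() that actually interrupts the
 * target.  handle_ipi() mirrors them on the receive side.
 */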
void
handle_ipi(struct pt_regs *regs)
{
	int this_cpu = smp_processor_id();
	unsigned long *pending_ipis = &ipi_data[this_cpu].bits;
	unsigned long ops;

	DBGS(("handle_ipi: on CPU %d ops 0x%lx PC 0x%lx\n",
	      this_cpu, *pending_ipis, regs->pc));

	mb();	/* Order interrupt and bit testing. */
	while ((ops = xchg(pending_ipis, 0)) != 0) {
		mb();	/* Order bit clearing and data access. */
		do {
			unsigned long which = ops & -ops;
			ops &= ~which;
			which = __ffs(which);

			switch (which) {
			case IPI_RESCHEDULE:
				scheduler_ipi();
				break;

			case IPI_CALL_FUNC:
				generic_smp_call_function_interrupt();
				break;

			case IPI_CPU_STOP:
				halt();

			default:
				printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n",
				       this_cpu, which);
				break;
			}
		} while (ops);

		mb();	/* Order data access and bit testing. */
	}

	cpu_data[this_cpu].ipi_count++;

	if (hwrpb->txrdy)
		recv_secondary_console_msg();
}
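/*
 * The drain pattern above: xchg() atomically claims every pending
 * bit, the inner loop peels them off lowest-first (ops & -ops, then
 * __ffs), and the outer loop repeats in case new IPIs arrived while
 * the previous batch was being handled.
 */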
void
smp_send_reschedule(int cpu)
{
#ifdef DEBUG_IPI_MSG
	if (cpu == hard_smp_processor_id())
		printk(KERN_WARNING
		       "smp_send_reschedule: Sending IPI to self.\n");
#endif
	send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
}
void
smp_send_stop(void)
{
	cpumask_t to_whom;

	cpumask_copy(&to_whom, cpu_possible_mask);
	cpumask_clear_cpu(smp_processor_id(), &to_whom);
#ifdef DEBUG_IPI_MSG
	if (hard_smp_processor_id() != boot_cpu_id)
		printk(KERN_WARNING "smp_send_stop: Not on boot cpu.\n");
#endif
	send_ipi_message(&to_whom, IPI_CPU_STOP);
}
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	send_ipi_message(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC);
}
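/*
 * These two hooks are all the generic smp_call_function machinery
 * needs from the architecture: both map onto IPI_CALL_FUNC, which
 * handle_ipi() above hands to generic_smp_call_function_interrupt().
 */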
static void
ipi_imb(void *ignored)
{
	imb();
}

void
smp_imb(void)
{
	/* Must wait for other processors to flush their icache
	   before continuing. */
	if (on_each_cpu(ipi_imb, NULL, 1))
		printk(KERN_CRIT "smp_imb: timed out\n");
}
EXPORT_SYMBOL(smp_imb);
static void
ipi_flush_tlb_all(void *ignored)
{
	tbia();
}

void
flush_tlb_all(void)
{
	/* Although we don't have any data to pass, we do want to
	   synchronize with the other processors.  */
	if (on_each_cpu(ipi_flush_tlb_all, NULL, 1)) {
		printk(KERN_CRIT "flush_tlb_all: timed out\n");
	}
}
#define asn_locked() (cpu_data[smp_processor_id()].asn_lock)
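/*
 * Our reading of asn_lock (not spelled out in this file): the mmu
 * context-switch path sets it while choosing a new ASN for this CPU,
 * so the IPI handlers below must not reload the live context during
 * that window; they fall back to flush_tlb_other(), which simply
 * invalidates the mm's per-cpu context so a fresh ASN is allocated
 * at the next switch.
 */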
static void
ipi_flush_tlb_mm(void *x)
{
	struct mm_struct *mm = (struct mm_struct *) x;
	if (mm == current->active_mm && !asn_locked())
		flush_tlb_current(mm);
	else
		flush_tlb_other(mm);
}
void
flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if (mm == current->active_mm) {
		flush_tlb_current(mm);
		if (atomic_read(&mm->mm_users) <= 1) {
			int cpu, this_cpu = smp_processor_id();
			for (cpu = 0; cpu < NR_CPUS; cpu++) {
				if (!cpu_online(cpu) || cpu == this_cpu)
					continue;
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			preempt_enable();
			return;
		}
	}

	if (smp_call_function(ipi_flush_tlb_mm, mm, 1)) {
		printk(KERN_CRIT "flush_tlb_mm: timed out\n");
	}

	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_mm);
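/*
 * The mm_users <= 1 fast path above avoids the cross-CPU IPI
 * entirely: zeroing mm->context[cpu] invalidates the remote CPU's
 * ASN for this mm, so the flush happens lazily the next time that
 * CPU switches to the mm.  The same trick repeats in
 * flush_tlb_page() and flush_icache_user_range() below.
 */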
struct flush_tlb_page_struct {
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long addr;
};
static void
ipi_flush_tlb_page(void *x)
{
	struct flush_tlb_page_struct *data = (struct flush_tlb_page_struct *)x;
	struct mm_struct *mm = data->mm;

	if (mm == current->active_mm && !asn_locked())
		flush_tlb_current_page(mm, data->vma, data->addr);
	else
		flush_tlb_other(mm);
}
void
flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct flush_tlb_page_struct data;
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();

	if (mm == current->active_mm) {
		flush_tlb_current_page(mm, vma, addr);
		if (atomic_read(&mm->mm_users) <= 1) {
			int cpu, this_cpu = smp_processor_id();
			for (cpu = 0; cpu < NR_CPUS; cpu++) {
				if (!cpu_online(cpu) || cpu == this_cpu)
					continue;
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			preempt_enable();
			return;
		}
	}

	data.vma = vma;
	data.mm = mm;
	data.addr = addr;

	if (smp_call_function(ipi_flush_tlb_page, &data, 1)) {
		printk(KERN_CRIT "flush_tlb_page: timed out\n");
	}

	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_page);
void
flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	/* On the Alpha we always flush the whole user tlb.  */
	flush_tlb_mm(vma->vm_mm);
}
EXPORT_SYMBOL(flush_tlb_range);
static void
ipi_flush_icache_page(void *x)
{
	struct mm_struct *mm = (struct mm_struct *) x;
	if (mm == current->active_mm && !asn_locked())
		__load_new_mm_context(mm);
	else
		flush_tlb_other(mm);
}
void
flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			unsigned long addr, int len)
{
	struct mm_struct *mm = vma->vm_mm;

	if ((vma->vm_flags & VM_EXEC) == 0)
		return;

	preempt_disable();

	if (mm == current->active_mm) {
		__load_new_mm_context(mm);
		if (atomic_read(&mm->mm_users) <= 1) {
			int cpu, this_cpu = smp_processor_id();
			for (cpu = 0; cpu < NR_CPUS; cpu++) {
				if (!cpu_online(cpu) || cpu == this_cpu)
					continue;
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			preempt_enable();
			return;
		}
	}

	if (smp_call_function(ipi_flush_icache_page, mm, 1)) {
		printk(KERN_CRIT "flush_icache_page: timed out\n");
	}

	preempt_enable();
}