/* smp.c: Sparc64 SMP support.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/seq_file.h>
#include <linux/cache.h>
#include <linux/jiffies.h>
#include <linux/profile.h>
#include <linux/bootmem.h>

#include <asm/ptrace.h>
#include <asm/atomic.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cpudata.h>

#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/uaccess.h>
#include <asm/timer.h>
#include <asm/starfire.h>
extern int linux_num_cpus;
extern void calibrate_delay(void);

/* Please don't make this stuff initdata!!!  --DaveM */
static unsigned char boot_cpu_id;

cpumask_t cpu_online_map = CPU_MASK_NONE;
cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
static cpumask_t smp_commenced_mask;
static cpumask_t cpu_callout_map;
void smp_info(struct seq_file *m)
{
	int i;

	seq_printf(m, "State:\n");
	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_online(i))
			seq_printf(m,
				   "CPU%d:\t\tonline\n", i);
	}
}
void smp_bogo(struct seq_file *m)
{
	int i;

	for (i = 0; i < NR_CPUS; i++)
		if (cpu_online(i))
			seq_printf(m,
				   "Cpu%dBogo\t: %lu.%02lu\n"
				   "Cpu%dClkTck\t: %016lx\n",
				   i, cpu_data(i).udelay_val / (500000/HZ),
				   (cpu_data(i).udelay_val / (5000/HZ)) % 100,
				   i, cpu_data(i).clock_tick);
}
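/* A worked example of the arithmetic above (illustrative only, with a
 * hypothetical udelay_val): udelay_val is the cpu's loops_per_jiffy, so
 * assuming HZ == 1000 and udelay_val == 2000000 the integer part is
 * 2000000 / (500000/1000) == 4000 and the fractional part is
 * (2000000 / (5000/1000)) % 100 == 0, i.e. the line reads "Cpu0Bogo\t: 4000.00".
 */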
void __init smp_store_cpu_info(int id)
{
	int cpu_node;

	/* multiplier and counter set by
	   smp_setup_percpu_timer()  */
	cpu_data(id).udelay_val = loops_per_jiffy;

	cpu_find_by_mid(id, &cpu_node);
	cpu_data(id).clock_tick = prom_getintdefault(cpu_node,
						     "clock-frequency", 0);

	cpu_data(id).pgcache_size = 0;
	cpu_data(id).pte_cache[0] = NULL;
	cpu_data(id).pte_cache[1] = NULL;
	cpu_data(id).pgd_cache = NULL;
	cpu_data(id).idle_volume = 1;
}
static void smp_setup_percpu_timer(void);

static volatile unsigned long callin_flag = 0;

extern void inherit_locked_prom_mappings(int save_p);
static inline void cpu_setup_percpu_base(unsigned long cpu_id)
{
	__asm__ __volatile__("mov	%0, %%g5\n\t"
			     "stxa	%0, [%1] %2\n\t"
			     "membar	#Sync"
			     : /* no outputs */
			     : "r" (__per_cpu_offset(cpu_id)),
			       "r" (TSB_REG), "i" (ASI_IMMU));
}
void __init smp_callin(void)
{
	int cpuid = hard_smp_processor_id();

	inherit_locked_prom_mappings(0);

	cpu_setup_percpu_base(cpuid);

	smp_setup_percpu_timer();

	calibrate_delay();
	smp_store_cpu_info(cpuid);
	callin_flag = 1;
	__asm__ __volatile__("membar #Sync\n\t"
			     "flush  %%g6" : : : "memory");

	/* Clear this or we will die instantly when we
	 * schedule back to this idler...
	 */
	clear_thread_flag(TIF_NEWCHILD);

	/* Attach to the address space of init_task. */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	while (!cpu_isset(cpuid, smp_commenced_mask))
		rmb();

	cpu_set(cpuid, cpu_online_map);
}
151 printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
152 panic("SMP bolixed\n");
static unsigned long current_tick_offset;

/* This tick register synchronization scheme is taken entirely from
 * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit.
 *
 * The only change I've made is to rework it so that the master
 * initiates the synchronization instead of the slave.  -DaveM
 */
#define MASTER	0
#define SLAVE	(SMP_CACHE_BYTES/sizeof(unsigned long))

#define NUM_ROUNDS	64	/* magic value */
#define NUM_ITERS	5	/* likewise */

static DEFINE_SPINLOCK(itc_sync_lock);
static unsigned long go[SLAVE + 1];

#define DEBUG_TICK_SYNC	0
static inline long get_delta (long *rt, long *master)
{
	unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
	unsigned long tcenter, t0, t1, tm;
	unsigned long i;

	for (i = 0; i < NUM_ITERS; i++) {
		t0 = tick_ops->get_tick();
		go[MASTER] = 1;
		membar("#StoreLoad");
		while (!(tm = go[SLAVE]))
			rmb();
		go[SLAVE] = 0;
		membar("#StoreStore");
		t1 = tick_ops->get_tick();

		if (t1 - t0 < best_t1 - best_t0)
			best_t0 = t0, best_t1 = t1, best_tm = tm;
	}

	*rt = best_t1 - best_t0;
	*master = best_tm - best_t0;

	/* average best_t0 and best_t1 without overflow: */
	tcenter = (best_t0/2 + best_t1/2);
	if (best_t0 % 2 + best_t1 % 2 == 2)
		tcenter++;
	return tcenter - best_tm;
}
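/* An illustration of the overflow-safe averaging above: with best_t0 == 5
 * and best_t1 == 9, best_t0/2 + best_t1/2 is 2 + 4 == 6, and since both
 * halves dropped a remainder the correction bumps tcenter to 7, which is
 * exactly (5 + 9) / 2 without ever forming the possibly-overflowing sum
 * best_t0 + best_t1.
 */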
void smp_synchronize_tick_client(void)
{
	long i, delta, adj, adjust_latency = 0, done = 0;
	unsigned long flags, rt, master_time_stamp, bound;
#if DEBUG_TICK_SYNC
	struct {
		long rt;	/* roundtrip time */
		long master;	/* master's timestamp */
		long diff;	/* difference between midpoint and master's timestamp */
		long lat;	/* estimate of itc adjustment latency */
	} t[NUM_ROUNDS];
#endif

	go[MASTER] = 1;

	while (go[MASTER])
		rmb();

	local_irq_save(flags);
	{
		for (i = 0; i < NUM_ROUNDS; i++) {
			delta = get_delta(&rt, &master_time_stamp);
			if (delta == 0) {
				done = 1;	/* let's lock on to this... */
				bound = rt;
			}

			if (!done) {
				if (i > 0) {
					adjust_latency += -delta;
					adj = -delta + adjust_latency/4;
				} else
					adj = -delta;

				tick_ops->add_tick(adj, current_tick_offset);
			}
#if DEBUG_TICK_SYNC
			t[i].rt = rt;
			t[i].master = master_time_stamp;
			t[i].diff = delta;
			t[i].lat = adjust_latency/4;
#endif
		}
	}
	local_irq_restore(flags);

#if DEBUG_TICK_SYNC
	for (i = 0; i < NUM_ROUNDS; i++)
		printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
		       t[i].rt, t[i].master, t[i].diff, t[i].lat);
#endif

	printk(KERN_INFO "CPU %d: synchronized TICK with master CPU (last diff %ld cycles,"
	       "maxerr %lu cycles)\n", smp_processor_id(), delta, rt);
}
static void smp_start_sync_tick_client(int cpu);

static void smp_synchronize_one_tick(int cpu)
{
	unsigned long flags, i;

	go[MASTER] = 0;

	smp_start_sync_tick_client(cpu);

	/* wait for client to be ready */
	while (!go[MASTER])
		rmb();

	/* now let the client proceed into his loop */
	go[MASTER] = 0;
	membar("#StoreLoad");

	spin_lock_irqsave(&itc_sync_lock, flags);
	{
		for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
			while (!go[MASTER])
				rmb();
			go[MASTER] = 0;
			membar("#StoreStore");
			go[SLAVE] = tick_ops->get_tick();
			membar("#StoreLoad");
		}
	}
	spin_unlock_irqrestore(&itc_sync_lock, flags);
}
extern unsigned long sparc64_cpu_startup;

/* The OBP cpu startup callback truncates the 3rd arg cookie to
 * 32-bits (I think) so to be safe we have it read the pointer
 * contained here so we work on >4GB machines. -DaveM
 */
static struct thread_info *cpu_new_thread = NULL;
static int __devinit smp_boot_one_cpu(unsigned int cpu)
{
	unsigned long entry =
		(unsigned long)(&sparc64_cpu_startup);
	unsigned long cookie =
		(unsigned long)(&cpu_new_thread);
	struct task_struct *p;
	int timeout, ret, cpu_node;

	callin_flag = 0;
	cpu_new_thread = p->thread_info;
	cpu_set(cpu, cpu_callout_map);

	cpu_find_by_mid(cpu, &cpu_node);
	prom_startcpu(cpu_node, entry, cookie);

	for (timeout = 0; timeout < 5000000; timeout++) {
		if (callin_flag)
			break;
		udelay(100);
	}
	if (callin_flag) {
		ret = 0;
	} else {
		printk("Processor %d is stuck.\n", cpu);
		cpu_clear(cpu, cpu_callout_map);
		ret = -ENODEV;
	}
	cpu_new_thread = NULL;

	return ret;
}
static void spitfire_xcall_helper(u64 data0, u64 data1, u64 data2, u64 pstate, unsigned long cpu)
{
	u64 result, target;
	int stuck, tmp;

	if (this_is_starfire) {
		/* map to real upaid */
		cpu = (((cpu & 0x3c) << 1) |
		       ((cpu & 0x40) >> 4) |
		       (cpu & 0x3));
	}

	target = (cpu << 14) | 0x70;
again:
	/* Ok, this is the real Spitfire Errata #54.
	 * One must read back from a UDB internal register
	 * after writes to the UDB interrupt dispatch, but
	 * before the membar Sync for that write.
	 * So we use the high UDB control register (ASI 0x7f,
	 * ADDR 0x20) for the dummy read. -DaveM
	 */
	tmp = 0x40;
	__asm__ __volatile__(
	"wrpr	%1, %2, %%pstate\n\t"
	"stxa	%4, [%0] %3\n\t"
	"stxa	%5, [%0+%8] %3\n\t"
	"add	%0, %8, %0\n\t"
	"stxa	%6, [%0+%8] %3\n\t"
	"membar	#Sync\n\t"
	"stxa	%%g0, [%7] %3\n\t"
	"membar	#Sync\n\t"
	"mov	0x20, %%g1\n\t"
	"ldxa	[%%g1] 0x7f, %%g0\n\t"
	"membar	#Sync"
	: "=r" (tmp)
	: "r" (pstate), "i" (PSTATE_IE), "i" (ASI_INTR_W),
	  "r" (data0), "r" (data1), "r" (data2), "r" (target),
	  "r" (0x10), "0" (tmp)
	: "g1");

	/* NOTE: PSTATE_IE is still clear. */
	stuck = 100000;
	do {
		__asm__ __volatile__("ldxa [%%g0] %1, %0"
			: "=r" (result)
			: "i" (ASI_INTR_DISPATCH_STAT));
		if (result == 0) {
			__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
					     : : "r" (pstate));
			return;
		}
		stuck -= 1;
		if (stuck == 0)
			break;
	} while (result & 0x1);
	__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
			     : : "r" (pstate));
	if (stuck == 0) {
		printk("CPU[%d]: mondo stuckage result[%016lx]\n",
		       smp_processor_id(), result);
	} else {
		udelay(2);
		goto again;
	}
}
static __inline__ void spitfire_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
{
	u64 pstate;
	int i;

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
	for_each_cpu_mask(i, mask)
		spitfire_xcall_helper(data0, data1, data2, pstate, i);
}
/* Cheetah now allows to send the whole 64-bytes of data in the interrupt
 * packet, but we have no use for that.  However we do take advantage of
 * the new pipelining feature (ie. dispatch to multiple cpus simultaneously).
 */
static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
{
	u64 pstate, ver;
	int nack_busy_id, is_jalapeno;

	if (cpus_empty(mask))
		return;

	/* Unfortunately, someone at Sun had the brilliant idea to make the
	 * busy/nack fields hard-coded by ITID number for this Ultra-III
	 * derivative processor.
	 */
	__asm__ ("rdpr %%ver, %0" : "=r" (ver));
	is_jalapeno = ((ver >> 32) == 0x003e0016);

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));

retry:
	__asm__ __volatile__("wrpr %0, %1, %%pstate\n\t"
			     : : "r" (pstate), "i" (PSTATE_IE));

	/* Setup the dispatch data registers. */
	__asm__ __volatile__("stxa	%0, [%3] %6\n\t"
			     "stxa	%1, [%4] %6\n\t"
			     "stxa	%2, [%5] %6\n\t"
			     "membar	#Sync\n\t"
			     : /* no outputs */
			     : "r" (data0), "r" (data1), "r" (data2),
			       "r" (0x40), "r" (0x50), "r" (0x60),
			       "i" (ASI_INTR_W));

	nack_busy_id = 0;
	{
		int i;

		for_each_cpu_mask(i, mask) {
			u64 target = (i << 14) | 0x70;

			if (!is_jalapeno)
				target |= (nack_busy_id << 24);
			__asm__ __volatile__(
				"stxa	%%g0, [%0] %1\n\t"
				"membar	#Sync\n\t"
				: /* no outputs */
				: "r" (target), "i" (ASI_INTR_W));
			nack_busy_id++;
		}
	}

	/* Now, poll for completion. */
	{
		u64 dispatch_stat;
		long stuck;

		stuck = 100000 * nack_busy_id;
		do {
			__asm__ __volatile__("ldxa	[%%g0] %1, %0"
					     : "=r" (dispatch_stat)
					     : "i" (ASI_INTR_DISPATCH_STAT));
			if (dispatch_stat == 0UL) {
				__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
						     : : "r" (pstate));
				return;
			}
			if (!--stuck)
				break;
		} while (dispatch_stat & 0x5555555555555555UL);

		__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
				     : : "r" (pstate));

		if ((dispatch_stat & ~(0x5555555555555555UL)) == 0) {
			/* Busy bits will not clear, continue instead
			 * of freezing up on this cpu.
			 */
			printk("CPU[%d]: mondo stuckage result[%016lx]\n",
			       smp_processor_id(), dispatch_stat);
		} else {
			int i, this_busy_nack = 0;

			/* Delay some random time with interrupts enabled
			 * to prevent deadlock.
			 */
			udelay(2 * nack_busy_id);

			/* Clear out the mask bits for cpus which did not
			 * NACK us.
			 */
			for_each_cpu_mask(i, mask) {
				u64 check_mask;

				if (is_jalapeno)
					check_mask = (0x2UL << (2*i));
				else
					check_mask = (0x2UL <<
						      this_busy_nack);
				if ((dispatch_stat & check_mask) == 0)
					cpu_clear(i, mask);
				this_busy_nack += 2;
			}

			goto retry;
		}
	}
}
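/* The dispatch status word layout assumed by the polling code above: each
 * dispatch slot occupies a 2-bit field, busy in the even bit and nack in the
 * odd bit.  Hence 0x5555555555555555UL masks all busy bits while polling, and
 * (0x2UL << (2*i)) (jalapeno) or (0x2UL << this_busy_nack) (other Ultra-III
 * derivatives) picks out one slot's nack bit when deciding which cpus to
 * retry.
 */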
/* Send cross call to all processors mentioned in MASK
 * except self.
 */
static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, cpumask_t mask)
{
	u64 data0 = (((u64)ctx)<<32 | (((u64)func) & 0xffffffff));
	int this_cpu = get_cpu();

	cpus_and(mask, mask, cpu_online_map);
	cpu_clear(this_cpu, mask);

	if (tlb_type == spitfire)
		spitfire_xcall_deliver(data0, data1, data2, mask);
	else
		cheetah_xcall_deliver(data0, data1, data2, mask);
	/* NOTE: Caller runs local copy on master. */

	put_cpu();
}
extern unsigned long xcall_sync_tick;

static void smp_start_sync_tick_client(int cpu)
{
	cpumask_t mask = cpumask_of_cpu(cpu);

	smp_cross_call_masked(&xcall_sync_tick,
			      0, 0, 0, mask);
}

/* Send cross call to all processors except self. */
#define smp_cross_call(func, ctx, data1, data2) \
	smp_cross_call_masked(func, ctx, data1, data2, cpu_online_map)
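/* Usage sketch: smp_report_regs() further below is simply
 *
 *	smp_cross_call(&xcall_report_regs, 0, 0, 0);
 *
 * which expands to smp_cross_call_masked() over cpu_online_map, so every
 * online cpu except the caller runs the xcall_report_regs handler.
 */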
struct call_data_struct {
	void (*func) (void *info);
	void *info;
	atomic_t finished;
	int wait;
};

static DEFINE_SPINLOCK(call_lock);
static struct call_data_struct *call_data;

extern unsigned long xcall_call_function;

/*
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function(void (*func)(void *info), void *info,
		      int nonatomic, int wait)
{
	struct call_data_struct data;
	int cpus = num_online_cpus() - 1;
	long timeout;

	if (!cpus)
		return 0;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	atomic_set(&data.finished, 0);
	data.wait = wait;

	spin_lock(&call_lock);

	call_data = &data;

	smp_cross_call(&xcall_call_function, 0, 0, 0);

	/*
	 * Wait for other cpus to complete function or at
	 * least snap the call data.
	 */
	timeout = 1000000;
	while (atomic_read(&data.finished) != cpus) {
		if (--timeout <= 0)
			goto out_timeout;
		barrier();
		udelay(1);
	}

	spin_unlock(&call_lock);

	return 0;

out_timeout:
	spin_unlock(&call_lock);
	printk("XCALL: Remote cpus not responding, ncpus=%ld finished=%ld\n",
	       (long) num_online_cpus() - 1L,
	       (long) atomic_read(&data.finished));
	return 0;
}
void smp_call_function_client(int irq, struct pt_regs *regs)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;

	clear_softint(1 << irq);
	if (call_data->wait) {
		/* let initiator proceed only after completion */
		func(info);
		atomic_inc(&call_data->finished);
	} else {
		/* let initiator proceed after getting data */
		atomic_inc(&call_data->finished);
		func(info);
	}
}
extern unsigned long xcall_flush_tlb_mm;
extern unsigned long xcall_flush_tlb_pending;
extern unsigned long xcall_flush_tlb_kernel_range;
extern unsigned long xcall_flush_tlb_all_spitfire;
extern unsigned long xcall_flush_tlb_all_cheetah;
extern unsigned long xcall_report_regs;
extern unsigned long xcall_receive_signal;

#ifdef DCACHE_ALIASING_POSSIBLE
extern unsigned long xcall_flush_dcache_page_cheetah;
#endif
extern unsigned long xcall_flush_dcache_page_spitfire;

#ifdef CONFIG_DEBUG_DCFLUSH
extern atomic_t dcpage_flushes;
extern atomic_t dcpage_flushes_xcall;
#endif
static __inline__ void __local_flush_dcache_page(struct page *page)
{
#ifdef DCACHE_ALIASING_POSSIBLE
	__flush_dcache_page(page_address(page),
			    ((tlb_type == spitfire) &&
			     page_mapping(page) != NULL));
#endif

	if (page_mapping(page) != NULL &&
	    tlb_type == spitfire)
		__flush_icache_page(__pa(page_address(page)));
}
void smp_flush_dcache_page_impl(struct page *page, int cpu)
{
	cpumask_t mask = cpumask_of_cpu(cpu);
	int this_cpu = get_cpu();

#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif
	if (cpu == this_cpu) {
		__local_flush_dcache_page(page);
	} else if (cpu_online(cpu)) {
		void *pg_addr = page_address(page);
		u64 data0;

		if (tlb_type == spitfire) {
			data0 =
				((u64)&xcall_flush_dcache_page_spitfire);
			if (page_mapping(page) != NULL)
				data0 |= ((u64)1 << 32);
			spitfire_xcall_deliver(data0,
					       __pa(pg_addr),
					       (u64) pg_addr,
					       mask);
		} else {
#ifdef DCACHE_ALIASING_POSSIBLE
			data0 =
				((u64)&xcall_flush_dcache_page_cheetah);
			cheetah_xcall_deliver(data0,
					      __pa(pg_addr),
					      0, mask);
#endif
		}
#ifdef CONFIG_DEBUG_DCFLUSH
		atomic_inc(&dcpage_flushes_xcall);
#endif
	}

	put_cpu();
}
void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
{
	void *pg_addr = page_address(page);
	cpumask_t mask = cpu_online_map;
	u64 data0;
	int this_cpu = get_cpu();

	cpu_clear(this_cpu, mask);

#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif
	if (cpus_empty(mask))
		goto flush_self;
	if (tlb_type == spitfire) {
		data0 = ((u64)&xcall_flush_dcache_page_spitfire);
		if (page_mapping(page) != NULL)
			data0 |= ((u64)1 << 32);
		spitfire_xcall_deliver(data0,
				       __pa(pg_addr),
				       (u64) pg_addr,
				       mask);
	} else {
#ifdef DCACHE_ALIASING_POSSIBLE
		data0 = ((u64)&xcall_flush_dcache_page_cheetah);
		cheetah_xcall_deliver(data0,
				      __pa(pg_addr),
				      0, mask);
#endif
	}
#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes_xcall);
#endif
 flush_self:
	__local_flush_dcache_page(page);

	put_cpu();
}
void smp_receive_signal(int cpu)
{
	cpumask_t mask = cpumask_of_cpu(cpu);

	if (cpu_online(cpu)) {
		u64 data0 = (((u64)&xcall_receive_signal) & 0xffffffff);

		if (tlb_type == spitfire)
			spitfire_xcall_deliver(data0, 0, 0, mask);
		else
			cheetah_xcall_deliver(data0, 0, 0, mask);
	}
}

void smp_receive_signal_client(int irq, struct pt_regs *regs)
{
	/* Just return, rtrap takes care of the rest. */
	clear_softint(1 << irq);
}
void smp_report_regs(void)
{
	smp_cross_call(&xcall_report_regs, 0, 0, 0);
}

void smp_flush_tlb_all(void)
{
	if (tlb_type == spitfire)
		smp_cross_call(&xcall_flush_tlb_all_spitfire, 0, 0, 0);
	else
		smp_cross_call(&xcall_flush_tlb_all_cheetah, 0, 0, 0);
	__flush_tlb_all();
}
/* We know that the window frames of the user have been flushed
 * to the stack before we get here because all callers of us
 * are flush_tlb_*() routines, and these run after flush_cache_*()
 * which performs the flushw.
 *
 * The SMP TLB coherency scheme we use works as follows:
 *
 * 1) mm->cpu_vm_mask is a bit mask of which cpus an address
 *    space has (potentially) executed on, this is the heuristic
 *    we use to avoid doing cross calls.
 *
 *    Also, for flushing from kswapd and also for clones, we
 *    use cpu_vm_mask as the list of cpus on which to run the TLB
 *    flush.
 *
 * 2) TLB context numbers are shared globally across all processors
 *    in the system, this allows us to play several games to avoid
 *    cross calls.
 *
 *    One invariant is that when a cpu switches to a process, and
 *    that process's tsk->active_mm->cpu_vm_mask does not have the
 *    current cpu's bit set, that tlb context is flushed locally.
 *
 *    If the address space is non-shared (ie. mm->count == 1) we avoid
 *    cross calls when we want to flush the currently running process's
 *    tlb state.  This is done by clearing all cpu bits except the current
 *    processor's in current->active_mm->cpu_vm_mask and performing the
 *    flush locally only.  This will force any subsequent cpus which run
 *    this task to flush the context from the local tlb if the process
 *    migrates to another cpu (again).
 *
 * 3) For shared address spaces (threads) and swapping we bite the
 *    bullet for most cases and perform the cross call (but only to
 *    the cpus listed in cpu_vm_mask).
 *
 * The performance gain from "optimizing" away the cross call for threads is
 * questionable (in theory the big win for threads is the massive sharing of
 * address space state across processors).
 */
void smp_flush_tlb_mm(struct mm_struct *mm)
{
	/*
	 * This code is called from two places, dup_mmap and exit_mmap.  In the
	 * former case, we really need a flush.  In the latter case, the callers
	 * are single threaded exec_mmap (really need a flush), multithreaded
	 * exec_mmap case (do not need to flush, since the caller gets a new
	 * context via activate_mm), and all other callers of mmput() whence
	 * the flush can be optimized since the associated threads are dead and
	 * the mm is being torn down (__exit_mm and other mmput callers) or the
	 * owning thread is dissociating itself from the mm.  The
	 * (atomic_read(&mm->mm_users) == 0) check ensures real work is done
	 * for single thread exec and dup_mmap cases.  An alternate check might
	 * have been (current->mm != mm).
	 */
	if (atomic_read(&mm->mm_users) == 0)
		return;

	{
		u32 ctx = CTX_HWBITS(mm->context);
		int cpu = get_cpu();

		if (atomic_read(&mm->mm_users) == 1) {
			mm->cpu_vm_mask = cpumask_of_cpu(cpu);
			goto local_flush_and_out;
		}

		smp_cross_call_masked(&xcall_flush_tlb_mm,
				      ctx, 0, 0,
				      mm->cpu_vm_mask);

	local_flush_and_out:
		__flush_tlb_mm(ctx, SECONDARY_CONTEXT);

		put_cpu();
	}
}
void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
{
	u32 ctx = CTX_HWBITS(mm->context);
	int cpu = get_cpu();

	if (mm == current->active_mm && atomic_read(&mm->mm_users) == 1) {
		mm->cpu_vm_mask = cpumask_of_cpu(cpu);
		goto local_flush_and_out;
	} else {
		/* This optimization is not valid.  Normally
		 * we will be holding the page_table_lock, but
		 * there is an exception which is copy_page_range()
		 * when forking.  The lock is held during the individual
		 * page table updates in the parent, but not at the
		 * top level, which is where we are invoked.
		 */
		if (0) {
			cpumask_t this_cpu_mask = cpumask_of_cpu(cpu);

			/* By virtue of running under the mm->page_table_lock,
			 * and mmu_context.h:switch_mm doing the same, the
			 * following operation is safe.
			 */
			if (cpus_equal(mm->cpu_vm_mask, this_cpu_mask))
				goto local_flush_and_out;
		}
	}

	smp_cross_call_masked(&xcall_flush_tlb_pending,
			      ctx, nr, (unsigned long) vaddrs,
			      mm->cpu_vm_mask);

local_flush_and_out:
	__flush_tlb_pending(ctx, nr, vaddrs);

	put_cpu();
}
void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	start &= PAGE_MASK;
	end = PAGE_ALIGN(end);

	smp_cross_call(&xcall_flush_tlb_kernel_range,
		       0, start, end);

	__flush_tlb_kernel_range(start, end);
}
/* #define CAPTURE_DEBUG */
extern unsigned long xcall_capture;

static atomic_t smp_capture_depth = ATOMIC_INIT(0);
static atomic_t smp_capture_registry = ATOMIC_INIT(0);
static unsigned long penguins_are_doing_time;
void smp_capture(void)
{
	int result = atomic_add_ret(1, &smp_capture_depth);

	if (result == 1) {
		int ncpus = num_online_cpus();

#ifdef CAPTURE_DEBUG
		printk("CPU[%d]: Sending penguins to jail...",
		       smp_processor_id());
#endif
		penguins_are_doing_time = 1;
		membar("#StoreStore | #LoadStore");
		atomic_inc(&smp_capture_registry);
		smp_cross_call(&xcall_capture, 0, 0, 0);
		while (atomic_read(&smp_capture_registry) != ncpus)
			rmb();
	}
}

void smp_release(void)
{
	if (atomic_dec_and_test(&smp_capture_depth)) {
#ifdef CAPTURE_DEBUG
		printk("CPU[%d]: Giving pardon to "
		       "imprisoned penguins\n",
		       smp_processor_id());
#endif
		penguins_are_doing_time = 0;
		membar("#StoreStore | #StoreLoad");
		atomic_dec(&smp_capture_registry);
	}
}
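/* Usage sketch (illustrative only): code that must quiesce every other cpu
 * brackets its critical region like
 *
 *	smp_capture();
 *	... touch state that no other cpu may race with ...
 *	smp_release();
 *
 * Capture/release nest via smp_capture_depth, and the captured cpus sit in
 * smp_penguin_jailcell() below until penguins_are_doing_time drops to zero.
 */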
/* Imprisoned penguins run with %pil == 15, but PSTATE_IE set, so they
 * can service tlb flush xcalls...
 */
extern void prom_world(int);
extern void save_alternate_globals(unsigned long *);
extern void restore_alternate_globals(unsigned long *);
void smp_penguin_jailcell(int irq, struct pt_regs *regs)
{
	unsigned long global_save[24];

	clear_softint(1 << irq);

	__asm__ __volatile__("flushw");
	save_alternate_globals(global_save);
	prom_world(1);
	atomic_inc(&smp_capture_registry);
	membar("#StoreLoad | #StoreStore");
	while (penguins_are_doing_time)
		rmb();
	restore_alternate_globals(global_save);
	atomic_dec(&smp_capture_registry);
	prom_world(0);
}
extern unsigned long xcall_promstop;

void smp_promstop_others(void)
{
	smp_cross_call(&xcall_promstop, 0, 0, 0);
}
#define prof_multiplier(__cpu)		cpu_data(__cpu).multiplier
#define prof_counter(__cpu)		cpu_data(__cpu).counter
void smp_percpu_timer_interrupt(struct pt_regs *regs)
{
	unsigned long compare, tick, pstate;
	int cpu = smp_processor_id();
	int user = user_mode(regs);

	/*
	 * Check for level 14 softint.
	 */
	{
		unsigned long tick_mask = tick_ops->softint_mask;

		if (!(get_softint() & tick_mask)) {
			extern void handler_irq(int, struct pt_regs *);

			handler_irq(14, regs);
			return;
		}
		clear_softint(tick_mask);
	}

	do {
		profile_tick(CPU_PROFILING, regs);
		if (!--prof_counter(cpu)) {
			irq_enter();

			if (cpu == boot_cpu_id) {
				kstat_this_cpu.irqs[0]++;
				timer_tick_interrupt(regs);
			}

			update_process_times(user);

			irq_exit();

			prof_counter(cpu) = prof_multiplier(cpu);
		}

		/* Guarantee that the following sequences execute
		 * uninterrupted.
		 */
		__asm__ __volatile__("rdpr	%%pstate, %0\n\t"
				     "wrpr	%0, %1, %%pstate"
				     : "=r" (pstate)
				     : "i" (PSTATE_IE));

		compare = tick_ops->add_compare(current_tick_offset);
		tick = tick_ops->get_tick();

		/* Restore PSTATE_IE. */
		__asm__ __volatile__("wrpr	%0, 0x0, %%pstate"
				     : /* no outputs */
				     : "r" (pstate));
	} while (time_after_eq(tick, compare));
}
static void __init smp_setup_percpu_timer(void)
{
	int cpu = smp_processor_id();
	unsigned long pstate;

	prof_counter(cpu) = prof_multiplier(cpu) = 1;

	/* Guarantee that the following sequences execute
	 * uninterrupted.
	 */
	__asm__ __volatile__("rdpr	%%pstate, %0\n\t"
			     "wrpr	%0, %1, %%pstate"
			     : "=r" (pstate)
			     : "i" (PSTATE_IE));

	tick_ops->init_tick(current_tick_offset);

	/* Restore PSTATE_IE. */
	__asm__ __volatile__("wrpr	%0, 0x0, %%pstate"
			     : /* no outputs */
			     : "r" (pstate));
}
void __init smp_tick_init(void)
{
	boot_cpu_id = hard_smp_processor_id();
	current_tick_offset = timer_tick_offset;

	cpu_set(boot_cpu_id, cpu_online_map);
	prof_counter(boot_cpu_id) = prof_multiplier(boot_cpu_id) = 1;
}
/* /proc/profile writes can call this, don't __init it please. */
static DEFINE_SPINLOCK(prof_setup_lock);

int setup_profiling_timer(unsigned int multiplier)
{
	unsigned long flags;
	int i;

	if ((!multiplier) || (timer_tick_offset / multiplier) < 1000)
		return -EINVAL;

	spin_lock_irqsave(&prof_setup_lock, flags);
	for (i = 0; i < NR_CPUS; i++)
		prof_multiplier(i) = multiplier;
	current_tick_offset = (timer_tick_offset / multiplier);
	spin_unlock_irqrestore(&prof_setup_lock, flags);

	return 0;
}
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int instance, mid;

	instance = 0;
	while (!cpu_find_by_instance(instance, NULL, &mid)) {
		if (mid < max_cpus)
			cpu_set(mid, phys_cpu_present_map);
		instance++;
	}

	if (num_possible_cpus() > max_cpus) {
		instance = 0;
		while (!cpu_find_by_instance(instance, NULL, &mid)) {
			if (mid != boot_cpu_id) {
				cpu_clear(mid, phys_cpu_present_map);
				if (num_possible_cpus() <= max_cpus)
					break;
			}
			instance++;
		}
	}

	smp_store_cpu_info(boot_cpu_id);
}
void __devinit smp_prepare_boot_cpu(void)
{
	if (hard_smp_processor_id() >= NR_CPUS) {
		prom_printf("Serious problem, boot cpu id >= NR_CPUS\n");
		prom_halt();
	}

	current_thread_info()->cpu = hard_smp_processor_id();

	cpu_set(smp_processor_id(), cpu_online_map);
	cpu_set(smp_processor_id(), phys_cpu_present_map);
}
int __devinit __cpu_up(unsigned int cpu)
{
	int ret = smp_boot_one_cpu(cpu);

	if (!ret) {
		cpu_set(cpu, smp_commenced_mask);
		while (!cpu_isset(cpu, cpu_online_map))
			mb();
		if (!cpu_isset(cpu, cpu_online_map)) {
			ret = -ENODEV;
		} else {
			smp_synchronize_one_tick(cpu);
		}
	}
	return ret;
}
void __init smp_cpus_done(unsigned int max_cpus)
{
	unsigned long bogosum = 0;
	int i;

	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_online(i))
			bogosum += cpu_data(i).udelay_val;
	}
	printk("Total of %ld processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       (long) num_online_cpus(),
	       bogosum/(500000/HZ),
	       (bogosum/(5000/HZ))%100);
}
/* This needn't do anything as we do not sleep the cpu
 * inside of the idler task, so an interrupt is not needed
 * to get a clean fast response.
 *
 * XXX Reverify this assumption... -DaveM
 *
 * Addendum: We do want it to do something for the signal
 *           delivery case, we detect that by just seeing
 *           if we are trying to send this to an idler or not.
 */
void smp_send_reschedule(int cpu)
{
	if (cpu_data(cpu).idle_volume == 0)
		smp_receive_signal(cpu);
}
/* This is a nop because we capture all other cpus
 * anyways when making the PROM active.
 */
void smp_send_stop(void)
{
}
unsigned long __per_cpu_base;
unsigned long __per_cpu_shift;

EXPORT_SYMBOL(__per_cpu_base);
EXPORT_SYMBOL(__per_cpu_shift);
void __init setup_per_cpu_areas(void)
{
	unsigned long goal, size, i;
	char *ptr;
	/* Created by linker magic */
	extern char __per_cpu_start[], __per_cpu_end[];

	/* Copy section for each CPU (we discard the original) */
	goal = ALIGN(__per_cpu_end - __per_cpu_start, PAGE_SIZE);

#ifdef CONFIG_MODULES
	if (goal < PERCPU_ENOUGH_ROOM)
		goal = PERCPU_ENOUGH_ROOM;
#endif

	__per_cpu_shift = 0;
	for (size = 1UL; size < goal; size <<= 1UL)
		__per_cpu_shift++;

	/* Make sure the resulting __per_cpu_base value
	 * will fit in the 43-bit sign extended IMMU
	 * TSB register.
	 */
	ptr = __alloc_bootmem(size * NR_CPUS, PAGE_SIZE,
			      (unsigned long) __per_cpu_start);

	__per_cpu_base = ptr - __per_cpu_start;

	if ((__per_cpu_shift < PAGE_SHIFT) ||
	    (__per_cpu_base & ~PAGE_MASK) ||
	    (__per_cpu_base != (((long) __per_cpu_base << 20) >> 20))) {
		prom_printf("PER_CPU: Invalid layout, "
			    "ptr[%p] shift[%lx] base[%lx]\n",
			    ptr, __per_cpu_shift, __per_cpu_base);
		prom_halt();
	}

	for (i = 0; i < NR_CPUS; i++, ptr += size)
		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);

	/* Finally, load in the boot cpu's base value.
	 * We abuse the IMMU TSB register for trap handler
	 * entry and exit loading of %g5.  That is why it
	 * has to be page aligned.
	 */
	cpu_setup_percpu_base(hard_smp_processor_id());
}