/* smp.c: Sparc64 SMP support.
 *
 * Copyright (C) 1997, 2007 David S. Miller (davem@davemloft.net)
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/seq_file.h>
#include <linux/cache.h>
#include <linux/jiffies.h>
#include <linux/profile.h>
#include <linux/bootmem.h>

#include <asm/ptrace.h>
#include <asm/atomic.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cpudata.h>
#include <asm/hvtramp.h>
#include <asm/irq_regs.h>

#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/uaccess.h>
#include <asm/timer.h>
#include <asm/starfire.h>
#include <asm/sections.h>
#include <asm/mdesc.h>
#include <asm/hypervisor.h>
int sparc64_multi_core __read_mostly;

cpumask_t cpu_possible_map __read_mostly = CPU_MASK_NONE;
cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE;
DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
	{ [0 ... NR_CPUS-1] = CPU_MASK_NONE };

EXPORT_SYMBOL(cpu_possible_map);
EXPORT_SYMBOL(cpu_online_map);
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_SYMBOL(cpu_core_map);

static cpumask_t smp_commenced_mask;
void smp_info(struct seq_file *m)
{
	int i;

	seq_printf(m, "State:\n");
	for_each_online_cpu(i)
		seq_printf(m, "CPU%d:\t\tonline\n", i);
}
void smp_bogo(struct seq_file *m)
{
	int i;

	for_each_online_cpu(i)
		seq_printf(m,
			   "Cpu%dClkTck\t: %016lx\n",
			   i, cpu_data(i).clock_tick);
}
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(call_lock);

extern void setup_sparc64_timer(void);

static volatile unsigned long callin_flag = 0;
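/* Entry point for a freshly booted secondary cpu: the boot trampoline
 * drops us here once the cpu's trap table and stack are live.  We set
 * up our per-cpu state, then spin until the master signals us via
 * smp_commenced_mask before marking ourselves online.
 */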
void __cpuinit smp_callin(void)
{
	int cpuid = hard_smp_processor_id();

	__local_per_cpu_offset = __per_cpu_offset(cpuid);

	if (tlb_type == hypervisor)
		sun4v_ktsb_register();

	__flush_tlb_all();

	setup_sparc64_timer();

	if (cheetah_pcache_forced_on)
		cheetah_enable_pcache();

	__asm__ __volatile__("membar #Sync\n\t"
			     "flush  %%g6" : : : "memory");

	/* Clear this or we will die instantly when we
	 * schedule back to this idler...
	 */
	current_thread_info()->new_child = 0;

	/* Attach to the address space of init_task. */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	while (!cpu_isset(cpuid, smp_commenced_mask))
		rmb();

	spin_lock(&call_lock);
	cpu_set(cpuid, cpu_online_map);
	spin_unlock(&call_lock);

	/* idle thread is expected to have preempt disabled */
	preempt_disable();
}

void cpu_panic(void)
{
	printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
	panic("SMP bolixed\n");
}
/* This tick register synchronization scheme is taken entirely from
 * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit.
 *
 * The only change I've made is to rework it so that the master
 * initiates the synchronization instead of the slave.  -DaveM
 */

#define MASTER	0
#define SLAVE	(SMP_CACHE_BYTES/sizeof(unsigned long))

#define NUM_ROUNDS	64	/* magic value */
#define NUM_ITERS	5	/* likewise */
static DEFINE_SPINLOCK(itc_sync_lock);
static unsigned long go[SLAVE + 1];

#define DEBUG_TICK_SYNC	0
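/* The master/slave handshake runs through the go[] array: go[MASTER]
 * and go[SLAVE] sit one cache line apart so the two cpus never bounce
 * the same line.  The master pokes go[MASTER] to prompt the slave, and
 * the slave answers by depositing its current %tick into go[SLAVE].
 */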
static inline long get_delta (long *rt, long *master)
{
	unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
	unsigned long tcenter, t0, t1, tm;
	unsigned long i;

	for (i = 0; i < NUM_ITERS; i++) {
		t0 = tick_ops->get_tick();
		go[MASTER] = 1;
		membar_storeload();
		while (!(tm = go[SLAVE]))
			rmb();
		go[SLAVE] = 0;
		wmb();
		t1 = tick_ops->get_tick();

		if (t1 - t0 < best_t1 - best_t0)
			best_t0 = t0, best_t1 = t1, best_tm = tm;
	}

	*rt = best_t1 - best_t0;
	*master = best_tm - best_t0;

	/* average best_t0 and best_t1 without overflow: */
	tcenter = (best_t0/2 + best_t1/2);
	if (best_t0 % 2 + best_t1 % 2 == 2)
		tcenter++;
	return tcenter - best_tm;
}
void smp_synchronize_tick_client(void)
{
	long i, delta, adj, adjust_latency = 0, done = 0;
	unsigned long flags, rt, master_time_stamp, bound;
#if DEBUG_TICK_SYNC
	struct {
		long rt;	/* roundtrip time */
		long master;	/* master's timestamp */
		long diff;	/* difference between midpoint and master's timestamp */
		long lat;	/* estimate of itc adjustment latency */
	} t[NUM_ROUNDS];
#endif

	go[MASTER] = 1;

	while (go[MASTER])
		rmb();

	local_irq_save(flags);
	{
		for (i = 0; i < NUM_ROUNDS; i++) {
			delta = get_delta(&rt, &master_time_stamp);
			if (delta == 0) {
				done = 1;	/* let's lock on to this... */
				bound = rt;
			}

			if (!done) {
				if (i > 0) {
					adjust_latency += -delta;
					adj = -delta + adjust_latency/4;
				} else
					adj = -delta;

				tick_ops->add_tick(adj);
			}
#if DEBUG_TICK_SYNC
			t[i].rt = rt;
			t[i].master = master_time_stamp;
			t[i].diff = delta;
			t[i].lat = adjust_latency/4;
#endif
		}
	}
	local_irq_restore(flags);

#if DEBUG_TICK_SYNC
	for (i = 0; i < NUM_ROUNDS; i++)
		printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
		       t[i].rt, t[i].master, t[i].diff, t[i].lat);
#endif

	printk(KERN_INFO "CPU %d: synchronized TICK with master CPU "
	       "(last diff %ld cycles, maxerr %lu cycles)\n",
	       smp_processor_id(), delta, rt);
}
static void smp_start_sync_tick_client(int cpu);
static void smp_synchronize_one_tick(int cpu)
{
	unsigned long flags, i;

	go[MASTER] = 0;

	smp_start_sync_tick_client(cpu);

	/* wait for client to be ready */
	while (!go[MASTER])
		rmb();

	/* now let the client proceed into his loop */
	go[MASTER] = 0;
	membar_storeload();

	spin_lock_irqsave(&itc_sync_lock, flags);
	{
		for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
			while (!go[MASTER])
				rmb();
			go[MASTER] = 0;
			wmb();
			go[SLAVE] = tick_ops->get_tick();
			membar_storeload();
		}
	}
	spin_unlock_irqrestore(&itc_sync_lock, flags);
}
#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
/* XXX Put this in some common place. XXX */
static unsigned long kimage_addr_to_ra(void *p)
{
	unsigned long val = (unsigned long) p;

	return kern_base + (val - KERNBASE);
}
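/* To start a cpu under LDOM domaining we hand the hypervisor an
 * hvtramp_descr describing the kernel's locked TLB mappings, the cpu's
 * fault status area, and the new thread pointer, then point it at the
 * hypervisor trampoline, which installs those mappings before jumping
 * into the kernel proper.
 */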
static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg)
{
	extern unsigned long sparc64_ttable_tl0;
	extern unsigned long kern_locked_tte_data;
	extern int bigkernel;
	struct hvtramp_descr *hdesc;
	unsigned long trampoline_ra;
	struct trap_per_cpu *tb;
	u64 tte_vaddr, tte_data;
	unsigned long hv_err;

	hdesc = kzalloc(sizeof(*hdesc), GFP_KERNEL);
	if (!hdesc) {
		printk(KERN_ERR "ldom_startcpu_cpuid: Cannot allocate "
		       "hvtramp_descr.\n");
		return;
	}

	hdesc->cpu = cpu;
	hdesc->num_mappings = (bigkernel ? 2 : 1);

	tb = &trap_block[cpu];
	tb->hdesc = hdesc;

	hdesc->fault_info_va = (unsigned long) &tb->fault_info;
	hdesc->fault_info_pa = kimage_addr_to_ra(&tb->fault_info);

	hdesc->thread_reg = thread_reg;

	tte_vaddr = (unsigned long) KERNBASE;
	tte_data = kern_locked_tte_data;

	hdesc->maps[0].vaddr = tte_vaddr;
	hdesc->maps[0].tte = tte_data;
	if (bigkernel) {
		tte_vaddr += 0x400000;
		tte_data += 0x400000;
		hdesc->maps[1].vaddr = tte_vaddr;
		hdesc->maps[1].tte = tte_data;
	}

	trampoline_ra = kimage_addr_to_ra(hv_cpu_startup);

	hv_err = sun4v_cpu_start(cpu, trampoline_ra,
				 kimage_addr_to_ra(&sparc64_ttable_tl0),
				 __pa(hdesc));
	if (hv_err)
		printk(KERN_ERR "ldom_startcpu_cpuid: sun4v_cpu_start() "
		       "gives error %lu\n", hv_err);
}
#endif
extern unsigned long sparc64_cpu_startup;

/* The OBP cpu startup callback truncates the 3rd arg cookie to
 * 32-bits (I think) so to be safe we have it read the pointer
 * contained here so we work on >4GB machines. -DaveM
 */
static struct thread_info *cpu_new_thread = NULL;
static int __devinit smp_boot_one_cpu(unsigned int cpu)
{
	struct trap_per_cpu *tb = &trap_block[cpu];
	unsigned long entry =
		(unsigned long)(&sparc64_cpu_startup);
	unsigned long cookie =
		(unsigned long)(&cpu_new_thread);
	struct task_struct *p;
	int timeout, ret;

	p = fork_idle(cpu);
	if (IS_ERR(p))
		return PTR_ERR(p);
	callin_flag = 0;
	cpu_new_thread = task_thread_info(p);

	if (tlb_type == hypervisor) {
#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
		if (ldom_domaining_enabled)
			ldom_startcpu_cpuid(cpu,
					    (unsigned long) cpu_new_thread);
		else
#endif
			prom_startcpu_cpuid(cpu, entry, cookie);
	} else {
		struct device_node *dp = of_find_node_by_cpuid(cpu);

		prom_startcpu(dp->node, entry, cookie);
	}

	for (timeout = 0; timeout < 50000; timeout++) {
		if (callin_flag)
			break;
		udelay(100);
	}

	if (callin_flag) {
		ret = 0;
	} else {
		printk("Processor %d is stuck.\n", cpu);
		ret = -ENODEV;
	}
	cpu_new_thread = NULL;

	if (tb->hdesc) {
		kfree(tb->hdesc);
		tb->hdesc = NULL;
	}

	return ret;
}
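/* On pre-sun4v hardware a cross call is delivered by hand: the sender
 * writes data0-2 into its outgoing interrupt dispatch registers, kicks
 * the dispatch at the target's ITID, and then polls
 * ASI_INTR_DISPATCH_STAT until the BUSY bit clears, retrying if the
 * target NACKed us.
 */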
static void spitfire_xcall_helper(u64 data0, u64 data1, u64 data2, u64 pstate, unsigned long cpu)
{
	u64 result, target;
	int stuck, tmp;

	if (this_is_starfire) {
		/* map to real upaid */
		cpu = (((cpu & 0x3c) << 1) |
			((cpu & 0x40) >> 4) |
			(cpu & 0x3));
	}

	target = (cpu << 14) | 0x70;
again:
	/* Ok, this is the real Spitfire Errata #54.
	 * One must read back from a UDB internal register
	 * after writes to the UDB interrupt dispatch, but
	 * before the membar Sync for that write.
	 * So we use the high UDB control register (ASI 0x7f,
	 * ADDR 0x20) for the dummy read. -DaveM
	 */
	tmp = 0x40;
	__asm__ __volatile__(
	"wrpr	%1, %2, %%pstate\n\t"
	"stxa	%4, [%0] %3\n\t"
	"stxa	%5, [%0+%8] %3\n\t"
	"add	%0, %8, %0\n\t"
	"stxa	%6, [%0+%8] %3\n\t"
	"membar	#Sync\n\t"
	"stxa	%%g0, [%7] %3\n\t"
	"membar	#Sync\n\t"
	"mov	0x20, %%g1\n\t"
	"ldxa	[%%g1] 0x7f, %%g0\n\t"
	"membar	#Sync"
	: "=r" (tmp)
	: "r" (pstate), "i" (PSTATE_IE), "i" (ASI_INTR_W),
	  "r" (data0), "r" (data1), "r" (data2), "r" (target),
	  "r" (0x10), "0" (tmp)
	: "g1");

	/* NOTE: PSTATE_IE is still clear. */
	stuck = 100000;
	do {
		__asm__ __volatile__("ldxa [%%g0] %1, %0"
			: "=r" (result)
			: "i" (ASI_INTR_DISPATCH_STAT));
		if (result == 0) {
			__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
					     : : "r" (pstate));
			return;
		}
		stuck -= 1;
		if (stuck == 0)
			break;
	} while (result & 0x1);
	__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
			     : : "r" (pstate));
	if (stuck == 0) {
		printk("CPU[%d]: mondo stuckage result[%016lx]\n",
		       smp_processor_id(), result);
	} else {
		udelay(2);
		goto again;
	}
}
static inline void spitfire_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
{
	u64 pstate;
	int i;

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
	for_each_cpu_mask(i, mask)
		spitfire_xcall_helper(data0, data1, data2, pstate, i);
}
/* Cheetah now allows to send the whole 64-bytes of data in the interrupt
 * packet, but we have no use for that.  However we do take advantage of
 * the new pipelining feature (ie. dispatch to multiple cpus simultaneously).
 */
static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
{
	u64 pstate, ver, busy_mask;
	int nack_busy_id, is_jbus, need_more;

	if (cpus_empty(mask))
		return;

	/* Unfortunately, someone at Sun had the brilliant idea to make the
	 * busy/nack fields hard-coded by ITID number for this Ultra-III
	 * derivative processor.
	 */
	__asm__ ("rdpr %%ver, %0" : "=r" (ver));
	is_jbus = ((ver >> 32) == __JALAPENO_ID ||
		   (ver >> 32) == __SERRANO_ID);
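	/* Each dispatch target owns a pair of bits in the dispatch status
	 * register: bit (2 * id) is BUSY and bit (2 * id + 1) is NACK.  On
	 * JBUS parts the id is the target's hard ITID, so only ids 0-31
	 * exist; elsewhere we hand out nack_busy_id values sequentially as
	 * we dispatch, which is why one shot covers at most 32 cpus before
	 * we must loop again via need_more.
	 */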
	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));

retry:
	need_more = 0;
	__asm__ __volatile__("wrpr %0, %1, %%pstate\n\t"
			     : : "r" (pstate), "i" (PSTATE_IE));

	/* Setup the dispatch data registers. */
	__asm__ __volatile__("stxa	%0, [%3] %6\n\t"
			     "stxa	%1, [%4] %6\n\t"
			     "stxa	%2, [%5] %6\n\t"
			     "membar	#Sync\n\t"
			     : /* no outputs */
			     : "r" (data0), "r" (data1), "r" (data2),
			       "r" (0x40), "r" (0x50), "r" (0x60),
			       "i" (ASI_INTR_W));

	nack_busy_id = 0;
	busy_mask = 0;
	{
		int i;

		for_each_cpu_mask(i, mask) {
			u64 target = (i << 14) | 0x70;

			if (is_jbus) {
				busy_mask |= (0x1UL << (i * 2));
			} else {
				target |= (nack_busy_id << 24);
				busy_mask |= (0x1UL <<
					      (nack_busy_id * 2));
			}
			__asm__ __volatile__(
				"stxa	%%g0, [%0] %1\n\t"
				"membar	#Sync\n\t"
				: /* no outputs */
				: "r" (target), "i" (ASI_INTR_W));
			nack_busy_id++;
			if (nack_busy_id == 32) {
				need_more = 1;
				break;
			}
		}
	}

	/* Now, poll for completion. */
	{
		u64 dispatch_stat, nack_mask;
		long stuck;

		stuck = 100000 * nack_busy_id;
		nack_mask = busy_mask << 1;
		do {
			__asm__ __volatile__("ldxa	[%%g0] %1, %0"
					     : "=r" (dispatch_stat)
					     : "i" (ASI_INTR_DISPATCH_STAT));
			if (!(dispatch_stat & (busy_mask | nack_mask))) {
				__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
						     : : "r" (pstate));
				if (unlikely(need_more)) {
					int i, cnt = 0;
					for_each_cpu_mask(i, mask) {
						cpu_clear(i, mask);
						cnt++;
						if (cnt == 32)
							break;
					}
					goto retry;
				}
				return;
			}
			if (!--stuck)
				break;
		} while (dispatch_stat & busy_mask);

		__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
				     : : "r" (pstate));
		if (dispatch_stat & busy_mask) {
			/* Busy bits will not clear, continue instead
			 * of freezing up on this cpu.
			 */
			printk("CPU[%d]: mondo stuckage result[%016lx]\n",
			       smp_processor_id(), dispatch_stat);
		} else {
			int i, this_busy_nack = 0;

			/* Delay some random time with interrupts enabled
			 * to prevent deadlock.
			 */
			udelay(2 * nack_busy_id);

			/* Clear out the mask bits for cpus which did not
			 * NACK us.
			 */
			for_each_cpu_mask(i, mask) {
				u64 check_mask;

				if (is_jbus)
					check_mask = (0x2UL << (2*i));
				else
					check_mask = (0x2UL <<
						      this_busy_nack);
				if ((dispatch_stat & check_mask) == 0)
					cpu_clear(i, mask);
				this_busy_nack += 2;
				if (this_busy_nack == 64)
					break;
			}

			goto retry;
		}
	}
}
/* Multi-cpu list version.  */
static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
{
	struct trap_per_cpu *tb;
	u16 *cpu_list;
	u64 *mondo;
	cpumask_t error_mask;
	unsigned long flags, status;
	int cnt, retries, this_cpu, prev_sent, i;

	if (cpus_empty(mask))
		return;

	/* We have to do this whole thing with interrupts fully disabled.
	 * Otherwise if we send an xcall from interrupt context it will
	 * corrupt both our mondo block and cpu list state.
	 *
	 * One consequence of this is that we cannot use timeout mechanisms
	 * that depend upon interrupts being delivered locally.  So, for
	 * example, we cannot sample jiffies and expect it to advance.
	 *
	 * Fortunately, udelay() uses %stick/%tick so we can use that.
	 */
	local_irq_save(flags);

	this_cpu = smp_processor_id();
	tb = &trap_block[this_cpu];

	mondo = __va(tb->cpu_mondo_block_pa);
	mondo[0] = data0;
	mondo[1] = data1;
	mondo[2] = data2;
	wmb();

	cpu_list = __va(tb->cpu_list_pa);

	/* Setup the initial cpu list.  */
	cnt = 0;
	for_each_cpu_mask(i, mask)
		cpu_list[cnt++] = i;

	cpus_clear(error_mask);
	retries = 0;
	prev_sent = 0;
	do {
		int forward_progress, n_sent;

		status = sun4v_cpu_mondo_send(cnt,
					      tb->cpu_list_pa,
					      tb->cpu_mondo_block_pa);

		/* HV_EOK means all cpus received the xcall, we're done.  */
		if (likely(status == HV_EOK))
			break;

		/* First, see if we made any forward progress.
		 *
		 * The hypervisor indicates successful sends by setting
		 * cpu list entries to the value 0xffff.
		 */
		n_sent = 0;
		for (i = 0; i < cnt; i++) {
			if (likely(cpu_list[i] == 0xffff))
				n_sent++;
		}

		forward_progress = 0;
		if (n_sent > prev_sent)
			forward_progress = 1;

		prev_sent = n_sent;

		/* If we get a HV_ECPUERROR, then one or more of the cpus
		 * in the list are in error state.  Use the cpu_state()
		 * hypervisor call to find out which cpus are in error state.
		 */
		if (unlikely(status == HV_ECPUERROR)) {
			for (i = 0; i < cnt; i++) {
				long err;
				u16 cpu;

				cpu = cpu_list[i];
				if (cpu == 0xffff)
					continue;

				err = sun4v_cpu_state(cpu);
				if (err >= 0 &&
				    err == HV_CPU_STATE_ERROR) {
					cpu_list[i] = 0xffff;
					cpu_set(cpu, error_mask);
				}
			}
		} else if (unlikely(status != HV_EWOULDBLOCK))
			goto fatal_mondo_error;

		/* Don't bother rewriting the CPU list, just leave the
		 * 0xffff and non-0xffff entries in there and the
		 * hypervisor will do the right thing.
		 *
		 * Only advance timeout state if we didn't make any
		 * forward progress.
		 */
		if (unlikely(!forward_progress)) {
			if (unlikely(++retries > 10000))
				goto fatal_mondo_timeout;

			/* Delay a little bit to let other cpus catch up
			 * on their cpu mondo queue work.
			 */
			udelay(2 * cnt);
		}
	} while (1);

	local_irq_restore(flags);

	if (unlikely(!cpus_empty(error_mask)))
		goto fatal_mondo_cpu_error;

	return;

fatal_mondo_cpu_error:
	printk(KERN_CRIT "CPU[%d]: SUN4V mondo cpu error, some target cpus "
	       "were in error state\n",
	       this_cpu);
	printk(KERN_CRIT "CPU[%d]: Error mask [ ", this_cpu);
	for_each_cpu_mask(i, error_mask)
		printk("%d ", i);
	printk("]\n");
	return;

fatal_mondo_timeout:
	local_irq_restore(flags);
	printk(KERN_CRIT "CPU[%d]: SUN4V mondo timeout, no forward "
	       " progress after %d retries.\n",
	       this_cpu, retries);
	goto dump_cpu_list_and_out;

fatal_mondo_error:
	local_irq_restore(flags);
	printk(KERN_CRIT "CPU[%d]: Unexpected SUN4V mondo error %lu\n",
	       this_cpu, status);
	printk(KERN_CRIT "CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) "
	       "mondo_block_pa(%lx)\n",
	       this_cpu, cnt, tb->cpu_list_pa, tb->cpu_mondo_block_pa);

dump_cpu_list_and_out:
	printk(KERN_CRIT "CPU[%d]: CPU list [ ", this_cpu);
	for (i = 0; i < cnt; i++)
		printk("%u ", cpu_list[i]);
	printk("]\n");
}
/* Send cross call to all processors mentioned in MASK
 * except self.
 */
static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, cpumask_t mask)
{
	u64 data0 = (((u64)ctx)<<32 | (((u64)func) & 0xffffffff));
	int this_cpu = get_cpu();

	cpus_and(mask, mask, cpu_online_map);
	cpu_clear(this_cpu, mask);

	if (tlb_type == spitfire)
		spitfire_xcall_deliver(data0, data1, data2, mask);
	else if (tlb_type == cheetah || tlb_type == cheetah_plus)
		cheetah_xcall_deliver(data0, data1, data2, mask);
	else
		hypervisor_xcall_deliver(data0, data1, data2, mask);
	/* NOTE: Caller runs local copy on master. */

	put_cpu();
}
extern unsigned long xcall_sync_tick;

static void smp_start_sync_tick_client(int cpu)
{
	cpumask_t mask = cpumask_of_cpu(cpu);

	smp_cross_call_masked(&xcall_sync_tick,
			      0, 0, 0, mask);
}

/* Send cross call to all processors except self. */
#define smp_cross_call(func, ctx, data1, data2) \
	smp_cross_call_masked(func, ctx, data1, data2, cpu_online_map)
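/* The cross-call function-call protocol: the initiator publishes a
 * call_data_struct under call_lock and fires xcall_call_function; each
 * target runs smp_call_function_client() from the softint, invokes the
 * function, and bumps ->finished so the initiator can spin until all
 * targets have checked in.
 */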
struct call_data_struct {
	void (*func) (void *info);
	void *info;
	atomic_t finished;
	int wait;
};

static struct call_data_struct *call_data;

extern unsigned long xcall_call_function;

/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @nonatomic: currently unused.
 * @wait: If true, wait (atomically) until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.  Does not return until
 * remote CPUs are nearly ready to execute <<func>> or have already executed it.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
static int smp_call_function_mask(void (*func)(void *info), void *info,
				  int nonatomic, int wait, cpumask_t mask)
{
	struct call_data_struct data;
	int cpus;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	atomic_set(&data.finished, 0);
	data.wait = wait;

	spin_lock(&call_lock);

	cpu_clear(smp_processor_id(), mask);
	cpus = cpus_weight(mask);
	if (!cpus)
		goto out_unlock;

	call_data = &data;
	mb();

	smp_cross_call_masked(&xcall_call_function, 0, 0, 0, mask);

	/* Wait for response */
	while (atomic_read(&data.finished) != cpus)
		cpu_relax();

out_unlock:
	spin_unlock(&call_lock);

	return 0;
}
int smp_call_function(void (*func)(void *info), void *info,
		      int nonatomic, int wait)
{
	return smp_call_function_mask(func, info, nonatomic, wait,
				      cpu_online_map);
}
void smp_call_function_client(int irq, struct pt_regs *regs)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;

	clear_softint(1 << irq);
	if (call_data->wait) {
		/* let initiator proceed only after completion */
		func(info);
		atomic_inc(&call_data->finished);
	} else {
		/* let initiator proceed after getting data */
		atomic_inc(&call_data->finished);
		func(info);
	}
}
static void tsb_sync(void *info)
{
	struct trap_per_cpu *tp = &trap_block[raw_smp_processor_id()];
	struct mm_struct *mm = info;

	/* It is not valid to test "current->active_mm == mm" here.
	 *
	 * The value of "current" is not changed atomically with
	 * switch_mm().  But that's OK, we just need to check the
	 * current cpu's trap block PGD physical address.
	 */
	if (tp->pgd_paddr == __pa(mm->pgd))
		tsb_context_switch(mm);
}

void smp_tsb_sync(struct mm_struct *mm)
{
	smp_call_function_mask(tsb_sync, mm, 0, 1, mm->cpu_vm_mask);
}
extern unsigned long xcall_flush_tlb_mm;
extern unsigned long xcall_flush_tlb_pending;
extern unsigned long xcall_flush_tlb_kernel_range;
extern unsigned long xcall_report_regs;
extern unsigned long xcall_receive_signal;
extern unsigned long xcall_new_mmu_context_version;

#ifdef DCACHE_ALIASING_POSSIBLE
extern unsigned long xcall_flush_dcache_page_cheetah;
#endif
extern unsigned long xcall_flush_dcache_page_spitfire;

#ifdef CONFIG_DEBUG_DCFLUSH
extern atomic_t dcpage_flushes;
extern atomic_t dcpage_flushes_xcall;
#endif
static inline void __local_flush_dcache_page(struct page *page)
{
#ifdef DCACHE_ALIASING_POSSIBLE
	__flush_dcache_page(page_address(page),
			    ((tlb_type == spitfire) &&
			     page_mapping(page) != NULL));
#else
	if (page_mapping(page) != NULL &&
	    tlb_type == spitfire)
		__flush_icache_page(__pa(page_address(page)));
#endif
}
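/* The hypervisor (sun4v) cases below bail out early: as far as we can
 * tell those chips have no D-cache aliasing to clean up after, so no
 * cross call is needed for page flushes.
 */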
void smp_flush_dcache_page_impl(struct page *page, int cpu)
{
	cpumask_t mask = cpumask_of_cpu(cpu);
	int this_cpu;

	if (tlb_type == hypervisor)
		return;

#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif

	this_cpu = get_cpu();

	if (cpu == this_cpu) {
		__local_flush_dcache_page(page);
	} else if (cpu_online(cpu)) {
		void *pg_addr = page_address(page);
		u64 data0;

		if (tlb_type == spitfire) {
			data0 =
				((u64)&xcall_flush_dcache_page_spitfire);
			if (page_mapping(page) != NULL)
				data0 |= ((u64)1 << 32);
			spitfire_xcall_deliver(data0,
					       __pa(pg_addr),
					       (u64) pg_addr,
					       mask);
		} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
#ifdef DCACHE_ALIASING_POSSIBLE
			data0 =
				((u64)&xcall_flush_dcache_page_cheetah);
			cheetah_xcall_deliver(data0,
					      __pa(pg_addr),
					      0, mask);
#endif
		}
#ifdef CONFIG_DEBUG_DCFLUSH
		atomic_inc(&dcpage_flushes_xcall);
#endif
	}

	put_cpu();
}
void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
{
	void *pg_addr = page_address(page);
	cpumask_t mask = cpu_online_map;
	u64 data0;
	int this_cpu;

	if (tlb_type == hypervisor)
		return;

	this_cpu = get_cpu();

	cpu_clear(this_cpu, mask);

#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif
	if (cpus_empty(mask))
		goto flush_self;
	if (tlb_type == spitfire) {
		data0 = ((u64)&xcall_flush_dcache_page_spitfire);
		if (page_mapping(page) != NULL)
			data0 |= ((u64)1 << 32);
		spitfire_xcall_deliver(data0,
				       __pa(pg_addr),
				       (u64) pg_addr,
				       mask);
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
#ifdef DCACHE_ALIASING_POSSIBLE
		data0 = ((u64)&xcall_flush_dcache_page_cheetah);
		cheetah_xcall_deliver(data0,
				      __pa(pg_addr),
				      0, mask);
#endif
	}
#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes_xcall);
#endif
 flush_self:
	__local_flush_dcache_page(page);

	put_cpu();
}
static void __smp_receive_signal_mask(cpumask_t mask)
{
	smp_cross_call_masked(&xcall_receive_signal, 0, 0, 0, mask);
}

void smp_receive_signal(int cpu)
{
	cpumask_t mask = cpumask_of_cpu(cpu);

	if (cpu_online(cpu))
		__smp_receive_signal_mask(mask);
}

void smp_receive_signal_client(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);
}
void smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
{
	struct mm_struct *mm;
	unsigned long flags;

	clear_softint(1 << irq);

	/* See if we need to allocate a new TLB context because
	 * the version of the one we are using is now out of date.
	 */
	mm = current->active_mm;
	if (unlikely(!mm || (mm == &init_mm)))
		return;

	spin_lock_irqsave(&mm->context.lock, flags);

	if (unlikely(!CTX_VALID(mm->context)))
		get_new_mmu_context(mm);

	spin_unlock_irqrestore(&mm->context.lock, flags);

	load_secondary_context(mm);
	__flush_tlb_mm(CTX_HWBITS(mm->context),
		       SECONDARY_CONTEXT);
}

void smp_new_mmu_context_version(void)
{
	smp_cross_call(&xcall_new_mmu_context_version, 0, 0, 0);
}
void smp_report_regs(void)
{
	smp_cross_call(&xcall_report_regs, 0, 0, 0);
}
/* We know that the window frames of the user have been flushed
 * to the stack before we get here because all callers of us
 * are flush_tlb_*() routines, and these run after flush_cache_*()
 * which performs the flushw.
 *
 * The SMP TLB coherency scheme we use works as follows:
 *
 * 1) mm->cpu_vm_mask is a bit mask of which cpus an address
 *    space has (potentially) executed on, this is the heuristic
 *    we use to avoid doing cross calls.
 *
 *    Also, for flushing from kswapd and also for clones, we
 *    use cpu_vm_mask as the list of cpus on which to run the TLB flush.
 *
 * 2) TLB context numbers are shared globally across all processors
 *    in the system, this allows us to play several games to avoid
 *    cross calls.
 *
 *    One invariant is that when a cpu switches to a process, and
 *    that process's tsk->active_mm->cpu_vm_mask does not have the
 *    current cpu's bit set, that tlb context is flushed locally.
 *
 *    If the address space is non-shared (ie. mm->count == 1) we avoid
 *    cross calls when we want to flush the currently running process's
 *    tlb state.  This is done by clearing all cpu bits except the current
 *    processor's in current->active_mm->cpu_vm_mask and performing the
 *    flush locally only.  This will force any subsequent cpus which run
 *    this task to flush the context from the local tlb if the process
 *    migrates to another cpu (again).
 *
 * 3) For shared address spaces (threads) and swapping we bite the
 *    bullet for most cases and perform the cross call (but only to
 *    the cpus listed in cpu_vm_mask).
 *
 * The performance gain from "optimizing" away the cross call for threads is
 * questionable (in theory the big win for threads is the massive sharing of
 * address space state across processors).
 */
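/* The mm_users == 1 fast paths in the flushers below implement the
 * non-shared address space trick from point 2 above: shrink
 * mm->cpu_vm_mask to just this cpu and flush only the local TLB,
 * leaving any stale remote context to be flushed when the task next
 * migrates.
 */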
/* This currently is only used by the hugetlb arch pre-fault
 * hook on UltraSPARC-III+ and later when changing the pagesize
 * bits of the context register for an address space.
 */
void smp_flush_tlb_mm(struct mm_struct *mm)
{
	u32 ctx = CTX_HWBITS(mm->context);
	int cpu = get_cpu();

	if (atomic_read(&mm->mm_users) == 1) {
		mm->cpu_vm_mask = cpumask_of_cpu(cpu);
		goto local_flush_and_out;
	}

	smp_cross_call_masked(&xcall_flush_tlb_mm,
			      ctx, 0, 0,
			      mm->cpu_vm_mask);

local_flush_and_out:
	__flush_tlb_mm(ctx, SECONDARY_CONTEXT);

	put_cpu();
}
void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
{
	u32 ctx = CTX_HWBITS(mm->context);
	int cpu = get_cpu();

	if (mm == current->active_mm && atomic_read(&mm->mm_users) == 1)
		mm->cpu_vm_mask = cpumask_of_cpu(cpu);
	else
		smp_cross_call_masked(&xcall_flush_tlb_pending,
				      ctx, nr, (unsigned long) vaddrs,
				      mm->cpu_vm_mask);

	__flush_tlb_pending(ctx, nr, vaddrs);

	put_cpu();
}
void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	start &= PAGE_MASK;
	end    = PAGE_ALIGN(end);
	if (start != end) {
		smp_cross_call(&xcall_flush_tlb_kernel_range,
			       0, start, end);

		__flush_tlb_kernel_range(start, end);
	}
}
/* #define CAPTURE_DEBUG */
extern unsigned long xcall_capture;

static atomic_t smp_capture_depth = ATOMIC_INIT(0);
static atomic_t smp_capture_registry = ATOMIC_INIT(0);
static unsigned long penguins_are_doing_time;
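/* CPU capture: the first caller of smp_capture() raises
 * penguins_are_doing_time and cross calls xcall_capture; every other
 * cpu parks itself in smp_penguin_jailcell() until smp_release() drops
 * the flag, with smp_capture_registry counting the prisoners.
 */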
void smp_capture(void)
{
	int result = atomic_add_ret(1, &smp_capture_depth);

	if (result == 1) {
		int ncpus = num_online_cpus();

#ifdef CAPTURE_DEBUG
		printk("CPU[%d]: Sending penguins to jail...",
		       smp_processor_id());
#endif
		penguins_are_doing_time = 1;
		membar_storestore_loadstore();
		atomic_inc(&smp_capture_registry);
		smp_cross_call(&xcall_capture, 0, 0, 0);
		while (atomic_read(&smp_capture_registry) != ncpus)
			rmb();
#ifdef CAPTURE_DEBUG
		printk("done\n");
#endif
	}
}
void smp_release(void)
{
	if (atomic_dec_and_test(&smp_capture_depth)) {
#ifdef CAPTURE_DEBUG
		printk("CPU[%d]: Giving pardon to "
		       "imprisoned penguins\n",
		       smp_processor_id());
#endif
		penguins_are_doing_time = 0;
		membar_storeload_storestore();
		atomic_dec(&smp_capture_registry);
	}
}
/* Imprisoned penguins run with %pil == 15, but PSTATE_IE set, so they
 * can service tlb flush xcalls...
 */
extern void prom_world(int);

void smp_penguin_jailcell(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);

	preempt_disable();

	__asm__ __volatile__("flushw");
	prom_world(1);
	atomic_inc(&smp_capture_registry);
	membar_storeload_storestore();
	while (penguins_are_doing_time)
		rmb();
	atomic_dec(&smp_capture_registry);
	prom_world(0);

	preempt_enable();
}
/* /proc/profile writes can call this, don't __init it please. */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}
void __init smp_prepare_cpus(unsigned int max_cpus)
{
}

void __devinit smp_prepare_boot_cpu(void)
{
}
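/* Build the topology masks from the machine description: cpus sharing
 * a core_id become each other's cpu_core_map entries, and cpus sharing
 * a proc_id (roughly, hardware strands on the same execution unit)
 * become siblings in cpu_sibling_map.
 */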
void __devinit smp_fill_in_sib_core_maps(void)
{
	unsigned int i;

	for_each_present_cpu(i) {
		unsigned int j;

		cpus_clear(cpu_core_map[i]);
		if (cpu_data(i).core_id == 0) {
			cpu_set(i, cpu_core_map[i]);
			continue;
		}

		for_each_present_cpu(j) {
			if (cpu_data(i).core_id ==
			    cpu_data(j).core_id)
				cpu_set(j, cpu_core_map[i]);
		}
	}

	for_each_present_cpu(i) {
		unsigned int j;

		cpus_clear(per_cpu(cpu_sibling_map, i));
		if (cpu_data(i).proc_id == -1) {
			cpu_set(i, per_cpu(cpu_sibling_map, i));
			continue;
		}

		for_each_present_cpu(j) {
			if (cpu_data(i).proc_id ==
			    cpu_data(j).proc_id)
				cpu_set(j, per_cpu(cpu_sibling_map, i));
		}
	}
}
int __cpuinit __cpu_up(unsigned int cpu)
{
	int ret = smp_boot_one_cpu(cpu);

	if (!ret) {
		cpu_set(cpu, smp_commenced_mask);
		while (!cpu_isset(cpu, cpu_online_map))
			mb();
		if (!cpu_isset(cpu, cpu_online_map)) {
			ret = -ENODEV;
		} else {
			/* On SUN4V, writes to %tick and %stick are
			 * not allowed.
			 */
			if (tlb_type != hypervisor)
				smp_synchronize_one_tick(cpu);
		}
	}
	return ret;
}
#ifdef CONFIG_HOTPLUG_CPU
void cpu_play_dead(void)
{
	int cpu = smp_processor_id();
	unsigned long pstate;

	idle_task_exit();

	if (tlb_type == hypervisor) {
		struct trap_per_cpu *tb = &trap_block[cpu];

		sun4v_cpu_qconf(HV_CPU_QUEUE_CPU_MONDO,
				tb->cpu_mondo_pa, 0);
		sun4v_cpu_qconf(HV_CPU_QUEUE_DEVICE_MONDO,
				tb->dev_mondo_pa, 0);
		sun4v_cpu_qconf(HV_CPU_QUEUE_RES_ERROR,
				tb->resum_mondo_pa, 0);
		sun4v_cpu_qconf(HV_CPU_QUEUE_NONRES_ERROR,
				tb->nonresum_mondo_pa, 0);
	}

	cpu_clear(cpu, smp_commenced_mask);
	membar_safe("#Sync");

	local_irq_disable();

	__asm__ __volatile__(
		"rdpr	%%pstate, %0\n\t"
		"wrpr	%0, %1, %%pstate"
		: "=r" (pstate)
		: "i" (PSTATE_IE));

	while (1)
		barrier();
}
int __cpu_disable(void)
{
	int cpu = smp_processor_id();
	cpuinfo_sparc *c;
	int i;

	for_each_cpu_mask(i, cpu_core_map[cpu])
		cpu_clear(cpu, cpu_core_map[i]);
	cpus_clear(cpu_core_map[cpu]);

	for_each_cpu_mask(i, per_cpu(cpu_sibling_map, cpu))
		cpu_clear(cpu, per_cpu(cpu_sibling_map, i));
	cpus_clear(per_cpu(cpu_sibling_map, cpu));

	c = &cpu_data(cpu);

	c->core_id = 0;
	c->proc_id = -1;

	spin_lock(&call_lock);
	cpu_clear(cpu, cpu_online_map);
	spin_unlock(&call_lock);

	smp_wmb();

	/* Make sure no interrupts point to this cpu. */
	fixup_irqs();

	local_irq_enable();
	mdelay(1);
	local_irq_disable();

	return 0;
}
void __cpu_die(unsigned int cpu)
{
	int i;

	for (i = 0; i < 100; i++) {
		smp_rmb();
		if (!cpu_isset(cpu, smp_commenced_mask))
			break;
		msleep(100);
	}
	if (cpu_isset(cpu, smp_commenced_mask)) {
		printk(KERN_ERR "CPU %u didn't die...\n", cpu);
	} else {
#if defined(CONFIG_SUN_LDOMS)
		unsigned long hv_err;
		int limit = 100;

		do {
			hv_err = sun4v_cpu_stop(cpu);
			if (hv_err == HV_EOK) {
				cpu_clear(cpu, cpu_present_map);
				break;
			}
		} while (--limit > 0);
		if (limit <= 0) {
			printk(KERN_ERR "sun4v_cpu_stop() fails err=%lu\n",
			       hv_err);
		}
#endif
	}
}
#endif /* CONFIG_HOTPLUG_CPU */
void __init smp_cpus_done(unsigned int max_cpus)
{
}

void smp_send_reschedule(int cpu)
{
	smp_receive_signal(cpu);
}

/* This is a nop because we capture all other cpus
 * anyways when making the PROM active.
 */
void smp_send_stop(void)
{
}
unsigned long __per_cpu_base __read_mostly;
unsigned long __per_cpu_shift __read_mostly;

EXPORT_SYMBOL(__per_cpu_base);
EXPORT_SYMBOL(__per_cpu_shift);
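/* A cpu's per-cpu area lives at __per_cpu_base + (cpu << __per_cpu_shift),
 * so the setup below rounds the section size up to a power-of-two number
 * of pages and derives the shift from that.
 */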
void __init real_setup_per_cpu_areas(void)
{
	unsigned long goal, size, i;
	char *ptr;

	/* Copy section for each CPU (we discard the original) */
	goal = PERCPU_ENOUGH_ROOM;

	__per_cpu_shift = PAGE_SHIFT;
	for (size = PAGE_SIZE; size < goal; size <<= 1UL)
		__per_cpu_shift++;

	ptr = alloc_bootmem_pages(size * NR_CPUS);

	__per_cpu_base = ptr - __per_cpu_start;

	for (i = 0; i < NR_CPUS; i++, ptr += size)
		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);

	/* Setup %g5 for the boot cpu.  */
	__local_per_cpu_offset = __per_cpu_offset(smp_processor_id());
}