// SPDX-License-Identifier: GPL-2.0
/* smp.c: Sparc64 SMP support.
 *
 * Copyright (C) 1997, 2007, 2008 David S. Miller (davem@davemloft.net)
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/sched/mm.h>
#include <linux/sched/hotplug.h>
#include <linux/pagemap.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/seq_file.h>
#include <linux/cache.h>
#include <linux/jiffies.h>
#include <linux/profile.h>
#include <linux/memblock.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/kgdb.h>

#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cpudata.h>
#include <asm/hvtramp.h>
#include <asm/timer.h>
#include <asm/setup.h>
#include <asm/irq_regs.h>
#include <asm/oplib.h>
#include <linux/uaccess.h>
#include <asm/starfire.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/mdesc.h>
#include <asm/hypervisor.h>
DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
        { [0 ... NR_CPUS-1] = CPU_MASK_NONE };

cpumask_t cpu_core_sib_map[NR_CPUS] __read_mostly = {
        [0 ... NR_CPUS-1] = CPU_MASK_NONE };

cpumask_t cpu_core_sib_cache_map[NR_CPUS] __read_mostly = {
        [0 ... NR_CPUS - 1] = CPU_MASK_NONE };

EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_SYMBOL(cpu_core_map);
EXPORT_SYMBOL(cpu_core_sib_map);
EXPORT_SYMBOL(cpu_core_sib_cache_map);

static cpumask_t smp_commenced_mask;

static DEFINE_PER_CPU(bool, poke);
static bool cpu_poke;
void smp_info(struct seq_file *m)
{
        int i;

        seq_printf(m, "State:\n");
        for_each_online_cpu(i)
                seq_printf(m, "CPU%d:\t\tonline\n", i);
}

void smp_bogo(struct seq_file *m)
{
        int i;

        for_each_online_cpu(i)
                seq_printf(m,
                           "Cpu%dClkTck\t: %016lx\n",
                           i, cpu_data(i).clock_tick);
}
extern void setup_sparc64_timer(void);

static volatile unsigned long callin_flag = 0;

void smp_callin(void)
{
        int cpuid = hard_smp_processor_id();

        __local_per_cpu_offset = __per_cpu_offset(cpuid);

        if (tlb_type == hypervisor)
                sun4v_ktsb_register();

        setup_sparc64_timer();

        if (cheetah_pcache_forced_on)
                cheetah_enable_pcache();

        __asm__ __volatile__("membar #Sync\n\t"
                             "flush %%g6" : : : "memory");

        /* Clear this or we will die instantly when we
         * schedule back to this idler...
         */
        current_thread_info()->new_child = 0;

        /* Attach to the address space of init_task. */
        current->active_mm = &init_mm;

        /* inform the notifiers about the new cpu */
        notify_cpu_starting(cpuid);

        while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
                rmb();

        set_cpu_online(cpuid, true);

        cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);

        printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
        panic("SMP bolixed\n");
}
/* This tick register synchronization scheme is taken entirely from
 * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit.
 *
 * The only change I've made is to rework it so that the master
 * initiates the synchronization instead of the slave.  -DaveM
 */
#define SLAVE   (SMP_CACHE_BYTES/sizeof(unsigned long))

#define NUM_ROUNDS      64      /* magic value */
#define NUM_ITERS       5       /* likewise */

static DEFINE_RAW_SPINLOCK(itc_sync_lock);
static unsigned long go[SLAVE + 1];

#define DEBUG_TICK_SYNC 0
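
/* Handshake used below: the master publishes its %tick value through
 * go[SLAVE]; the slave spins in get_delta() until that slot goes
 * non-zero, sampling its own %tick just before and just after.  The
 * iteration with the smallest round-trip (t1 - t0) gives the most
 * trustworthy estimate of the slave's offset from the master.
 */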
static inline long get_delta (long *rt, long *master)
{
        unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
        unsigned long tcenter, t0, t1, tm;
        long i;

        for (i = 0; i < NUM_ITERS; i++) {
                t0 = tick_ops->get_tick();
                membar_safe("#StoreLoad");
                while (!(tm = go[SLAVE]))
                        cpu_relax();
                t1 = tick_ops->get_tick();

                if (t1 - t0 < best_t1 - best_t0)
                        best_t0 = t0, best_t1 = t1, best_tm = tm;
        }

        *rt = best_t1 - best_t0;
        *master = best_tm - best_t0;

        /* average best_t0 and best_t1 without overflow: */
        tcenter = (best_t0/2 + best_t1/2);
        if (best_t0 % 2 + best_t1 % 2 == 2)
                tcenter++;
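        /* Halving each value before adding keeps the sum from wrapping:
         * e.g. best_t0 = 6, best_t1 = 10 gives 3 + 5 = 8, the true
         * midpoint, without ever forming best_t0 + best_t1; when both
         * values are odd the increment above puts back the lost 1.
         */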
        return tcenter - best_tm;
}
void smp_synchronize_tick_client(void)
{
        long i, delta, adj, adjust_latency = 0, done = 0;
        unsigned long flags, rt, master_time_stamp;
#if DEBUG_TICK_SYNC
        struct {
                long rt;        /* roundtrip time */
                long master;    /* master's timestamp */
                long diff;      /* difference between midpoint and master's timestamp */
                long lat;       /* estimate of itc adjustment latency */
        } t[NUM_ROUNDS];
#endif

        local_irq_save(flags);

        for (i = 0; i < NUM_ROUNDS; i++) {
                delta = get_delta(&rt, &master_time_stamp);
                if (delta == 0)
                        done = 1;       /* let's lock on to this... */

                if (!done) {
                        adjust_latency += -delta;
                        adj = -delta + adjust_latency/4;

                        tick_ops->add_tick(adj);
                }
#if DEBUG_TICK_SYNC
                t[i].rt = rt;
                t[i].master = master_time_stamp;
                t[i].diff = delta;
                t[i].lat = adjust_latency/4;
#endif
        }

        local_irq_restore(flags);

#if DEBUG_TICK_SYNC
        for (i = 0; i < NUM_ROUNDS; i++)
                printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
                       t[i].rt, t[i].master, t[i].diff, t[i].lat);
#endif

        printk(KERN_INFO "CPU %d: synchronized TICK with master CPU "
               "(last diff %ld cycles, maxerr %lu cycles)\n",
               smp_processor_id(), delta, rt);
}
static void smp_start_sync_tick_client(int cpu);

static void smp_synchronize_one_tick(int cpu)
{
        unsigned long flags, i;

        smp_start_sync_tick_client(cpu);

        /* wait for client to be ready */

        /* now let the client proceed into his loop */
        membar_safe("#StoreLoad");

        raw_spin_lock_irqsave(&itc_sync_lock, flags);

        for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
                go[SLAVE] = tick_ops->get_tick();
                membar_safe("#StoreLoad");
        }

        raw_spin_unlock_irqrestore(&itc_sync_lock, flags);
}
#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg,
                                void **descrp)
{
        extern unsigned long sparc64_ttable_tl0;
        extern unsigned long kern_locked_tte_data;
        struct hvtramp_descr *hdesc;
        unsigned long trampoline_ra;
        struct trap_per_cpu *tb;
        u64 tte_vaddr, tte_data;
        unsigned long hv_err;
        int i;

        hdesc = kzalloc(sizeof(*hdesc) +
                        (sizeof(struct hvtramp_mapping) *
                         num_kernel_image_mappings - 1),
                        GFP_KERNEL);
        if (!hdesc) {
                printk(KERN_ERR "ldom_startcpu_cpuid: Cannot allocate "
                       "hvtramp_descr.\n");
                return;
        }
        *descrp = hdesc;

        hdesc->num_mappings = num_kernel_image_mappings;

        tb = &trap_block[cpu];

        hdesc->fault_info_va = (unsigned long) &tb->fault_info;
        hdesc->fault_info_pa = kimage_addr_to_ra(&tb->fault_info);

        hdesc->thread_reg = thread_reg;

        tte_vaddr = (unsigned long) KERNBASE;
        tte_data = kern_locked_tte_data;

        for (i = 0; i < hdesc->num_mappings; i++) {
                hdesc->maps[i].vaddr = tte_vaddr;
                hdesc->maps[i].tte = tte_data;
                tte_vaddr += 0x400000;
                tte_data += 0x400000;
        }

        trampoline_ra = kimage_addr_to_ra(hv_cpu_startup);

        hv_err = sun4v_cpu_start(cpu, trampoline_ra,
                                 kimage_addr_to_ra(&sparc64_ttable_tl0),
                                 __pa(hdesc));
        if (hv_err)
                printk(KERN_ERR "ldom_startcpu_cpuid: sun4v_cpu_start() "
                       "gives error %lu\n", hv_err);
}
#endif
extern unsigned long sparc64_cpu_startup;

/* The OBP cpu startup callback truncates the 3rd arg cookie to
 * 32-bits (I think) so to be safe we have it read the pointer
 * contained here so we work on >4GB machines. -DaveM
 */
static struct thread_info *cpu_new_thread = NULL;

static int smp_boot_one_cpu(unsigned int cpu, struct task_struct *idle)
{
        unsigned long entry =
                (unsigned long)(&sparc64_cpu_startup);
        unsigned long cookie =
                (unsigned long)(&cpu_new_thread);
        void *descr = NULL;
        int timeout, ret;

        cpu_new_thread = task_thread_info(idle);

        if (tlb_type == hypervisor) {
#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
                if (ldom_domaining_enabled)
                        ldom_startcpu_cpuid(cpu,
                                            (unsigned long) cpu_new_thread,
                                            &descr);
                else
#endif
                        prom_startcpu_cpuid(cpu, entry, cookie);
        } else {
                struct device_node *dp = of_find_node_by_cpuid(cpu);

                prom_startcpu(dp->phandle, entry, cookie);
        }

        for (timeout = 0; timeout < 50000; timeout++) {
                if (callin_flag)
                        break;
                udelay(100);
        }

        if (callin_flag) {
                ret = 0;
        } else {
                printk("Processor %d is stuck.\n", cpu);
                ret = -ENODEV;
        }
        cpu_new_thread = NULL;

        return ret;
}
static void spitfire_xcall_helper(u64 data0, u64 data1, u64 data2, u64 pstate, unsigned long cpu)
{
        u64 result, target;

        if (this_is_starfire) {
                /* map to real upaid */
                cpu = (((cpu & 0x3c) << 1) |
                        ((cpu & 0x40) >> 4) |
                        (cpu & 0x3));
        }

        target = (cpu << 14) | 0x70;

        /* Ok, this is the real Spitfire Errata #54.
         * One must read back from a UDB internal register
         * after writes to the UDB interrupt dispatch, but
         * before the membar Sync for that write.
         * So we use the high UDB control register (ASI 0x7f,
         * ADDR 0x20) for the dummy read. -DaveM
         */
        __asm__ __volatile__(
        "wrpr %1, %2, %%pstate\n\t"
        "stxa %4, [%0] %3\n\t"
        "stxa %5, [%0+%8] %3\n\t"
        "stxa %6, [%0+%8] %3\n\t"
        "stxa %%g0, [%7] %3\n\t"
        "ldxa [%%g1] 0x7f, %%g0\n\t"
        : "r" (pstate), "i" (PSTATE_IE), "i" (ASI_INTR_W),
          "r" (data0), "r" (data1), "r" (data2), "r" (target),
          "r" (0x10), "0" (tmp));

        /* NOTE: PSTATE_IE is still clear. */
        do {
                __asm__ __volatile__("ldxa [%%g0] %1, %0"
                        : "=r" (result)
                        : "i" (ASI_INTR_DISPATCH_STAT));
                if (result == 0) {
                        __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
                                             : : "r" (pstate));
                        return;
                }
        } while (result & 0x1);

        __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
                             : : "r" (pstate));

        printk("CPU[%d]: mondo stuckage result[%016llx]\n",
               smp_processor_id(), result);
}
static void spitfire_xcall_deliver(struct trap_per_cpu *tb, int cnt)
{
        u64 *mondo, data0, data1, data2;
        u16 *cpu_list;
        u64 pstate;
        int i;

        __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
        cpu_list = __va(tb->cpu_list_pa);
        mondo = __va(tb->cpu_mondo_block_pa);
        data0 = mondo[0];
        data1 = mondo[1];
        data2 = mondo[2];
        for (i = 0; i < cnt; i++)
                spitfire_xcall_helper(data0, data1, data2, pstate, cpu_list[i]);
}
/* Cheetah now allows to send the whole 64-bytes of data in the interrupt
 * packet, but we have no use for that.  However we do take advantage of
 * the new pipelining feature (ie. dispatch to multiple cpus simultaneously).
 */
static void cheetah_xcall_deliver(struct trap_per_cpu *tb, int cnt)
{
        int nack_busy_id, is_jbus, need_more;
        u64 *mondo, pstate, ver, busy_mask;
        u16 *cpu_list;
        int i;

        cpu_list = __va(tb->cpu_list_pa);
        mondo = __va(tb->cpu_mondo_block_pa);

        /* Unfortunately, someone at Sun had the brilliant idea to make the
         * busy/nack fields hard-coded by ITID number for this Ultra-III
         * derivative processor.
         */
        __asm__ ("rdpr %%ver, %0" : "=r" (ver));
        is_jbus = ((ver >> 32) == __JALAPENO_ID ||
                   (ver >> 32) == __SERRANO_ID);

        __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));

        __asm__ __volatile__("wrpr %0, %1, %%pstate\n\t"
                             : : "r" (pstate), "i" (PSTATE_IE));

        /* Setup the dispatch data registers. */
        __asm__ __volatile__("stxa %0, [%3] %6\n\t"
                             "stxa %1, [%4] %6\n\t"
                             "stxa %2, [%5] %6\n\t"
                             : : "r" (mondo[0]), "r" (mondo[1]), "r" (mondo[2]),
                                 "r" (0x40), "r" (0x50), "r" (0x60),
                                 "i" (ASI_INTR_W));

        nack_busy_id = 0;
        busy_mask = 0;
        for (i = 0; i < cnt; i++) {
                u64 target, nr;

                nr = cpu_list[i];
                if (nr == 0xffff)
                        continue;

                target = (nr << 14) | 0x70;
                if (is_jbus) {
                        busy_mask |= (0x1UL << (nr * 2));
                } else {
                        target |= (nack_busy_id << 24);
                        busy_mask |= (0x1UL <<
                                      (nack_busy_id * 2));
                }
                __asm__ __volatile__(
                        "stxa %%g0, [%0] %1\n\t"
                        : : "r" (target), "i" (ASI_INTR_W));
                nack_busy_id++;
                if (nack_busy_id == 32) {
                        need_more = 1;
                        break;
                }
        }

        /* Now, poll for completion. */
        {
                u64 dispatch_stat, nack_mask;
                long stuck;

                stuck = 100000 * nack_busy_id;
                nack_mask = busy_mask << 1;
                do {
                        __asm__ __volatile__("ldxa [%%g0] %1, %0"
                                             : "=r" (dispatch_stat)
                                             : "i" (ASI_INTR_DISPATCH_STAT));
                        if (!(dispatch_stat & (busy_mask | nack_mask))) {
                                __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
                                                     : : "r" (pstate));
                                if (unlikely(need_more)) {
                                        for (i = 0; i < cnt; i++) {
                                                if (cpu_list[i] == 0xffff)
                                                        continue;
                                                cpu_list[i] = 0xffff;
                                        }
                                }
                                return;
                        }
                } while (dispatch_stat & busy_mask);

                __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
                                     : : "r" (pstate));

                if (dispatch_stat & busy_mask) {
                        /* Busy bits will not clear, continue instead
                         * of freezing up on this cpu.
                         */
                        printk("CPU[%d]: mondo stuckage result[%016llx]\n",
                               smp_processor_id(), dispatch_stat);
                } else {
                        int i, this_busy_nack = 0;

                        /* Delay some random time with interrupts enabled
                         * to prevent deadlock.
                         */
                        udelay(2 * nack_busy_id);

                        /* Clear out the mask bits for cpus which did not
                         * NACK us.
                         */
                        for (i = 0; i < cnt; i++) {
                                u64 check_mask, nr;

                                nr = cpu_list[i];
                                if (nr == 0xffff)
                                        continue;

                                if (is_jbus)
                                        check_mask = (0x2UL << (2*nr));
                                else
                                        check_mask = (0x2UL <<
                                                      this_busy_nack);

                                if ((dispatch_stat & check_mask) == 0)
                                        cpu_list[i] = 0xffff;

                                this_busy_nack += 2;

                                if (this_busy_nack == 64)
                                        break;
                        }
                }
        }
}
#define CPU_MONDO_COUNTER(cpuid)        (cpu_mondo_counter[cpuid])
#define MONDO_USEC_WAIT_MIN             2
#define MONDO_USEC_WAIT_MAX             100
#define MONDO_RETRY_LIMIT               500000

/* Multi-cpu list version.
 *
 * Deliver xcalls to 'cnt' number of cpus in 'cpu_list'.
 * Sometimes not all cpus receive the mondo, requiring us to re-send
 * the mondo until all cpus have received, or cpus are truly stuck
 * unable to receive mondo, and we timeout.
 * Occasionally a target cpu strand is borrowed briefly by hypervisor to
 * perform guest service, such as PCIe error handling. Consider the
 * service time, 1 second overall wait is reasonable for 1 cpu.
 * Here two in-between mondo check wait times are defined: 2 usec for
 * single cpu quick turn around and up to 100 usec for large cpu count.
 * Delivering a mondo to a large number of cpus could take longer, so we
 * adjust the retry count as long as the target cpus are making forward
 * progress.
 */
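/* With the constants above, a cross call to 3 cpus waits 3 * 2 = 6 usec
 * between status checks, while a call to 64 cpus would want 128 usec and
 * is capped at MONDO_USEC_WAIT_MAX = 100 usec; see the usec_wait
 * computation in hypervisor_xcall_deliver() below.
 */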
static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt)
{
        int this_cpu, tot_cpus, prev_sent, i, rem;
        int usec_wait, retries, tot_retries;
        u16 first_cpu = 0xffff;
        unsigned long xc_rcvd = 0;
        unsigned long status;
        int ecpuerror_id = 0;
        int enocpu_id = 0;
        u16 *cpu_list;
        u16 cpu;

        this_cpu = smp_processor_id();
        cpu_list = __va(tb->cpu_list_pa);
        usec_wait = cnt * MONDO_USEC_WAIT_MIN;
        if (usec_wait > MONDO_USEC_WAIT_MAX)
                usec_wait = MONDO_USEC_WAIT_MAX;
        retries = tot_retries = 0;
        tot_cpus = cnt;
        prev_sent = 0;

        do {
                int n_sent, mondo_delivered, target_cpu_busy;

                status = sun4v_cpu_mondo_send(cnt,
                                              tb->cpu_list_pa,
                                              tb->cpu_mondo_block_pa);

                /* HV_EOK means all cpus received the xcall, we're done. */
                if (likely(status == HV_EOK))
                        goto xcall_done;

                /* If not these non-fatal errors, panic */
                if (unlikely((status != HV_EWOULDBLOCK) &&
                             (status != HV_ECPUERROR) &&
                             (status != HV_ENOCPU)))
                        goto fatal_errors;

                /* First, see if we made any forward progress.
                 *
                 * Go through the cpu_list, count the target cpus that have
                 * received our mondo (n_sent), and those that did not (rem).
                 * Re-pack cpu_list with the cpus that remain to be retried
                 * in the front - this simplifies tracking the truly stalled
                 * cpus.
                 *
                 * The hypervisor indicates successful sends by setting
                 * cpu list entries to the value 0xffff.
                 *
                 * EWOULDBLOCK means some target cpus did not receive the
                 * mondo and retry usually helps.
                 *
                 * ECPUERROR means at least one target cpu is in error state,
                 * it's usually safe to skip the faulty cpu and retry.
                 *
                 * ENOCPU means one of the target cpus doesn't belong to the
                 * domain, perhaps offlined which is unexpected, but not
                 * fatal and it's okay to skip the offlined cpu.
                 */
                rem = 0;
                n_sent = 0;
                for (i = 0; i < cnt; i++) {
                        cpu = cpu_list[i];
                        if (likely(cpu == 0xffff)) {
                                n_sent++;
                        } else if ((status == HV_ECPUERROR) &&
                                   (sun4v_cpu_state(cpu) == HV_CPU_STATE_ERROR)) {
                                ecpuerror_id = cpu + 1;
                        } else if (status == HV_ENOCPU && !cpu_online(cpu)) {
                                enocpu_id = cpu + 1;
                        } else {
                                cpu_list[rem++] = cpu;
                        }
                }

                /* No cpu remained, we're done. */
                if (rem == 0)
                        break;

                /* Otherwise, update the cpu count for retry. */
                cnt = rem;

                /* Record the overall number of mondos received by the
                 * first of the remaining cpus.
                 */
                if (first_cpu != cpu_list[0]) {
                        first_cpu = cpu_list[0];
                        xc_rcvd = CPU_MONDO_COUNTER(first_cpu);
                }

                /* Was any mondo delivered successfully? */
                mondo_delivered = (n_sent > prev_sent);
                prev_sent = n_sent;

                /* or, was any target cpu busy processing other mondos? */
                target_cpu_busy = (xc_rcvd < CPU_MONDO_COUNTER(first_cpu));
                xc_rcvd = CPU_MONDO_COUNTER(first_cpu);

                /* Retry count is for no progress. If we're making progress,
                 * reset the retry count.
                 */
                if (likely(mondo_delivered || target_cpu_busy)) {
                        tot_retries += retries;
                        retries = 0;
                } else if (unlikely(retries > MONDO_RETRY_LIMIT)) {
                        goto fatal_mondo_timeout;
                }

                /* Delay a little bit to let other cpus catch up on
                 * their cpu mondo queue work.
                 */
                if (!mondo_delivered)
                        udelay(usec_wait);

                retries++;
        } while (1);

xcall_done:
        if (unlikely(ecpuerror_id > 0)) {
                pr_crit("CPU[%d]: SUN4V mondo cpu error, target cpu(%d) was in error state\n",
                        this_cpu, ecpuerror_id - 1);
        } else if (unlikely(enocpu_id > 0)) {
                pr_crit("CPU[%d]: SUN4V mondo cpu error, target cpu(%d) does not belong to the domain\n",
                        this_cpu, enocpu_id - 1);
        }
        return;

fatal_errors:
        /* fatal errors include bad alignment, etc */
        pr_crit("CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) mondo_block_pa(%lx)\n",
                this_cpu, tot_cpus, tb->cpu_list_pa, tb->cpu_mondo_block_pa);
        panic("Unexpected SUN4V mondo error %lu\n", status);

fatal_mondo_timeout:
        /* some cpus being non-responsive to the cpu mondo */
        pr_crit("CPU[%d]: SUN4V mondo timeout, cpu(%d) made no forward progress after %d retries. Total target cpus(%d).\n",
                this_cpu, first_cpu, (tot_retries + retries), tot_cpus);
        panic("SUN4V mondo timeout panic\n");
}
static void (*xcall_deliver_impl)(struct trap_per_cpu *, int);

static void xcall_deliver(u64 data0, u64 data1, u64 data2, const cpumask_t *mask)
{
        struct trap_per_cpu *tb;
        int this_cpu, i, cnt;
        unsigned long flags;
        u16 *cpu_list;
        u64 *mondo;

        /* We have to do this whole thing with interrupts fully disabled.
         * Otherwise if we send an xcall from interrupt context it will
         * corrupt both our mondo block and cpu list state.
         *
         * One consequence of this is that we cannot use timeout mechanisms
         * that depend upon interrupts being delivered locally.  So, for
         * example, we cannot sample jiffies and expect it to advance.
         *
         * Fortunately, udelay() uses %stick/%tick so we can use that.
         */
        local_irq_save(flags);

        this_cpu = smp_processor_id();
        tb = &trap_block[this_cpu];

        mondo = __va(tb->cpu_mondo_block_pa);
        mondo[0] = data0;
        mondo[1] = data1;
        mondo[2] = data2;

        cpu_list = __va(tb->cpu_list_pa);

        /* Setup the initial cpu list. */
        cnt = 0;
        for_each_cpu(i, mask) {
                if (i == this_cpu || !cpu_online(i))
                        continue;
                cpu_list[cnt++] = i;
        }

        xcall_deliver_impl(tb, cnt);

        local_irq_restore(flags);
}
/* Send cross call to all processors mentioned in MASK_P
 * except self.  Really, there are only two cases currently,
 * "cpu_online_mask" and "mm_cpumask(mm)".
 */
static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, const cpumask_t *mask)
{
        u64 data0 = (((u64)ctx)<<32 | (((u64)func) & 0xffffffff));
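        /* data0 layout: bits 63:32 carry the MMU context hardware bits,
         * bits 31:0 the low 32 bits of the xcall handler's kernel text
         * address.
         */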

        xcall_deliver(data0, data1, data2, mask);
}

/* Send cross call to all processors except self. */
static void smp_cross_call(unsigned long *func, u32 ctx, u64 data1, u64 data2)
{
        smp_cross_call_masked(func, ctx, data1, data2, cpu_online_mask);
}
extern unsigned long xcall_sync_tick;

static void smp_start_sync_tick_client(int cpu)
{
        xcall_deliver((u64) &xcall_sync_tick, 0, 0,
                      cpumask_of(cpu));
}

extern unsigned long xcall_call_function;

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
        xcall_deliver((u64) &xcall_call_function, 0, 0, mask);
}

extern unsigned long xcall_call_function_single;

void arch_send_call_function_single_ipi(int cpu)
{
        xcall_deliver((u64) &xcall_call_function_single, 0, 0,
                      cpumask_of(cpu));
}

void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs)
{
        clear_softint(1 << irq);
        generic_smp_call_function_interrupt();
}

void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs)
{
        clear_softint(1 << irq);
        generic_smp_call_function_single_interrupt();
}
static void tsb_sync(void *info)
{
        struct trap_per_cpu *tp = &trap_block[raw_smp_processor_id()];
        struct mm_struct *mm = info;

        /* It is not valid to test "current->active_mm == mm" here.
         *
         * The value of "current" is not changed atomically with
         * switch_mm().  But that's OK, we just need to check the
         * current cpu's trap block PGD physical address.
         */
        if (tp->pgd_paddr == __pa(mm->pgd))
                tsb_context_switch(mm);
}

void smp_tsb_sync(struct mm_struct *mm)
{
        smp_call_function_many(mm_cpumask(mm), tsb_sync, mm, 1);
}
extern unsigned long xcall_flush_tlb_mm;
extern unsigned long xcall_flush_tlb_page;
extern unsigned long xcall_flush_tlb_kernel_range;
extern unsigned long xcall_fetch_glob_regs;
extern unsigned long xcall_fetch_glob_pmu;
extern unsigned long xcall_fetch_glob_pmu_n4;
extern unsigned long xcall_receive_signal;
extern unsigned long xcall_new_mmu_context_version;
extern unsigned long xcall_kgdb_capture;

#ifdef DCACHE_ALIASING_POSSIBLE
extern unsigned long xcall_flush_dcache_page_cheetah;
#endif
extern unsigned long xcall_flush_dcache_page_spitfire;
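
/* Flush every page backing the folio from the local D-cache; on spitfire
 * chips the I-cache must be dealt with as well whenever the folio is
 * mapped into the page cache (folio_flush_mapping() != NULL).
 */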
static inline void __local_flush_dcache_folio(struct folio *folio)
{
        unsigned int i, nr = folio_nr_pages(folio);

#ifdef DCACHE_ALIASING_POSSIBLE
        for (i = 0; i < nr; i++)
                __flush_dcache_page(folio_address(folio) + i * PAGE_SIZE,
                                    ((tlb_type == spitfire) &&
                                     folio_flush_mapping(folio) != NULL));
#else
        if (folio_flush_mapping(folio) != NULL &&
            tlb_type == spitfire) {
                unsigned long pfn = folio_pfn(folio);

                for (i = 0; i < nr; i++)
                        __flush_icache_page((pfn + i) * PAGE_SIZE);
        }
#endif
}
void smp_flush_dcache_folio_impl(struct folio *folio, int cpu)
{
        int this_cpu;

        if (tlb_type == hypervisor)
                return;

#ifdef CONFIG_DEBUG_DCFLUSH
        atomic_inc(&dcpage_flushes);
#endif

        this_cpu = get_cpu();

        if (cpu == this_cpu) {
                __local_flush_dcache_folio(folio);
        } else if (cpu_online(cpu)) {
                void *pg_addr = folio_address(folio);
                u64 data0 = 0;

                if (tlb_type == spitfire) {
                        data0 = ((u64)&xcall_flush_dcache_page_spitfire);
                        if (folio_flush_mapping(folio) != NULL)
                                data0 |= ((u64)1 << 32);
                } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
#ifdef DCACHE_ALIASING_POSSIBLE
                        data0 = ((u64)&xcall_flush_dcache_page_cheetah);
#endif
                }
                if (data0) {
                        unsigned int i, nr = folio_nr_pages(folio);

                        for (i = 0; i < nr; i++) {
                                xcall_deliver(data0, __pa(pg_addr),
                                              (u64) pg_addr, cpumask_of(cpu));
#ifdef CONFIG_DEBUG_DCFLUSH
                                atomic_inc(&dcpage_flushes_xcall);
#endif
                                pg_addr += PAGE_SIZE;
                        }
                }
        }

        put_cpu();
}
void flush_dcache_folio_all(struct mm_struct *mm, struct folio *folio)
{
        void *pg_addr;
        u64 data0;

        if (tlb_type == hypervisor)
                return;

#ifdef CONFIG_DEBUG_DCFLUSH
        atomic_inc(&dcpage_flushes);
#endif
        data0 = 0;
        pg_addr = folio_address(folio);
        if (tlb_type == spitfire) {
                data0 = ((u64)&xcall_flush_dcache_page_spitfire);
                if (folio_flush_mapping(folio) != NULL)
                        data0 |= ((u64)1 << 32);
        } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
#ifdef DCACHE_ALIASING_POSSIBLE
                data0 = ((u64)&xcall_flush_dcache_page_cheetah);
#endif
        }
        if (data0) {
                unsigned int i, nr = folio_nr_pages(folio);

                for (i = 0; i < nr; i++) {
                        xcall_deliver(data0, __pa(pg_addr),
                                      (u64) pg_addr, cpu_online_mask);
#ifdef CONFIG_DEBUG_DCFLUSH
                        atomic_inc(&dcpage_flushes_xcall);
#endif
                        pg_addr += PAGE_SIZE;
                }
        }
        __local_flush_dcache_folio(folio);
}
void kgdb_roundup_cpus(void)
{
        smp_cross_call(&xcall_kgdb_capture, 0, 0, 0);
}

void smp_fetch_global_regs(void)
{
        smp_cross_call(&xcall_fetch_glob_regs, 0, 0, 0);
}

void smp_fetch_global_pmu(void)
{
        if (tlb_type == hypervisor &&
            sun4v_chip_type >= SUN4V_CHIP_NIAGARA4)
                smp_cross_call(&xcall_fetch_glob_pmu_n4, 0, 0, 0);
        else
                smp_cross_call(&xcall_fetch_glob_pmu, 0, 0, 0);
}
/* We know that the window frames of the user have been flushed
 * to the stack before we get here because all callers of us
 * are flush_tlb_*() routines, and these run after flush_cache_*()
 * which performs the flushw.
 *
 * mm->cpu_vm_mask is a bit mask of which cpus an address
 * space has (potentially) executed on, this is the heuristic
 * we use to limit cross calls.
 */

/* This currently is only used by the hugetlb arch pre-fault
 * hook on UltraSPARC-III+ and later when changing the pagesize
 * bits of the context register for an address space.
 */
void smp_flush_tlb_mm(struct mm_struct *mm)
{
        u32 ctx = CTX_HWBITS(mm->context);

        smp_cross_call_masked(&xcall_flush_tlb_mm,
                              ctx, 0, 0,
                              mm_cpumask(mm));

        __flush_tlb_mm(ctx, SECONDARY_CONTEXT);
}
struct tlb_pending_info {
        unsigned long ctx;
        unsigned long nr;
        unsigned long *vaddrs;
};

static void tlb_pending_func(void *info)
{
        struct tlb_pending_info *t = info;

        __flush_tlb_pending(t->ctx, t->nr, t->vaddrs);
}

void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
{
        u32 ctx = CTX_HWBITS(mm->context);
        struct tlb_pending_info info;

        info.ctx = ctx;
        info.nr = nr;
        info.vaddrs = vaddrs;

        smp_call_function_many(mm_cpumask(mm), tlb_pending_func,
                               &info, 1);

        __flush_tlb_pending(ctx, nr, vaddrs);
}
void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
{
        unsigned long context = CTX_HWBITS(mm->context);

        smp_cross_call_masked(&xcall_flush_tlb_page,
                              context, vaddr, 0,
                              mm_cpumask(mm));

        __flush_tlb_page(context, vaddr);
}

void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        end = PAGE_ALIGN(end);

        smp_cross_call(&xcall_flush_tlb_kernel_range,
                       0, start, end);

        __flush_tlb_kernel_range(start, end);
}
/* #define CAPTURE_DEBUG */
extern unsigned long xcall_capture;

static atomic_t smp_capture_depth = ATOMIC_INIT(0);
static atomic_t smp_capture_registry = ATOMIC_INIT(0);
static unsigned long penguins_are_doing_time;
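
/* smp_capture() herds every other online cpu into smp_penguin_jailcell()
 * below, where they spin until smp_release() drops penguins_are_doing_time.
 * The depth counter lets capture/release pairs nest safely.
 */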
void smp_capture(void)
{
        int result = atomic_add_return(1, &smp_capture_depth);

        if (result == 1) {
                int ncpus = num_online_cpus();

#ifdef CAPTURE_DEBUG
                printk("CPU[%d]: Sending penguins to jail...",
                       smp_processor_id());
#endif
                penguins_are_doing_time = 1;
                atomic_inc(&smp_capture_registry);
                smp_cross_call(&xcall_capture, 0, 0, 0);
                while (atomic_read(&smp_capture_registry) != ncpus)
                        rmb();
#ifdef CAPTURE_DEBUG
                printk("done\n");
#endif
        }
}

void smp_release(void)
{
        if (atomic_dec_and_test(&smp_capture_depth)) {
#ifdef CAPTURE_DEBUG
                printk("CPU[%d]: Giving pardon to "
                       "imprisoned penguins\n",
                       smp_processor_id());
#endif
                penguins_are_doing_time = 0;
                membar_safe("#StoreLoad");
                atomic_dec(&smp_capture_registry);
        }
}

/* Imprisoned penguins run with %pil == PIL_NORMAL_MAX, but PSTATE_IE
 * set, so they can service tlb flush xcalls...
 */
extern void prom_world(int);

void __irq_entry smp_penguin_jailcell(int irq, struct pt_regs *regs)
{
        clear_softint(1 << irq);

        __asm__ __volatile__("flushw");
        atomic_inc(&smp_capture_registry);
        membar_safe("#StoreLoad");
        while (penguins_are_doing_time)
                rmb();
        atomic_dec(&smp_capture_registry);
}
void __init smp_prepare_cpus(unsigned int max_cpus)
{
}

void __init smp_setup_processor_id(void)
{
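        /* Pick the cross-call delivery routine once, at boot: direct UDB
         * dispatch on spitfire, the pipelined dispatch unit on cheetah and
         * cheetah+, and the cpu mondo hypervisor API on sun4v.
         */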
        if (tlb_type == spitfire)
                xcall_deliver_impl = spitfire_xcall_deliver;
        else if (tlb_type == cheetah || tlb_type == cheetah_plus)
                xcall_deliver_impl = cheetah_xcall_deliver;
        else
                xcall_deliver_impl = hypervisor_xcall_deliver;
}
void smp_fill_in_sib_core_maps(void)
{
        unsigned int i;

        for_each_present_cpu(i) {
                unsigned int j;

                cpumask_clear(&cpu_core_map[i]);
                if (cpu_data(i).core_id == 0) {
                        cpumask_set_cpu(i, &cpu_core_map[i]);
                        continue;
                }

                for_each_present_cpu(j) {
                        if (cpu_data(i).core_id ==
                            cpu_data(j).core_id)
                                cpumask_set_cpu(j, &cpu_core_map[i]);
                }
        }

        for_each_present_cpu(i) {
                unsigned int j;

                for_each_present_cpu(j) {
                        if (cpu_data(i).max_cache_id ==
                            cpu_data(j).max_cache_id)
                                cpumask_set_cpu(j, &cpu_core_sib_cache_map[i]);

                        if (cpu_data(i).sock_id == cpu_data(j).sock_id)
                                cpumask_set_cpu(j, &cpu_core_sib_map[i]);
                }
        }

        for_each_present_cpu(i) {
                unsigned int j;

                cpumask_clear(&per_cpu(cpu_sibling_map, i));
                if (cpu_data(i).proc_id == -1) {
                        cpumask_set_cpu(i, &per_cpu(cpu_sibling_map, i));
                        continue;
                }

                for_each_present_cpu(j) {
                        if (cpu_data(i).proc_id ==
                            cpu_data(j).proc_id)
                                cpumask_set_cpu(j, &per_cpu(cpu_sibling_map, i));
                }
        }
}
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
        int ret = smp_boot_one_cpu(cpu, tidle);

        if (!ret) {
                cpumask_set_cpu(cpu, &smp_commenced_mask);
                while (!cpu_online(cpu))
                        mb();
                if (!cpu_online(cpu)) {
                        ret = -ENODEV;
                } else {
                        /* On SUN4V, writes to %tick and %stick are
                         * not allowed.
                         */
                        if (tlb_type != hypervisor)
                                smp_synchronize_one_tick(cpu);
                }
        }
        return ret;
}
#ifdef CONFIG_HOTPLUG_CPU
void cpu_play_dead(void)
{
        int cpu = smp_processor_id();
        unsigned long pstate;

        if (tlb_type == hypervisor) {
                struct trap_per_cpu *tb = &trap_block[cpu];
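
                /* Unconfigure this cpu's mondo and error queues with the
                 * hypervisor so nothing more gets delivered here while the
                 * cpu parks itself.
                 */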
                sun4v_cpu_qconf(HV_CPU_QUEUE_CPU_MONDO,
                                tb->cpu_mondo_pa, 0);
                sun4v_cpu_qconf(HV_CPU_QUEUE_DEVICE_MONDO,
                                tb->dev_mondo_pa, 0);
                sun4v_cpu_qconf(HV_CPU_QUEUE_RES_ERROR,
                                tb->resum_mondo_pa, 0);
                sun4v_cpu_qconf(HV_CPU_QUEUE_NONRES_ERROR,
                                tb->nonresum_mondo_pa, 0);
        }

        cpumask_clear_cpu(cpu, &smp_commenced_mask);
        membar_safe("#Sync");

        local_irq_disable();

        __asm__ __volatile__(
                "rdpr %%pstate, %0\n\t"
                "wrpr %0, %1, %%pstate"
                : "=r" (pstate)
                : "i" (PSTATE_IE));

        while (1)
                barrier();
}
int __cpu_disable(void)
{
        int cpu = smp_processor_id();
        int i;

        for_each_cpu(i, &cpu_core_map[cpu])
                cpumask_clear_cpu(cpu, &cpu_core_map[i]);
        cpumask_clear(&cpu_core_map[cpu]);

        for_each_cpu(i, &per_cpu(cpu_sibling_map, cpu))
                cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, i));
        cpumask_clear(&per_cpu(cpu_sibling_map, cpu));

        /* Make sure no interrupts point to this cpu. */
        fixup_irqs();

        local_irq_disable();

        set_cpu_online(cpu, false);

        return 0;
}
void __cpu_die(unsigned int cpu)
{
        int i;

        for (i = 0; i < 100; i++) {
                smp_rmb();
                if (!cpumask_test_cpu(cpu, &smp_commenced_mask))
                        break;
                msleep(100);
        }
        if (cpumask_test_cpu(cpu, &smp_commenced_mask)) {
                printk(KERN_ERR "CPU %u didn't die...\n", cpu);
        } else {
#if defined(CONFIG_SUN_LDOMS)
                unsigned long hv_err;
                int limit = 100;

                do {
                        hv_err = sun4v_cpu_stop(cpu);
                        if (hv_err == HV_EOK) {
                                set_cpu_present(cpu, false);
                                break;
                        }
                } while (--limit > 0);
                if (hv_err != HV_EOK)
                        printk(KERN_ERR "sun4v_cpu_stop() fails err=%lu\n",
                               hv_err);
#endif
        }
}
#endif

void __init smp_cpus_done(unsigned int max_cpus)
{
}
static void send_cpu_ipi(int cpu)
{
        xcall_deliver((u64) &xcall_receive_signal,
                      0, 0, cpumask_of(cpu));
}
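
/* CPU poke handshake: send_cpu_poke() marks the target's per-cpu "poke"
 * flag and asks the hypervisor to kick the idle cpu; the woken cpu runs
 * scheduler_poke(), which clears the flag and raises the reschedule
 * softint on itself.  If the poke cannot be sent, the caller falls back
 * to a plain IPI (send_cpu_ipi() above).
 */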
void scheduler_poke(void)
{
        if (!__this_cpu_read(poke))
                return;

        __this_cpu_write(poke, false);
        set_softint(1 << PIL_SMP_RECEIVE_SIGNAL);
}

static unsigned long send_cpu_poke(int cpu)
{
        unsigned long hv_err;

        per_cpu(poke, cpu) = true;
        hv_err = sun4v_cpu_poke(cpu);
        if (hv_err != HV_EOK) {
                per_cpu(poke, cpu) = false;
                pr_err_ratelimited("%s: sun4v_cpu_poke() fails err=%lu\n",
                                   __func__, hv_err);
        }

        return hv_err;
}
void arch_smp_send_reschedule(int cpu)
{
        if (cpu == smp_processor_id()) {
                WARN_ON_ONCE(preemptible());
                set_softint(1 << PIL_SMP_RECEIVE_SIGNAL);
                return;
        }

        /* Use cpu poke to resume idle cpu if supported. */
        if (cpu_poke && idle_cpu(cpu)) {
                unsigned long ret;

                ret = send_cpu_poke(cpu);
                if (ret == HV_EOK)
                        return;
        }

        /* Use IPI in following cases:
         * - cpu poke not supported
         * - send_cpu_poke() returns with error
         */
        send_cpu_ipi(cpu);
}

void smp_init_cpu_poke(void)
{
        unsigned long major;
        unsigned long minor;
        int ret;

        if (tlb_type != hypervisor)
                return;

        ret = sun4v_hvapi_get(HV_GRP_CORE, &major, &minor);
        if (ret) {
                pr_debug("HV_GRP_CORE is not registered\n");
                return;
        }

        if (major == 1 && minor >= 6) {
                /* CPU POKE is registered. */
                cpu_poke = true;
                return;
        }

        pr_debug("CPU_POKE not supported\n");
}
void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs)
{
        clear_softint(1 << irq);
}

static void stop_this_cpu(void *dummy)
{
        set_cpu_online(smp_processor_id(), false);
}
void smp_send_stop(void)
{
        int cpu;

        if (tlb_type == hypervisor) {
                int this_cpu = smp_processor_id();
#ifdef CONFIG_SERIAL_SUNHV
                sunhv_migrate_hvcons_irq(this_cpu);
#endif
                for_each_online_cpu(cpu) {
                        if (cpu == this_cpu)
                                continue;

                        set_cpu_online(cpu, false);
#ifdef CONFIG_SUN_LDOMS
                        if (ldom_domaining_enabled) {
                                unsigned long hv_err;

                                hv_err = sun4v_cpu_stop(cpu);
                                if (hv_err)
                                        printk(KERN_ERR "sun4v_cpu_stop() "
                                               "failed err=%lu\n", hv_err);
                        } else
#endif
                                prom_stopcpu_cpuid(cpu);
                }
        } else
                smp_call_function(stop_this_cpu, NULL, 0);
}
static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
{
        if (cpu_to_node(from) == cpu_to_node(to))
                return LOCAL_DISTANCE;
        else
                return REMOTE_DISTANCE;
}

static int __init pcpu_cpu_to_node(int cpu)
{
        return cpu_to_node(cpu);
}
void __init setup_per_cpu_areas(void)
{
        unsigned long delta;
        unsigned int cpu;
        int rc = -EINVAL;

        if (pcpu_chosen_fc != PCPU_FC_PAGE) {
                rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
                                            PERCPU_DYNAMIC_RESERVE, 4 << 20,
                                            pcpu_cpu_distance,
                                            pcpu_cpu_to_node);
                if (rc)
                        pr_warn("PERCPU: %s allocator failed (%d), "
                                "falling back to page size\n",
                                pcpu_fc_names[pcpu_chosen_fc], rc);
        }
        if (rc < 0)
                rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE,
                                           pcpu_cpu_to_node);
        if (rc < 0)
                panic("cannot initialize percpu area (err=%d)", rc);

        delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
        for_each_possible_cpu(cpu)
                __per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];

        /* Setup %g5 for the boot cpu. */
        __local_per_cpu_offset = __per_cpu_offset(smp_processor_id());

        of_fill_in_cpu_data();
        if (tlb_type == hypervisor)
                mdesc_fill_in_cpu_data(cpu_all_mask);
}