// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/mmzone.h>
#include <linux/nodemask.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/atomic.h>
#include <asm/sn/types.h>
#include <asm/sn/addrs.h>
#include <asm/sn/nmi.h>
#include <asm/sn/arch.h>
#include <asm/sn/agent.h>

#include "ip27-common.h"

#if 0
#define NODE_NUM_CPUS(n)	CNODE_NUM_CPUS(n)
#else
#define NODE_NUM_CPUS(n)	CPUS_PER_NODE
#endif

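/*
 * Fire an NMI at a remote CPU by writing 1 to the target hub's PI_NMI
 * register for the given slice.
 */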
#define SEND_NMI(_nasid, _slice) \
	REMOTE_HUB_S((_nasid), (PI_NMI_A + ((_slice) * PI_NMI_OFFSET)), 1)

typedef unsigned long machreg_t;

static arch_spinlock_t nmi_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static void nmi_dump(void);

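/*
 * Register nmi_dump() in the PROM NMI vector of the given slice on the
 * local node.  call_addr_c is the one's complement of call_addr,
 * presumably so the PROM can validate the entry; an already installed
 * handler is left untouched.
 */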
void install_cpu_nmi_handler(int slice)
{
	nmi_t *nmi_addr;

	nmi_addr = (nmi_t *)NMI_ADDR(get_nasid(), slice);
	if (nmi_addr->call_addr)
		return;
	nmi_addr->magic = NMI_MAGIC;
	nmi_addr->call_addr = (void *)nmi_dump;
	nmi_addr->call_addr_c =
		(void *)(~((unsigned long)(nmi_addr->call_addr)));
	nmi_addr->call_parm = 0;
}

/*
 * Dump the cpu registers which have been saved in the IP27prom format
 * for the cpu slice under consideration.
 */

static void nmi_cpu_eframe_save(nasid_t nasid, int slice)
{
	struct reg_struct *nr;
	int i;

	/* Get the pointer to the current cpu's register set. */
	nr = (struct reg_struct *)
		(TO_UNCAC(TO_NODE(nasid, IP27_NMI_KREGS_OFFSET)) +
		slice * IP27_NMI_KREGS_CPU_SIZE);

	pr_emerg("NMI nasid %d: slice %d\n", nasid, slice);

	/*
	 * Saved main processor registers
	 */
	for (i = 0; i < 32; ) {
		if ((i % 4) == 0)
			pr_emerg("$%2d   :", i);

		pr_cont(" %016lx", nr->gpr[i]);

		i++;
		if ((i % 4) == 0)
			pr_cont("\n");
	}

	pr_emerg("Hi    : (value lost)\n");
	pr_emerg("Lo    : (value lost)\n");

80 pr_emerg("epc : %016lx %pS\n", nr
->epc
, (void *)nr
->epc
);
81 pr_emerg("%s\n", print_tainted());
82 pr_emerg("ErrEPC: %016lx %pS\n", nr
->error_epc
, (void *)nr
->error_epc
);
83 pr_emerg("ra : %016lx %pS\n", nr
->gpr
[31], (void *)nr
->gpr
[31]);
84 pr_emerg("Status: %08lx ", nr
->sr
);
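	/* Decode the address-mode, operating-mode and exception-state bits of the Status register. */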
	if (nr->sr & ST0_KX)
		pr_cont("KX ");
	if (nr->sr & ST0_SX)
		pr_cont("SX ");
	if (nr->sr & ST0_UX)
		pr_cont("UX ");

	switch (nr->sr & ST0_KSU) {
	case KSU_USER:
		pr_cont("USER ");
		break;
	case KSU_SUPERVISOR:
		pr_cont("SUPERVISOR ");
		break;
	case KSU_KERNEL:
		pr_cont("KERNEL ");
		break;
	default:
		pr_cont("BAD_MODE ");
		break;
	}

	if (nr->sr & ST0_ERL)
		pr_cont("ERL ");
	if (nr->sr & ST0_EXL)
		pr_cont("EXL ");
	pr_cont("\n");

116 pr_emerg("Cause : %08lx\n", nr
->cause
);
117 pr_emerg("PrId : %08x\n", read_c0_prid());
118 pr_emerg("BadVA : %016lx\n", nr
->badva
);
119 pr_emerg("CErr : %016lx\n", nr
->cache_err
);
120 pr_emerg("NMI_SR: %016lx\n", nr
->nmi_sr
);
static void nmi_dump_hub_irq(nasid_t nasid, int slice)
{
	u64 mask0, mask1, pend0, pend1;

	if (slice == 0) {				/* Slice A */
		mask0 = REMOTE_HUB_L(nasid, PI_INT_MASK0_A);
		mask1 = REMOTE_HUB_L(nasid, PI_INT_MASK1_A);
	} else {					/* Slice B */
		mask0 = REMOTE_HUB_L(nasid, PI_INT_MASK0_B);
		mask1 = REMOTE_HUB_L(nasid, PI_INT_MASK1_B);
	}

	pend0 = REMOTE_HUB_L(nasid, PI_INT_PEND0);
	pend1 = REMOTE_HUB_L(nasid, PI_INT_PEND1);

	pr_emerg("PI_INT_MASK0: %16llx PI_INT_MASK1: %16llx\n", mask0, mask1);
	pr_emerg("PI_INT_PEND0: %16llx PI_INT_PEND1: %16llx\n", pend0, pend1);
	pr_emerg("\n\n");
}

/*
 * Dump the saved cpu registers and hub interrupt state for every slice
 * of the node under consideration.
 */
static void nmi_node_eframe_save(nasid_t nasid)
{
	int slice;

	if (nasid == INVALID_NASID)
		return;

	/* Save the registers into eframe for each cpu */
	for (slice = 0; slice < NODE_NUM_CPUS(slice); slice++) {
		nmi_cpu_eframe_save(nasid, slice);
		nmi_dump_hub_irq(nasid, slice);
	}
}

/*
 * Save the nmi cpu registers for all cpus in the system.
 */
static void nmi_eframes_save(void)
{
	nasid_t nasid;

	for_each_online_node(nasid)
		nmi_node_eframe_save(nasid);
}

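/*
 * NMI entry point installed by install_cpu_nmi_handler().  The first
 * CPU to take nmi_lock dumps the saved registers and hub interrupt
 * state of every online node, then pokes the local hub's NI_PORT_RESET
 * register.
 */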
static void nmi_dump(void)
{
#ifndef REAL_NMI_SIGNAL
	static atomic_t nmied_cpus = ATOMIC_INIT(0);

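	/* Count this cpu in so the dumping cpu can tell when all have arrived. */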
	atomic_inc(&nmied_cpus);
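#else
	/*
	 * Presumably needed by the REAL_NMI_SIGNAL polling loop below,
	 * which references them without declaring them anywhere.
	 */
	int node, cpu, i, n;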
#endif
	/*
	 * Only allow 1 cpu to proceed
	 */
	arch_spin_lock(&nmi_lock);

#ifdef REAL_NMI_SIGNAL
	/*
	 * Wait up to 15 seconds for the other cpus to respond to the NMI.
	 * If a cpu has not responded after 10 sec, send it 1 additional NMI.
	 * This is for 2 reasons:
	 *	- sometimes the MMSC fails to NMI all cpus.
	 *	- on a 512p SN0 system, the MMSC will only send NMIs to
	 *	  half the cpus.  Unfortunately, we don't know which cpus may
	 *	  be NMIed - it depends on how the site chooses to configure.
	 *
	 * Note: it has been measured that it takes the MMSC up to 2.3 secs
	 *	 to send NMIs to all cpus on a 256p system.
	 */
	for (i = 0; i < 1500; i++) {
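		/* 1500 passes at roughly 10 ms each gives the 15 second window above. */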
		for_each_online_node(node)
			if (NODEPDA(node)->dump_count == 0)
				break;
		if (node == MAX_NUMNODES)
			break;
		for_each_online_node(node)
			if (NODEPDA(node)->dump_count == 0) {
				cpu = cpumask_first(cpumask_of_node(node));
				for (n = 0; n < CNODE_NUM_CPUS(node); cpu++, n++) {
					CPUMASK_SETB(nmied_cpus, cpu);
					/* cputonasid, cputoslice */
					SEND_NMI((cputonasid(cpu)), (cputoslice(cpu)));
				}
			}
		udelay(10000);
	}
#else
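	/* No hardware NMI broadcast: wait until every online cpu has checked in. */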
	while (atomic_read(&nmied_cpus) != num_online_cpus())
		;
#endif

	/*
	 * Save the nmi cpu registers for all cpus in the eframe format.
	 */
	nmi_eframes_save();
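	/*
	 * Judging by the NPR_* flag names, this resets the local hub's
	 * network interface port once the dump is complete.
	 */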
	LOCAL_HUB_S(NI_PORT_RESET, NPR_PORTRESET | NPR_LOCALRESET);
}