// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2012 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/kernel.h>
#include <asm/opal.h>
#include <asm/mce.h>
#include <asm/machdep.h>
#include <asm/cputhreads.h>
#include <asm/hmi.h>
#include <asm/kvm_ppc.h>

/* SRR1 bits for machine check on POWER7 */
#define SRR1_MC_LDSTERR		(1ul << (63-42))
#define SRR1_MC_IFETCH_SH	(63-45)
#define SRR1_MC_IFETCH_MASK	0x7
#define SRR1_MC_IFETCH_SLBPAR		2	/* SLB parity error */
#define SRR1_MC_IFETCH_SLBMULTI		3	/* SLB multi-hit */
#define SRR1_MC_IFETCH_SLBPARMULTI	4	/* SLB parity + multi-hit */
#define SRR1_MC_IFETCH_TLBMULTI		5	/* I-TLB multi-hit */

/* DSISR bits for machine check on POWER7 */
#define DSISR_MC_DERAT_MULTI	0x800	/* D-ERAT multi-hit */
#define DSISR_MC_TLB_MULTI	0x400	/* D-TLB multi-hit */
#define DSISR_MC_SLB_PARITY	0x100	/* SLB parity error */
#define DSISR_MC_SLB_MULTI	0x080	/* SLB multi-hit */
#define DSISR_MC_SLB_PARMULTI	0x040	/* SLB parity + multi-hit */

/* POWER7 SLB flush and reload */
static void reload_slb(struct kvm_vcpu *vcpu)
{
	struct slb_shadow *slb;
	unsigned long i, n;

	/* First clear out SLB */
	asm volatile("slbmte %0,%0; slbia" : : "r" (0));
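	/*
	 * slbia may leave SLB entry 0 intact on these CPUs, which is why
	 * the slbmte with zero operands above clears that entry explicitly
	 * first. (This is the usual rationale for the idiom; exact slbia
	 * behaviour varies by implementation.)
	 */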

	/* Do they have an SLB shadow buffer registered? */
	slb = vcpu->arch.slb_shadow.pinned_addr;
	if (!slb)
		return;

	/* Sanity check */
	n = min_t(u32, be32_to_cpu(slb->persistent), SLB_MIN_SIZE);
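	/*
	 * The shadow buffer and its persistent count are guest-supplied,
	 * so check that the save area we are about to read stays within
	 * the pinned region.
	 */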
	if ((void *) &slb->save_area[n] > vcpu->arch.slb_shadow.pinned_end)
		return;
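
	/*
	 * For slbmte, RB carries the ESID with the SLB entry index in its
	 * low 12 bits, and RS carries the VSID plus protection/size flags,
	 * which is why the loop below masks the low bits of rb before
	 * inserting the entry number.
	 */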
	/* Load up the SLB from that */
	for (i = 0; i < n; ++i) {
		unsigned long rb = be64_to_cpu(slb->save_area[i].esid);
		unsigned long rs = be64_to_cpu(slb->save_area[i].vsid);

		rb = (rb & ~0xFFFul) | i;	/* insert entry number */
		asm volatile("slbmte %0,%1" : : "r" (rs), "r" (rb));
	}
}

/*
 * On POWER7, see if we can handle a machine check that occurred inside
 * the guest in real mode, without switching to the host partition.
 */
static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu)
{
	unsigned long srr1 = vcpu->arch.shregs.msr;
	long handled = 1;

	if (srr1 & SRR1_MC_LDSTERR) {
		/* error on load/store */
		unsigned long dsisr = vcpu->arch.shregs.dsisr;

		if (dsisr & (DSISR_MC_SLB_PARMULTI | DSISR_MC_SLB_MULTI |
			     DSISR_MC_SLB_PARITY | DSISR_MC_DERAT_MULTI)) {
			/* flush and reload SLB; flushes D-ERAT too */
			reload_slb(vcpu);
			dsisr &= ~(DSISR_MC_SLB_PARMULTI | DSISR_MC_SLB_MULTI |
				   DSISR_MC_SLB_PARITY | DSISR_MC_DERAT_MULTI);
		}
		if (dsisr & DSISR_MC_TLB_MULTI) {
			tlbiel_all_lpid(vcpu->kvm->arch.radix);
			dsisr &= ~DSISR_MC_TLB_MULTI;
		}
		/* Any other errors we don't understand? */
		if (dsisr & 0xffffffffUL)
			handled = 0;
	}

	switch ((srr1 >> SRR1_MC_IFETCH_SH) & SRR1_MC_IFETCH_MASK) {
	case 0:
		break;
	case SRR1_MC_IFETCH_SLBPAR:
	case SRR1_MC_IFETCH_SLBMULTI:
	case SRR1_MC_IFETCH_SLBPARMULTI:
		reload_slb(vcpu);
		break;
	case SRR1_MC_IFETCH_TLBMULTI:
		tlbiel_all_lpid(vcpu->kvm->arch.radix);
		break;
	default:
		handled = 0;
	}

	return handled;
}
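
/*
 * Handle a machine check taken while in the guest, in real mode. Attempt
 * recovery via kvmppc_realmode_mc_power7() where permitted, then stash
 * the MCE event on the vcpu so the primary thread can report it once
 * back in virtual mode.
 */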
void kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu)
{
	struct machine_check_event mce_evt;
	long handled = 1;

	if (vcpu->kvm->arch.fwnmi_enabled) {
		/* FWNMI guests handle their own recovery */
		handled = 0;
	} else {
		handled = kvmppc_realmode_mc_power7(vcpu);
	}

	/*
	 * Now get the event and stash it in the vcpu struct so it can
	 * be handled by the primary thread in virtual mode. We can't
	 * call machine_check_queue_event() here if we are running on
	 * an offline secondary thread.
	 */
	if (get_mce_event(&mce_evt, MCE_EVENT_RELEASE)) {
		if (handled && mce_evt.version == MCE_V1)
			mce_evt.disposition = MCE_DISPOSITION_RECOVERED;
	} else {
		memset(&mce_evt, 0, sizeof(mce_evt));
	}

	vcpu->arch.mce_evt = mce_evt;
}

/* Check if dynamic split is in force and return subcore size accordingly. */
static inline int kvmppc_cur_subcore_size(void)
{
	if (local_paca->kvm_hstate.kvm_split_mode)
		return local_paca->kvm_hstate.kvm_split_mode->subcore_size;

	return threads_per_subcore;
}
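
/*
 * Only the primary thread of each subcore updates its in_guest[] slot,
 * setting it on guest entry and clearing it on exit; the HMI handler
 * below relies on these flags to tell when all subcores have left the
 * guest.
 */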
void kvmppc_subcore_enter_guest(void)
{
	int thread_id, subcore_id;

	thread_id = cpu_thread_in_core(local_paca->paca_index);
	subcore_id = thread_id / kvmppc_cur_subcore_size();

	local_paca->sibling_subcore_state->in_guest[subcore_id] = 1;
}
EXPORT_SYMBOL_GPL(kvmppc_subcore_enter_guest);

void kvmppc_subcore_exit_guest(void)
{
	int thread_id, subcore_id;

	thread_id = cpu_thread_in_core(local_paca->paca_index);
	subcore_id = thread_id / kvmppc_cur_subcore_size();

	local_paca->sibling_subcore_state->in_guest[subcore_id] = 0;
}
EXPORT_SYMBOL_GPL(kvmppc_subcore_exit_guest);
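
/*
 * Atomically claim the TB resync: test_and_set_bit() returns the old
 * bit value, so exactly one caller per core sees it clear and becomes
 * the resync leader.
 */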
static bool kvmppc_tb_resync_required(void)
{
	if (test_and_set_bit(CORE_TB_RESYNC_REQ_BIT,
			     &local_paca->sibling_subcore_state->flags))
		return false;

	return true;
}

static void kvmppc_tb_resync_done(void)
{
	clear_bit(CORE_TB_RESYNC_REQ_BIT,
		  &local_paca->sibling_subcore_state->flags);
}

/*
 * kvmppc_realmode_hmi_handler() is called only by the primary thread during
 * the guest exit path.
 *
 * There are multiple reasons why an HMI could occur, one of them being a
 * Timebase (TB) error. If this HMI is due to a TB error, then the TB would
 * have been in a stopped state. The opal hmi handler will fix it and
 * restore the TB value with the host timebase value. For an HMI caused by
 * non-TB errors, the opal hmi handler will not touch/restore the TB register
 * and hence there won't be any change in the TB value.
 *
 * Since we are not sure about the cause of this HMI, we can't be sure
 * whether the TB register holds the guest or the host timebase value.
 * Hence the idea is to resync the TB on every HMI, so that we know the
 * exact state of the TB value. The resync TB call will restore the TB to
 * the host timebase.
 *
 * Things to consider:
 * - On a TB error, the HMI interrupt is reported on all the threads of the
 *   core that has encountered the TB error, irrespective of split-core mode.
 * - The very first thread on the core that gets a chance to fix the TB
 *   error resyncs the TB with the local chipTOD value.
 * - The TB resync is a core-level action, i.e. it syncs all the TBs in
 *   that core independent of split-core mode. This means that if we
 *   trigger a TB sync from a thread in one subcore, it affects the TB
 *   values of sibling subcores of the same core.
 *
 * All threads need to co-ordinate before calling the opal hmi handler.
 * All threads use sibling_subcore_state->in_guest[] (shared by all
 * threads in the core) in the paca, which holds information about whether
 * sibling subcores are in guest mode or host mode. The in_guest[] array
 * is of size MAX_SUBCORE_PER_CORE=4, indexed by subcore id to set/unset
 * the subcore status. Only the primary thread from each subcore is
 * responsible for setting/unsetting its designated array element while
 * entering/exiting the guest.
 *
 * After invoking the opal hmi handler, one of the threads (of the entire
 * core) will need to resync the TB. Bit 63 of the subcore state flags
 * (sibling_subcore_state->flags) is used to co-ordinate between the
 * primary threads to decide who takes up the responsibility.
 *
 * This is what we do:
 * - The primary thread from each subcore tries to set the resync required
 *   bit[63] of paca->sibling_subcore_state->flags.
 * - The first primary thread that is able to set the flag takes
 *   responsibility for the TB resync. (Let us call it the thread leader.)
 * - All other threads which are in the host will call
 *   wait_for_subcore_guest_exit() and wait for in_guest[0-3] from
 *   paca->sibling_subcore_state to get cleared.
 * - Each primary thread clears its own subcore status from the
 *   in_guest[] array.
 * - Once all primary threads have cleared in_guest[0-3], all of them
 *   invoke the opal hmi handler.
 * - All threads then wait for the TB resync to complete by invoking
 *   wait_for_tb_resync(), except the thread leader.
 * - The thread leader does the TB resync by calling
 *   opal_resync_timebase(), then clears the resync required bit.
 * - All other threads now come out of the resync wait loop and proceed
 *   with their individual execution.
 * - On return from this function, the primary thread signals all
 *   secondary threads to proceed.
 * - All secondary threads eventually call the opal hmi handler on
 *   their exit path.
 *
 * Returns 1 if the timebase offset should be applied, 0 if not.
 */

long kvmppc_realmode_hmi_handler(void)
{
	bool resync_req;

	local_paca->hmi_irqs++;
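
	/*
	 * If this HMI was just a debug trigger, it has been handled and no
	 * TB resync is needed; tell the caller the TB offset still applies.
	 */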
	if (hmi_handle_debugtrig(NULL) >= 0)
		return 1;

	/*
	 * By now the primary thread has already completed the guest->host
	 * partition switch but hasn't signaled the secondaries yet.
	 * All the secondary threads on this subcore are waiting
	 * for the primary thread to signal them to go ahead.
	 *
	 * Threads from a subcore that isn't in the guest will all
	 * wait until all other subcores on this core exit the guest.
	 *
	 * Now set the resync required bit. If you are the first to
	 * set this bit then kvmppc_tb_resync_required() will return
	 * true. For all other subcores kvmppc_tb_resync_required()
	 * will return false.
	 *
	 * If resync_req == true, then this thread is responsible for
	 * initiating the TB resync after the hmi handler has completed.
	 * All other threads on this core will wait until this thread
	 * clears the resync required bit flag.
	 */
	resync_req = kvmppc_tb_resync_required();

	/* Reset the subcore status to indicate it has exited guest */
	kvmppc_subcore_exit_guest();

	/*
	 * Wait for other subcores on this core to exit the guest.
	 * All the primary threads, and threads from subcores that are
	 * not in the guest, will wait here until all subcores are out
	 * of guest context.
	 */
	wait_for_subcore_guest_exit();

	/*
	 * At this point we are sure that the primary threads from each
	 * subcore on this core have completed the guest->host partition
	 * switch. Now it is safe to call the HMI handler.
	 */
	if (ppc_md.hmi_exception_early)
		ppc_md.hmi_exception_early(NULL);

	/*
	 * Check if this thread is responsible for resyncing the TB.
	 * All other threads will wait until this thread completes the
	 * TB resync.
	 */
	if (resync_req) {
		opal_resync_timebase();
		/* Reset TB resync req bit */
		kvmppc_tb_resync_done();
	} else {
		wait_for_tb_resync();
	}

	/*
	 * Reset tb_offset_applied so the guest exit code won't try
	 * to subtract the previous timebase offset from the timebase.
	 */
	if (local_paca->kvm_hstate.kvm_vcore)
		local_paca->kvm_hstate.kvm_vcore->tb_offset_applied = 0;

	return 0;
}