// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2017 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/kvm_host.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/kvm_book3s_64.h>
#include <asm/reg.h>
#include <asm/ppc-opcode.h>

/*
 * This handles the cases where the guest is in real suspend mode
 * and we want to get back to the guest without dooming the transaction.
 * The caller has checked that the guest is in real-suspend mode
 * (MSR[TS] = S and the fake-suspend flag is not set).
 */
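/*
 * Returns 1 if the instruction was emulated here and the guest can be
 * resumed directly, or 0 to defer to the full softpatch emulation path.
 */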
int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu)
{
	u32 instr = vcpu->arch.emul_inst;
	u64 newmsr, msr, bescr;
	int rs;

	/*
	 * rfid, rfebb, and mtmsrd encode bit 31 = 0 since it's a reserved bit
	 * in these instructions, so masking bit 31 out doesn't change these
	 * instructions.  For the tsr. instruction, if bit 31 = 0 then it is
	 * per the ISA an invalid form; however the P9 UM, in section 4.6.10
	 * "Book II Invalid Forms", states specifically that ignoring bit 31
	 * is an acceptable way to handle TM-related invalid forms that have
	 * bit 31 = 0.  Moreover, for emulation purposes both forms (with and
	 * without bit 31 set) can generate a softpatch interrupt, so both
	 * forms are handled below for tsr. to make them behave the same way.
	 */
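	/*
	 * PO_XOP_OPCODE_MASK keeps only the primary and extended opcode
	 * fields of the instruction (and clears bit 31), so the operand
	 * fields don't affect the case matching below.
	 */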
	switch (instr & PO_XOP_OPCODE_MASK) {
	case PPC_INST_RFID:
		/* XXX do we need to check for PR=0 here? */
		newmsr = vcpu->arch.shregs.srr1;
		/* should only get here for Sx -> T1 transition */
		if (!(MSR_TM_TRANSACTIONAL(newmsr) && (newmsr & MSR_TM)))
			return 0;
		newmsr = sanitize_msr(newmsr);
		vcpu->arch.shregs.msr = newmsr;
		vcpu->arch.cfar = vcpu->arch.regs.nip - 4;
		vcpu->arch.regs.nip = vcpu->arch.shregs.srr0;
		return 1;
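
	/*
	 * rfebb: return from event-based branch.  The guest's EBB handler
	 * is resuming a suspended transaction, so mirror the S -> T
	 * transition and redirect to the address in EBBRR.
	 */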
	case PPC_INST_RFEBB:
		/* check for PR=1 and arch 2.06 bit set in PCR */
		msr = vcpu->arch.shregs.msr;
		if ((msr & MSR_PR) && (vcpu->arch.vcore->pcr & PCR_ARCH_206))
			return 0;
		/* check EBB facility is available */
		if (!(vcpu->arch.hfscr & HFSCR_EBB) ||
		    ((msr & MSR_PR) && !(mfspr(SPRN_FSCR) & FSCR_EBB)))
			return 0;
		bescr = mfspr(SPRN_BESCR);
		/* expect to see a S->T transition requested */
		if (((bescr >> 30) & 3) != 2)
			return 0;
		/* rfebb's S field gives the new value of BESCR[GE] */
		bescr &= ~BESCR_GE;
		if (instr & (1 << 11))
			bescr |= BESCR_GE;
		mtspr(SPRN_BESCR, bescr);
		msr = (msr & ~MSR_TS_MASK) | MSR_TS_T;
		vcpu->arch.shregs.msr = msr;
		vcpu->arch.cfar = vcpu->arch.regs.nip - 4;
		vcpu->arch.regs.nip = mfspr(SPRN_EBBRR);
		return 1;
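
	/*
	 * mtmsrd: only the Sx -> T1 transition is emulated here; any other
	 * MSR update falls back to the full emulation path.
	 */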
	case PPC_INST_MTMSRD:
		/* XXX do we need to check for PR=0 here? */
		rs = (instr >> 21) & 0x1f;
		newmsr = kvmppc_get_gpr(vcpu, rs);
		msr = vcpu->arch.shregs.msr;
		/* check this is a Sx -> T1 transition */
		if (!(MSR_TM_TRANSACTIONAL(newmsr) && (newmsr & MSR_TM)))
			return 0;
		/* mtmsrd doesn't change LE */
		newmsr = (newmsr & ~MSR_LE) | (msr & MSR_LE);
		newmsr = sanitize_msr(newmsr);
		vcpu->arch.shregs.msr = newmsr;
		return 1;
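
	/*
	 * tsr. (tsuspend./tresume.): tresume. moves the guest from S to T;
	 * tsuspend. in suspend mode leaves MSR[TS] alone, and both set CR0.
	 */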
	/* ignore bit 31, see comment above */
	case (PPC_INST_TSR & PO_XOP_OPCODE_MASK):
		/* we know the MSR has the TS field = S (0b01) here */
		msr = vcpu->arch.shregs.msr;
		/* check for PR=1 and arch 2.06 bit set in PCR */
		if ((msr & MSR_PR) && (vcpu->arch.vcore->pcr & PCR_ARCH_206))
			return 0;
		/* check for TM disabled in the HFSCR or MSR */
		if (!(vcpu->arch.hfscr & HFSCR_TM) || !(msr & MSR_TM))
			return 0;
		/* L=1 => tresume => set TS to T (0b10) */
		if (instr & (1 << 21))
			vcpu->arch.shregs.msr = (msr & ~MSR_TS_MASK) | MSR_TS_T;
		/* Set CR0 to 0b0010 */
		vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
			0x20000000;
		return 1;
	}

	return 0;
}

/*
 * This is called when we are returning to a guest in TM transactional
 * state.  We roll the guest state back to the checkpointed state.
 */
void kvmhv_emulate_tm_rollback(struct kvm_vcpu *vcpu)
{
	vcpu->arch.shregs.msr &= ~MSR_TS_MASK;	/* go to N state */
	vcpu->arch.regs.nip = vcpu->arch.tfhar;	/* resume at failure handler */
	copy_from_checkpoint(vcpu);
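	/* Set CR0 to 0b1010, the value reported for a failed transaction */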
	vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) | 0xa0000000;
}