// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2017 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/kvm_host.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/kvm_book3s_64.h>
#include <asm/reg.h>
#include <asm/ppc-opcode.h>

/*
 * This handles the cases where the guest is in real suspend mode
 * and we want to get back to the guest without dooming the transaction.
 * The caller has checked that the guest is in real-suspend mode
 * (MSR[TS] = S and the fake-suspend flag is not set).
 */
int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu)
{
	u32 instr = vcpu->arch.emul_inst;
	u64 newmsr, msr, bescr;
	int rs;
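
	/*
	 * The mask keeps the primary opcode (top 6 bits) and the
	 * extended-opcode/Rc fields, so each case below matches one
	 * full instruction encoding.
	 */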
	switch (instr & 0xfc0007ff) {
	case PPC_INST_RFID:
		/* XXX do we need to check for PR=0 here? */
		newmsr = vcpu->arch.shregs.srr1;
		/* should only get here for Sx -> T1 transition */
		if (!(MSR_TM_TRANSACTIONAL(newmsr) && (newmsr & MSR_TM)))
			return 0;
		newmsr = sanitize_msr(newmsr);
		vcpu->arch.shregs.msr = newmsr;
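		/* CFAR gets the address of the rfid itself; NIP here points just past it */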
		vcpu->arch.cfar = vcpu->arch.regs.nip - 4;
		vcpu->arch.regs.nip = vcpu->arch.shregs.srr0;
		return 1;

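	/* rfebb: return from an event-based branch handler, resuming at EBBRR */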
	case PPC_INST_RFEBB:
		/* check for PR=1 and arch 2.06 bit set in PCR */
		msr = vcpu->arch.shregs.msr;
		if ((msr & MSR_PR) && (vcpu->arch.vcore->pcr & PCR_ARCH_206))
			return 0;
		/* check EBB facility is available */
		if (!(vcpu->arch.hfscr & HFSCR_EBB) ||
		    ((msr & MSR_PR) && !(mfspr(SPRN_FSCR) & FSCR_EBB)))
			return 0;
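		/* per ISA, BESCR[TS] records the transaction state rfebb restores */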
		bescr = mfspr(SPRN_BESCR);
		/* expect to see a S->T transition requested */
		if (((bescr >> 30) & 3) != 2)
			return 0;
		bescr &= ~BESCR_GE;
		if (instr & (1 << 11))
			bescr |= BESCR_GE;
		mtspr(SPRN_BESCR, bescr);
		msr = (msr & ~MSR_TS_MASK) | MSR_TS_T;
		vcpu->arch.shregs.msr = msr;
		vcpu->arch.cfar = vcpu->arch.regs.nip - 4;
		vcpu->arch.regs.nip = mfspr(SPRN_EBBRR);
		return 1;

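	/* mtmsrd: only the Sx -> T1 transition is handled early here */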
	case PPC_INST_MTMSRD:
		/* XXX do we need to check for PR=0 here? */
		rs = (instr >> 21) & 0x1f;
		newmsr = kvmppc_get_gpr(vcpu, rs);
		msr = vcpu->arch.shregs.msr;
		/* check this is a Sx -> T1 transition */
		if (!(MSR_TM_TRANSACTIONAL(newmsr) && (newmsr & MSR_TM)))
			return 0;
		/* mtmsrd doesn't change LE */
		newmsr = (newmsr & ~MSR_LE) | (msr & MSR_LE);
		newmsr = sanitize_msr(newmsr);
		vcpu->arch.shregs.msr = newmsr;
		return 1;

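	/* tsr.: tsuspend (L=0) or tresume (L=1) */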
	case PPC_INST_TSR:
		/* we know the MSR has the TS field = S (0b01) here */
		msr = vcpu->arch.shregs.msr;
		/* check for PR=1 and arch 2.06 bit set in PCR */
		if ((msr & MSR_PR) && (vcpu->arch.vcore->pcr & PCR_ARCH_206))
			return 0;
		/* check for TM disabled in the HFSCR or MSR */
		if (!(vcpu->arch.hfscr & HFSCR_TM) || !(msr & MSR_TM))
			return 0;
		/* L=1 => tresume => set TS to T (0b10) */
		if (instr & (1 << 21))
			vcpu->arch.shregs.msr = (msr & ~MSR_TS_MASK) | MSR_TS_T;
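		/* for tsuspend (L=0) the state is already suspended; only CR0 changes */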
		/* Set CR0 to 0b0010 */
		vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
			0x20000000;
		return 1;
	}

	/* not an instruction we can handle early */
	return 0;
}

/*
 * This is called when we are returning to a guest in TM transactional
 * state.  We roll the guest state back to the checkpointed state.
 */
void kvmhv_emulate_tm_rollback(struct kvm_vcpu *vcpu)
{
	vcpu->arch.shregs.msr &= ~MSR_TS_MASK;	/* go to N state */
	vcpu->arch.regs.nip = vcpu->arch.tfhar;
	copy_from_checkpoint(vcpu);
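	/* set CR0 to 0b1010, the value tbegin. yields on transaction failure */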
	vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) | 0xa0000000;
}