arch/powerpc/kvm/book3s_hv_tm.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2017 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/kvm_host.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/kvm_book3s_64.h>
#include <asm/reg.h>
#include <asm/ppc-opcode.h>
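
/*
 * Synthesize the architected result of a transaction failure for the
 * guest: the failure cause goes in the top byte of TEXASR, TFIAR records
 * the NIP of the failing instruction (with the low bit noting problem
 * state), and the ROT/TL fields of the existing TEXASR are preserved.
 */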
static void emulate_tx_failure(struct kvm_vcpu *vcpu, u64 failure_cause)
{
        u64 texasr, tfiar;
        u64 msr = vcpu->arch.shregs.msr;

        tfiar = vcpu->arch.regs.nip & ~0x3ull;
        texasr = (failure_cause << 56) | TEXASR_ABORT | TEXASR_FS | TEXASR_EXACT;
        if (MSR_TM_SUSPENDED(vcpu->arch.shregs.msr))
                texasr |= TEXASR_SUSP;
        if (msr & MSR_PR) {
                texasr |= TEXASR_PR;
                tfiar |= 1;
        }
        vcpu->arch.tfiar = tfiar;
        /* Preserve ROT and TL fields of existing TEXASR */
        vcpu->arch.texasr = (vcpu->arch.texasr & 0x3ffffff) | texasr;
}

/*
 * This gets called on a softpatch interrupt on POWER9 DD2.2 processors.
 * We expect to find a TM-related instruction to be emulated.  The
 * instruction image is in vcpu->arch.emul_inst.  If the guest was in
 * TM suspended or transactional state, the checkpointed state has been
 * reclaimed and is in the vcpu struct.  The CPU is in virtual mode in
 * host context.
 */
int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
{
        u32 instr = vcpu->arch.emul_inst;
        u64 msr = vcpu->arch.shregs.msr;
        u64 newmsr, bescr;
        int ra, rs;
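
        /*
         * The mask 0xfc0007ff keeps the primary opcode (bits 0:5) and the
         * extended-opcode/Rc field (bits 21:31, IBM bit numbering) and drops
         * the register fields, so each case below matches one instruction form.
         */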
        switch (instr & 0xfc0007ff) {
        case PPC_INST_RFID:
                /* XXX do we need to check for PR=0 here? */
                newmsr = vcpu->arch.shregs.srr1;
                /* should only get here for Sx -> T1 transition */
                WARN_ON_ONCE(!(MSR_TM_SUSPENDED(msr) &&
                               MSR_TM_TRANSACTIONAL(newmsr) &&
                               (newmsr & MSR_TM)));
                newmsr = sanitize_msr(newmsr);
                vcpu->arch.shregs.msr = newmsr;
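                /*
                 * nip at this point has been advanced past the 4-byte
                 * emulated instruction, hence the -4: CFAR gets the address
                 * of the rfid itself before we redirect execution to SRR0.
                 */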
                vcpu->arch.cfar = vcpu->arch.regs.nip - 4;
                vcpu->arch.regs.nip = vcpu->arch.shregs.srr0;
                return RESUME_GUEST;

        case PPC_INST_RFEBB:
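                /*
                 * rfebb only exists from ISA v2.07; a guest held to v2.06
                 * compatibility (PCR_ARCH_206) should see it as an illegal
                 * instruction in problem state.
                 */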
                if ((msr & MSR_PR) && (vcpu->arch.vcore->pcr & PCR_ARCH_206)) {
                        /* generate an illegal instruction interrupt */
                        kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
                        return RESUME_GUEST;
                }
                /* check EBB facility is available */
                if (!(vcpu->arch.hfscr & HFSCR_EBB)) {
                        /* generate an illegal instruction interrupt */
                        kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
                        return RESUME_GUEST;
                }
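                /*
                 * EBB is enabled by the hypervisor but the guest kernel has
                 * not enabled it for problem state: deliver a facility
                 * unavailable interrupt, with the cause (FSCR_EBB_LG) in the
                 * top byte (IC field) of FSCR.
                 */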
                if ((msr & MSR_PR) && !(vcpu->arch.fscr & FSCR_EBB)) {
                        /* generate a facility unavailable interrupt */
                        vcpu->arch.fscr = (vcpu->arch.fscr & ~(0xffull << 56)) |
                                ((u64)FSCR_EBB_LG << 56);
                        kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FAC_UNAVAIL);
                        return RESUME_GUEST;
                }
                bescr = vcpu->arch.bescr;
                /* expect to see a S->T transition requested */
                WARN_ON_ONCE(!(MSR_TM_SUSPENDED(msr) &&
                               ((bescr >> 30) & 3) == 2));
                bescr &= ~BESCR_GE;
                if (instr & (1 << 11))
                        bescr |= BESCR_GE;
                vcpu->arch.bescr = bescr;
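                /*
                 * Resume the suspended transaction: move MSR[TS] to
                 * transactional, record the rfebb address in CFAR, and
                 * branch to EBBRR.
                 */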
                msr = (msr & ~MSR_TS_MASK) | MSR_TS_T;
                vcpu->arch.shregs.msr = msr;
                vcpu->arch.cfar = vcpu->arch.regs.nip - 4;
                vcpu->arch.regs.nip = vcpu->arch.ebbrr;
                return RESUME_GUEST;

        case PPC_INST_MTMSRD:
                /* XXX do we need to check for PR=0 here? */
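                /* RS field (bits 6:10) names the GPR holding the new MSR value */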
                rs = (instr >> 21) & 0x1f;
                newmsr = kvmppc_get_gpr(vcpu, rs);
                /* check this is a Sx -> T1 transition */
                WARN_ON_ONCE(!(MSR_TM_SUSPENDED(msr) &&
                               MSR_TM_TRANSACTIONAL(newmsr) &&
                               (newmsr & MSR_TM)));
                /* mtmsrd doesn't change LE */
                newmsr = (newmsr & ~MSR_LE) | (msr & MSR_LE);
                newmsr = sanitize_msr(newmsr);
                vcpu->arch.shregs.msr = newmsr;
                return RESUME_GUEST;

        case PPC_INST_TSR:
                /* check for PR=1 and arch 2.06 bit set in PCR */
                if ((msr & MSR_PR) && (vcpu->arch.vcore->pcr & PCR_ARCH_206)) {
                        /* generate an illegal instruction interrupt */
                        kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
                        return RESUME_GUEST;
                }
                /* check for TM disabled in the HFSCR or MSR */
                if (!(vcpu->arch.hfscr & HFSCR_TM)) {
                        /* generate an illegal instruction interrupt */
                        kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
                        return RESUME_GUEST;
                }
                if (!(msr & MSR_TM)) {
                        /* generate a facility unavailable interrupt */
                        vcpu->arch.fscr = (vcpu->arch.fscr & ~(0xffull << 56)) |
                                ((u64)FSCR_TM_LG << 56);
                        kvmppc_book3s_queue_irqprio(vcpu,
                                        BOOK3S_INTERRUPT_FAC_UNAVAIL);
                        return RESUME_GUEST;
                }
                /* Set CR0 to indicate previous transactional state */
                vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
                        (((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 29);
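                /* ccr bits 29:30 now hold the old TS value: CR0 = 0b0 || TS || 0 */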
                /* L=1 => tresume, L=0 => tsuspend */
                if (instr & (1 << 21)) {
                        if (MSR_TM_SUSPENDED(msr))
                                msr = (msr & ~MSR_TS_MASK) | MSR_TS_T;
                } else {
                        if (MSR_TM_TRANSACTIONAL(msr))
                                msr = (msr & ~MSR_TS_MASK) | MSR_TS_S;
                }
                vcpu->arch.shregs.msr = msr;
                return RESUME_GUEST;

        case PPC_INST_TRECLAIM:
                /* check for TM disabled in the HFSCR or MSR */
                if (!(vcpu->arch.hfscr & HFSCR_TM)) {
                        /* generate an illegal instruction interrupt */
                        kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
                        return RESUME_GUEST;
                }
                if (!(msr & MSR_TM)) {
                        /* generate a facility unavailable interrupt */
                        vcpu->arch.fscr = (vcpu->arch.fscr & ~(0xffull << 56)) |
                                ((u64)FSCR_TM_LG << 56);
                        kvmppc_book3s_queue_irqprio(vcpu,
                                        BOOK3S_INTERRUPT_FAC_UNAVAIL);
                        return RESUME_GUEST;
                }
                /* If no transaction active, generate TM bad thing */
                if (!MSR_TM_ACTIVE(msr)) {
                        kvmppc_core_queue_program(vcpu, SRR1_PROGTM);
                        return RESUME_GUEST;
                }
                /* If failure was not previously recorded, recompute TEXASR */
                if (!(vcpu->arch.orig_texasr & TEXASR_FS)) {
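                        /* treclaim takes the failure cause from the low byte of RA */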
                        ra = (instr >> 16) & 0x1f;
                        if (ra)
                                ra = kvmppc_get_gpr(vcpu, ra) & 0xff;
                        emulate_tx_failure(vcpu, ra);
                }
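
                /* treclaim makes the checkpointed register values the current ones */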
                copy_from_checkpoint(vcpu);

                /* Set CR0 to indicate previous transactional state */
                vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
                        (((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 29);
                vcpu->arch.shregs.msr &= ~MSR_TS_MASK;
                return RESUME_GUEST;

        case PPC_INST_TRECHKPT:
                /* XXX do we need to check for PR=0 here? */
                /* check for TM disabled in the HFSCR or MSR */
                if (!(vcpu->arch.hfscr & HFSCR_TM)) {
                        /* generate an illegal instruction interrupt */
                        kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
                        return RESUME_GUEST;
                }
                if (!(msr & MSR_TM)) {
                        /* generate a facility unavailable interrupt */
                        vcpu->arch.fscr = (vcpu->arch.fscr & ~(0xffull << 56)) |
                                ((u64)FSCR_TM_LG << 56);
                        kvmppc_book3s_queue_irqprio(vcpu,
                                        BOOK3S_INTERRUPT_FAC_UNAVAIL);
                        return RESUME_GUEST;
                }
                /* If transaction active or TEXASR[FS] = 0, bad thing */
                if (MSR_TM_ACTIVE(msr) || !(vcpu->arch.texasr & TEXASR_FS)) {
                        kvmppc_core_queue_program(vcpu, SRR1_PROGTM);
                        return RESUME_GUEST;
                }
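
                /*
                 * trechkpt copies the current register values into the
                 * checkpoint and leaves the thread in suspended state.
                 */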
                copy_to_checkpoint(vcpu);

                /* Set CR0 to indicate previous transactional state */
                vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
                        (((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 29);
                vcpu->arch.shregs.msr = msr | MSR_TS_S;
                return RESUME_GUEST;
        }

        /* What should we do here? We didn't recognize the instruction */
        WARN_ON_ONCE(1);
        return RESUME_GUEST;
}