/*
 * Fault injection for both 32 and 64bit guests.
 *
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Based on arch/arm/kvm/emulate.c
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/esr.h>
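/*
 * PSTATE value installed on exception entry: EL1 using SP_EL1 (EL1h),
 * with the D, A, I and F exception masks all set, matching the
 * architectural behaviour of taking an exception to AArch64 EL1.
 */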
#define PSTATE_FAULT_BITS_64	(PSR_MODE_EL1h | PSR_A_BIT | PSR_F_BIT | \
				 PSR_I_BIT | PSR_D_BIT)
#define CURRENT_EL_SP_EL0_VECTOR	0x0
#define CURRENT_EL_SP_ELx_VECTOR	0x200
#define LOWER_EL_AArch64_VECTOR		0x400
#define LOWER_EL_AArch32_VECTOR		0x600
/*
 * Table taken from ARMv8 ARM DDI0487B-B, table G1-10.
 */
static const u8 return_offsets[8][2] = {
	[0] = { 0, 0 },		/* Reset, unused */
	[1] = { 4, 2 },		/* Undefined */
	[2] = { 0, 0 },		/* SVC, unused */
	[3] = { 4, 4 },		/* Prefetch abort */
	[4] = { 8, 8 },		/* Data abort */
	[5] = { 0, 0 },		/* HVC, unused */
	[6] = { 4, 4 },		/* IRQ, unused */
	[7] = { 4, 4 },		/* FIQ, unused */
};
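/*
 * Rows above are indexed by AArch32 vector offset / 4 (i.e. the
 * exception type), columns by the instruction set at the point of the
 * fault: [0] is the ARM return offset, [1] the Thumb one.
 */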
static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
{
	unsigned long cpsr;
	unsigned long new_spsr_value = *vcpu_cpsr(vcpu);
	bool is_thumb = (new_spsr_value & COMPAT_PSR_T_BIT);
	u32 return_offset = return_offsets[vect_offset >> 2][is_thumb];
	u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);
	cpsr = mode | COMPAT_PSR_I_BIT;

	if (sctlr & (1 << 30))		/* SCTLR.TE: take exceptions in Thumb state */
		cpsr |= COMPAT_PSR_T_BIT;
	if (sctlr & (1 << 25))		/* SCTLR.EE: big-endian exception entry */
		cpsr |= COMPAT_PSR_E_BIT;

	*vcpu_cpsr(vcpu) = cpsr;
	/* Note: These now point to the banked copies */
	*vcpu_spsr(vcpu) = new_spsr_value;
	*vcpu_reg32(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
	/* Branch to exception vector */
	if (sctlr & (1 << 13))		/* SCTLR.V: high exception vectors (Hivecs) */
		vect_offset += 0xffff0000;
	else /* always have security exceptions */
		vect_offset += vcpu_cp15(vcpu, c12_VBAR);

	*vcpu_pc(vcpu) = vect_offset;
}
static void inject_undef32(struct kvm_vcpu *vcpu)
{
	prepare_fault32(vcpu, COMPAT_PSR_MODE_UND, 4);
}
/*
 * Modelled after the TakeDataAbortException() and
 * TakePrefetchAbortException() pseudocode.
 */
static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
			 unsigned long addr)
{
	u32 vect_offset;
	u32 *far, *fsr;
	bool is_lpae;

	if (is_pabt) {
		vect_offset = 12;	/* prefetch abort vector */
		far = &vcpu_cp15(vcpu, c6_IFAR);
		fsr = &vcpu_cp15(vcpu, c5_IFSR);
	} else {
		vect_offset = 16;	/* data abort vector */
		far = &vcpu_cp15(vcpu, c6_DFAR);
		fsr = &vcpu_cp15(vcpu, c5_DFSR);
	}
	prepare_fault32(vcpu, COMPAT_PSR_MODE_ABT | COMPAT_PSR_A_BIT, vect_offset);

	*far = addr;

	/* Give the guest an IMPLEMENTATION DEFINED exception */
	is_lpae = (vcpu_cp15(vcpu, c2_TTBCR) >> 31);
	if (is_lpae)
		*fsr = 1 << 9 | 0x34;	/* long-descriptor FSR format */
	else
		*fsr = 0x14;		/* short-descriptor FSR format */
}
enum exception_type {
	except_type_sync	= 0,
	except_type_irq		= 0x80,
	except_type_fiq		= 0x100,
	except_type_serror	= 0x180,
};
static u64 get_except_vector(struct kvm_vcpu *vcpu, enum exception_type type)
{
	u64 exc_offset;

	switch (*vcpu_cpsr(vcpu) & (PSR_MODE_MASK | PSR_MODE32_BIT)) {
	case PSR_MODE_EL1t:
		exc_offset = CURRENT_EL_SP_EL0_VECTOR;
		break;
	case PSR_MODE_EL1h:
		exc_offset = CURRENT_EL_SP_ELx_VECTOR;
		break;
	case PSR_MODE_EL0t:
		exc_offset = LOWER_EL_AArch64_VECTOR;
		break;
	default:
		exc_offset = LOWER_EL_AArch32_VECTOR;
	}

	return vcpu_sys_reg(vcpu, VBAR_EL1) + exc_offset + type;
}
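/*
 * For example, a synchronous exception taken from AArch64 EL0
 * (PSR_MODE_EL0t) is delivered at VBAR_EL1 + 0x400 + 0x0, i.e. the
 * "lower EL using AArch64" synchronous vector.
 */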
static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr)
{
	unsigned long cpsr = *vcpu_cpsr(vcpu);
	bool is_aarch32 = vcpu_mode_is_32bit(vcpu);
	u32 esr = 0;
	*vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu);
	*vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);

	*vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
	*vcpu_spsr(vcpu) = cpsr;

	vcpu_sys_reg(vcpu, FAR_EL1) = addr;
	/*
	 * Build an {i,d}abort, depending on the level and the
	 * instruction set. Report an external synchronous abort.
	 */
	if (kvm_vcpu_trap_il_is32bit(vcpu))
		esr |= ESR_ELx_IL;
	/*
	 * Here, the guest runs in AArch64 mode when in EL1. If we get
	 * an AArch32 fault, it means we managed to trap an EL0 fault.
	 */
	if (is_aarch32 || (cpsr & PSR_MODE_MASK) == PSR_MODE_EL0t)
		esr |= (ESR_ELx_EC_IABT_LOW << ESR_ELx_EC_SHIFT);
	else
		esr |= (ESR_ELx_EC_IABT_CUR << ESR_ELx_EC_SHIFT);

	/*
	 * Data aborts reuse the instruction abort encoding: ORing in
	 * ESR_ELx_EC_DABT_LOW (0x24) turns IABT_LOW (0x20) into
	 * DABT_LOW (0x24) and IABT_CUR (0x21) into DABT_CUR (0x25).
	 */
	if (!is_iabt)
		esr |= ESR_ELx_EC_DABT_LOW << ESR_ELx_EC_SHIFT;

	vcpu_sys_reg(vcpu, ESR_EL1) = esr | ESR_ELx_FSC_EXTABT;
}
static void inject_undef64(struct kvm_vcpu *vcpu)
{
	unsigned long cpsr = *vcpu_cpsr(vcpu);
	u32 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT);
	*vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu);
	*vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);

	*vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
	*vcpu_spsr(vcpu) = cpsr;
	/*
	 * Build an unknown exception, depending on the instruction
	 * set.
	 */
	if (kvm_vcpu_trap_il_is32bit(vcpu))
		esr |= ESR_ELx_IL;

	vcpu_sys_reg(vcpu, ESR_EL1) = esr;
}
/**
 * kvm_inject_dabt - inject a data abort into the guest
 * @vcpu: The VCPU to receive the data abort
 * @addr: The address to report in the DFAR
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
	if (!(vcpu->arch.hcr_el2 & HCR_RW))	/* RW clear: guest EL1 is AArch32 */
		inject_abt32(vcpu, false, addr);
	else
		inject_abt64(vcpu, false, addr);
}
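/*
 * A typical (hypothetical) caller is an MMIO emulation path that cannot
 * handle a guest access and reflects the fault back instead, e.g.:
 *
 *	kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
 *
 * The use of kvm_vcpu_get_hfar() here is assumed purely for illustration.
 */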
/**
 * kvm_inject_pabt - inject a prefetch abort into the guest
 * @vcpu: The VCPU to receive the prefetch abort
 * @addr: The address to report in the IFAR
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
	if (!(vcpu->arch.hcr_el2 & HCR_RW))
		inject_abt32(vcpu, true, addr);
	else
		inject_abt64(vcpu, true, addr);
}
/**
 * kvm_inject_undefined - inject an undefined instruction into the guest
 * @vcpu: The VCPU to receive the undefined exception
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_undefined(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.hcr_el2 & HCR_RW))
		inject_undef32(vcpu);
	else
		inject_undef64(vcpu);
}
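/*
 * For instance, the sys_reg emulation code falls back to
 * kvm_inject_undefined(vcpu) when a guest accesses a system register
 * that is not handled, so the guest sees an UNDEFINED exception.
 */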
/**
 * kvm_inject_vabt - inject an async abort / SError into the guest
 * @vcpu: The VCPU to receive the exception
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_vabt(struct kvm_vcpu *vcpu)
{
	/*
	 * Setting HCR_EL2.VSE marks a virtual SError as pending; the CPU
	 * delivers it to the guest once it unmasks PSTATE.A.
	 */
	vcpu_set_hcr(vcpu, vcpu_get_hcr(vcpu) | HCR_VSE);
}