/*
 * Fault injection for both 32 and 64bit guests.
 *
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Based on arch/arm/kvm/emulate.c
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/esr.h>
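/*
 * PSTATE for an exception taken to AArch64 EL1: EL1 with the SP_EL1
 * stack pointer selected (EL1h) and the D, A, I and F exception masks
 * all set, as the architecture requires on exception entry.
 */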
#define PSTATE_FAULT_BITS_64	(PSR_MODE_EL1h | PSR_A_BIT | PSR_F_BIT | \
				 PSR_I_BIT | PSR_D_BIT)
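/*
 * Offsets into the AArch64 vector table (based at VBAR_EL1). Each
 * 0x200-byte group is selected by where the exception came from; the
 * entry within a group is selected by the exception type (see enum
 * exception_type below: sync/IRQ/FIQ/SError at +0x0/+0x80/+0x100/+0x180).
 */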
#define CURRENT_EL_SP_EL0_VECTOR	0x0
#define CURRENT_EL_SP_ELx_VECTOR	0x200
#define LOWER_EL_AArch64_VECTOR		0x400
#define LOWER_EL_AArch32_VECTOR		0x600
static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
{
	unsigned long cpsr;
	unsigned long new_spsr_value = *vcpu_cpsr(vcpu);
	bool is_thumb = (new_spsr_value & COMPAT_PSR_T_BIT);
	u32 return_offset = (is_thumb) ? 4 : 0;
	u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);
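	/*
	 * Build the CPSR the guest will see on exception entry: the
	 * requested mode with IRQs masked. SCTLR.TE (bit 30) selects the
	 * instruction set and SCTLR.EE (bit 25) the data endianness used
	 * on exception entry.
	 */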
	cpsr = mode | COMPAT_PSR_I_BIT;

	if (sctlr & (1 << 30))
		cpsr |= COMPAT_PSR_T_BIT;
	if (sctlr & (1 << 25))
		cpsr |= COMPAT_PSR_E_BIT;

	*vcpu_cpsr(vcpu) = cpsr;
	/* Note: These now point to the banked copies */
	*vcpu_spsr(vcpu) = new_spsr_value;
	*vcpu_reg32(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;

	/* Branch to exception vector */
	if (sctlr & (1 << 13))		/* SCTLR.V: high vectors */
		vect_offset += 0xffff0000;
	else /* always have security exceptions */
		vect_offset += vcpu_cp15(vcpu, c12_VBAR);

	*vcpu_pc(vcpu) = vect_offset;
}
static void inject_undef32(struct kvm_vcpu *vcpu)
{
	/* The AArch32 Undefined Instruction vector lives at offset 4 */
	prepare_fault32(vcpu, COMPAT_PSR_MODE_UND, 4);
}
/*
 * Modelled after TakeDataAbortException() and TakePrefetchAbortException
 * pseudocode.
 */
static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
			 unsigned long addr)
{
	u32 vect_offset;
	u32 *far, *fsr;
	bool is_lpae;

	if (is_pabt) {
		vect_offset = 12;	/* Prefetch Abort vector */
		far = &vcpu_cp15(vcpu, c6_IFAR);
		fsr = &vcpu_cp15(vcpu, c5_IFSR);
	} else { /* !iabt */
		vect_offset = 16;	/* Data Abort vector */
		far = &vcpu_cp15(vcpu, c6_DFAR);
		fsr = &vcpu_cp15(vcpu, c5_DFSR);
	}

	prepare_fault32(vcpu, COMPAT_PSR_MODE_ABT | COMPAT_PSR_A_BIT, vect_offset);

	*far = addr;
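	/*
	 * The FSR format depends on the guest's translation scheme:
	 * TTBCR.EAE (bit 31) set means LPAE, i.e. the long-descriptor
	 * FSR layout, which is flagged by bit 9 of the register.
	 */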
	/* Give the guest an IMPLEMENTATION DEFINED exception */
	is_lpae = (vcpu_cp15(vcpu, c2_TTBCR) >> 31);
	if (is_lpae)
		*fsr = 1 << 9 | 0x34;
	else
		*fsr = 0x14;
}
enum exception_type {
	except_type_sync	= 0,
	except_type_irq		= 0x80,
	except_type_fiq		= 0x100,
	except_type_serror	= 0x180,
};
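/*
 * For example, a 64-bit guest running at EL1 on SP_EL1 (EL1h) that
 * takes a synchronous exception vectors to VBAR_EL1 + 0x200 + 0x0.
 */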
static u64 get_except_vector(struct kvm_vcpu *vcpu, enum exception_type type)
{
	u64 exc_offset;

	switch (*vcpu_cpsr(vcpu) & (PSR_MODE_MASK | PSR_MODE32_BIT)) {
	case PSR_MODE_EL1t:
		exc_offset = CURRENT_EL_SP_EL0_VECTOR;
		break;
	case PSR_MODE_EL1h:
		exc_offset = CURRENT_EL_SP_ELx_VECTOR;
		break;
	case PSR_MODE_EL0t:
		exc_offset = LOWER_EL_AArch64_VECTOR;
		break;
	default:
		exc_offset = LOWER_EL_AArch32_VECTOR;
	}

	return vcpu_sys_reg(vcpu, VBAR_EL1) + exc_offset + type;
}
static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr)
{
	unsigned long cpsr = *vcpu_cpsr(vcpu);
	bool is_aarch32 = vcpu_mode_is_32bit(vcpu);
	u32 esr = 0;

	*vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu);
	*vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);

	*vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
	*vcpu_spsr(vcpu) = cpsr;

	vcpu_sys_reg(vcpu, FAR_EL1) = addr;
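	/*
	 * ESR_EL1 layout, for reference: EC in bits [31:26], IL in bit
	 * [25], and the fault status code (FSC) in the low ISS bits;
	 * 0x10 means "synchronous external abort".
	 */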
	/*
	 * Build an {i,d}abort, depending on the level and the
	 * instruction set. Report an external synchronous abort.
	 */
	if (kvm_vcpu_trap_il_is32bit(vcpu))
		esr |= ESR_ELx_IL;

	/*
	 * Here, the guest runs in AArch64 mode when in EL1. If we get
	 * an AArch32 fault, it means we managed to trap an EL0 fault.
	 */
	if (is_aarch32 || (cpsr & PSR_MODE_MASK) == PSR_MODE_EL0t)
		esr |= (ESR_ELx_EC_IABT_LOW << ESR_ELx_EC_SHIFT);
	else
		esr |= (ESR_ELx_EC_IABT_CUR << ESR_ELx_EC_SHIFT);

	if (!is_iabt)
		esr |= ESR_ELx_EC_DABT_LOW << ESR_ELx_EC_SHIFT;

	vcpu_sys_reg(vcpu, ESR_EL1) = esr | ESR_ELx_FSC_EXTABT;
}
static void inject_undef64(struct kvm_vcpu *vcpu)
{
	unsigned long cpsr = *vcpu_cpsr(vcpu);
	u32 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT);

	*vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu);
	*vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);

	*vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
	*vcpu_spsr(vcpu) = cpsr;

	/*
	 * Build an unknown exception, depending on the instruction
	 * set.
	 */
	if (kvm_vcpu_trap_il_is32bit(vcpu))
		esr |= ESR_ELx_IL;

	vcpu_sys_reg(vcpu, ESR_EL1) = esr;
}
/**
 * kvm_inject_dabt - inject a data abort into the guest
 * @vcpu: The VCPU to receive the data abort
 * @addr: The address to report in the DFAR
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
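	/*
	 * HCR_EL2.RW clear means the guest's EL1 is AArch32, so take
	 * the 32-bit injection path; otherwise inject a 64-bit abort.
	 */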
	if (!(vcpu->arch.hcr_el2 & HCR_RW))
		inject_abt32(vcpu, false, addr);
	else
		inject_abt64(vcpu, false, addr);
}
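/*
 * Typical use (illustrative sketch, not taken from this file): a fault
 * handler that cannot emulate a guest access reports it back to the
 * guest as a data abort, e.g.
 *
 *	kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
 */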
/**
 * kvm_inject_pabt - inject a prefetch abort into the guest
 * @vcpu: The VCPU to receive the prefetch abort
 * @addr: The address to report in the IFAR
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
	if (!(vcpu->arch.hcr_el2 & HCR_RW))
		inject_abt32(vcpu, true, addr);
	else
		inject_abt64(vcpu, true, addr);
}
/**
 * kvm_inject_undefined - inject an undefined instruction into the guest
 * @vcpu: The VCPU to receive the undefined exception
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_undefined(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.hcr_el2 & HCR_RW))
		inject_undef32(vcpu);
	else
		inject_undef64(vcpu);
}
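/*
 * SError injection does not emulate exception entry by hand: setting
 * HCR_EL2.VSE makes the CPU itself deliver a virtual SError to the
 * guest as soon as it can take one (the bit is cleared when the
 * exception is taken).
 */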
/**
 * kvm_inject_vabt - inject an async abort / SError into the guest
 * @vcpu: The VCPU to receive the exception
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_vabt(struct kvm_vcpu *vcpu)
{
	vcpu_set_hcr(vcpu, vcpu_get_hcr(vcpu) | HCR_VSE);
}