/*
 * Fault injection for both 32 and 64bit guests.
 *
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Based on arch/arm/kvm/emulate.c
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/esr.h>
#define PSTATE_FAULT_BITS_64    (PSR_MODE_EL1h | PSR_A_BIT | PSR_F_BIT | \
                                 PSR_I_BIT | PSR_D_BIT)
#define EL1_EXCEPT_SYNC_OFFSET  0x200
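
/*
 * PSTATE_FAULT_BITS_64 mirrors the PSTATE the architecture installs on
 * exception entry to EL1: D, A, I and F all masked, executing in EL1h.
 * EL1_EXCEPT_SYNC_OFFSET (0x200) is the "synchronous, current EL with
 * SPx" slot of the EL1 vector table; injected exceptions are always
 * delivered through that vector.
 */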
static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
{
        unsigned long cpsr;
        unsigned long new_spsr_value = *vcpu_cpsr(vcpu);
        bool is_thumb = (new_spsr_value & COMPAT_PSR_T_BIT);
        u32 return_offset = (is_thumb) ? 4 : 0;
        u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);

        cpsr = mode | COMPAT_PSR_I_BIT;

        if (sctlr & (1 << 30))          /* SCTLR.TE: take exceptions in Thumb */
                cpsr |= COMPAT_PSR_T_BIT;
        if (sctlr & (1 << 25))          /* SCTLR.EE: big-endian exception entry */
                cpsr |= COMPAT_PSR_E_BIT;

        *vcpu_cpsr(vcpu) = cpsr;

        /* Note: These now point to the banked copies */
        *vcpu_spsr(vcpu) = new_spsr_value;
        *vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;

        /* Branch to exception vector */
        if (sctlr & (1 << 13))          /* SCTLR.V: high exception vectors */
                vect_offset += 0xffff0000;
        else /* always have security exceptions */
                vect_offset += vcpu_cp15(vcpu, c12_VBAR);

        *vcpu_pc(vcpu) = vect_offset;
}
static void inject_undef32(struct kvm_vcpu *vcpu)
{
        /* Offset 4 is the Undefined Instruction vector */
        prepare_fault32(vcpu, COMPAT_PSR_MODE_UND, 4);
}
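
/*
 * For reference, the AArch32 exception vector offsets used by the
 * injection helpers in this file: 0x04 is Undefined Instruction,
 * 0x0c is Prefetch Abort and 0x10 is Data Abort, relative either to
 * the high vectors at 0xffff0000 or to the VBAR base.
 */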
/*
 * Modelled after TakeDataAbortException() and TakePrefetchAbortException
 * pseudocode.
 */
static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
                         unsigned long addr)
{
        u32 vect_offset;
        u32 *far, *fsr;
        bool is_lpae;

        if (is_pabt) {
                vect_offset = 12;
                far = &vcpu_cp15(vcpu, c6_IFAR);
                fsr = &vcpu_cp15(vcpu, c5_IFSR);
        } else { /* data abort */
                vect_offset = 16;
                far = &vcpu_cp15(vcpu, c6_DFAR);
                fsr = &vcpu_cp15(vcpu, c5_DFSR);
        }

        prepare_fault32(vcpu, COMPAT_PSR_MODE_ABT | COMPAT_PSR_A_BIT, vect_offset);

        *far = addr;

        /* Give the guest an IMPLEMENTATION DEFINED exception */
        is_lpae = (vcpu_cp15(vcpu, c2_TTBCR) >> 31);
        if (is_lpae)
                *fsr = 1 << 9 | 0x34;
        else
                *fsr = 0x14;
}
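
/*
 * TTBCR.EAE (bit 31) selects the long-descriptor (LPAE) translation
 * scheme and with it the long-descriptor FSR layout, where bit 9 is
 * the format bit. The status values written above (0x34 long format,
 * 0x14 short format) are intended as IMPLEMENTATION DEFINED fault
 * status codes, per the comment above them.
 */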
static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr)
{
        unsigned long cpsr = *vcpu_cpsr(vcpu);
        bool is_aarch32;
        u32 esr = 0;

        is_aarch32 = vcpu_mode_is_32bit(vcpu);

        *vcpu_spsr(vcpu) = cpsr;
        *vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu);

        *vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
        *vcpu_pc(vcpu) = vcpu_sys_reg(vcpu, VBAR_EL1) + EL1_EXCEPT_SYNC_OFFSET;

        vcpu_sys_reg(vcpu, FAR_EL1) = addr;

        /*
         * Build an {i,d}abort, depending on the level and the
         * instruction set. Report an external synchronous abort.
         */
        if (kvm_vcpu_trap_il_is32bit(vcpu))
                esr |= ESR_EL1_IL;

        /*
         * Here, the guest runs in AArch64 mode when in EL1. If we get
         * an AArch32 fault, it means we managed to trap an EL0 fault.
         */
        if (is_aarch32 || (cpsr & PSR_MODE_MASK) == PSR_MODE_EL0t)
                esr |= (ESR_EL1_EC_IABT_EL0 << ESR_EL1_EC_SHIFT);
        else
                esr |= (ESR_EL1_EC_IABT_EL1 << ESR_EL1_EC_SHIFT);

        /*
         * The DABT exception classes differ from their IABT
         * counterparts in a single EC bit, so OR-ing the shifted
         * DABT_EL0 value turns either IABT class into the matching
         * DABT one while preserving the EL0/EL1 distinction.
         */
        if (!is_iabt)
                esr |= (ESR_EL1_EC_DABT_EL0 << ESR_EL1_EC_SHIFT);

        vcpu_sys_reg(vcpu, ESR_EL1) = esr | ESR_EL2_EC_xABT_xFSR_EXTABT;
}
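
/*
 * Resulting ESR_EL1 layout, as a sketch:
 *
 *      [31:26] EC      0x20/0x21 (IABT) or 0x24/0x25 (DABT)
 *      [25]    IL      set when a 32-bit instruction was trapped
 *      [5:0]   FSC     0x10, synchronous external abort
 *                      (ESR_EL2_EC_xABT_xFSR_EXTABT)
 */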
static void inject_undef64(struct kvm_vcpu *vcpu)
{
        unsigned long cpsr = *vcpu_cpsr(vcpu);
        u32 esr = (ESR_EL1_EC_UNKNOWN << ESR_EL1_EC_SHIFT);

        *vcpu_spsr(vcpu) = cpsr;
        *vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu);

        *vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
        *vcpu_pc(vcpu) = vcpu_sys_reg(vcpu, VBAR_EL1) + EL1_EXCEPT_SYNC_OFFSET;

        /*
         * Build an unknown exception, depending on the instruction
         * set.
         */
        if (kvm_vcpu_trap_il_is32bit(vcpu))
                esr |= ESR_EL1_IL;

        vcpu_sys_reg(vcpu, ESR_EL1) = esr;
}
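
/*
 * ESR_EL1_EC_UNKNOWN is exception class 0, "unknown reason", which is
 * the class the architecture reports for instructions that UNDEF, so
 * no further syndrome information is needed here.
 */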
/**
 * kvm_inject_dabt - inject a data abort into the guest
 * @vcpu: The VCPU to receive the data abort
 * @addr: The address to report in the DFAR
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
        if (!(vcpu->arch.hcr_el2 & HCR_RW))     /* HCR_EL2.RW clear: 32-bit guest */
                inject_abt32(vcpu, false, addr);
        else
                inject_abt64(vcpu, false, addr);
}
/**
 * kvm_inject_pabt - inject a prefetch abort into the guest
 * @vcpu: The VCPU to receive the prefetch abort
 * @addr: The address to report in the IFAR
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
        if (!(vcpu->arch.hcr_el2 & HCR_RW))
                inject_abt32(vcpu, true, addr);
        else
                inject_abt64(vcpu, true, addr);
}
/**
 * kvm_inject_undefined - inject an undefined instruction into the guest
 * @vcpu: The VCPU to receive the undefined exception
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_undefined(struct kvm_vcpu *vcpu)
{
        if (!(vcpu->arch.hcr_el2 & HCR_RW))
                inject_undef32(vcpu);
        else
                inject_undef64(vcpu);
}
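
/*
 * Usage sketch (hypothetical, not part of this file): an emulation path
 * that cannot satisfy a guest access could report the failure back to
 * the guest instead of terminating it. handle_guest_access() below is a
 * made-up stand-in for whatever emulation is being attempted:
 *
 *      if (!handle_guest_access(vcpu, fault_addr))
 *              kvm_inject_dabt(vcpu, fault_addr);
 *
 * The helpers pick the 32-bit or 64-bit injection path from HCR_EL2.RW,
 * so callers do not need to care which mode the guest runs in.
 */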