/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/kvm_emulate.h
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#ifndef __ARM64_KVM_EMULATE_H__
#define __ARM64_KVM_EMULATE_H__

#include <linux/kvm_host.h>

#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_hyp.h>
#include <asm/ptrace.h>
#include <asm/cputype.h>
#include <asm/virt.h>

#define CURRENT_EL_SP_EL0_VECTOR	0x0
#define CURRENT_EL_SP_ELx_VECTOR	0x200
#define LOWER_EL_AArch64_VECTOR		0x400
#define LOWER_EL_AArch32_VECTOR		0x600

enum exception_type {
	except_type_sync	= 0,
	except_type_irq		= 0x80,
	except_type_fiq		= 0x100,
	except_type_serror	= 0x180,
};

bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
void kvm_skip_instr32(struct kvm_vcpu *vcpu);

void kvm_inject_undefined(struct kvm_vcpu *vcpu);
void kvm_inject_vabt(struct kvm_vcpu *vcpu);
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);

static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.hcr_el2 & HCR_RW);
}

static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
	if (is_kernel_in_hyp_mode())
		vcpu->arch.hcr_el2 |= HCR_E2H;
	if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN)) {
		/* route synchronous external abort exceptions to EL2 */
		vcpu->arch.hcr_el2 |= HCR_TEA;
		/* trap error record accesses */
		vcpu->arch.hcr_el2 |= HCR_TERR;
	}

	if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) {
		vcpu->arch.hcr_el2 |= HCR_FWB;
	} else {
		/*
		 * For non-FWB CPUs, we trap VM ops (HCR_EL2.TVM) until M+C
		 * get set in SCTLR_EL1 such that we can detect when the guest
		 * MMU gets turned on and do the necessary cache maintenance
		 * then.
		 */
		vcpu->arch.hcr_el2 |= HCR_TVM;
	}

	if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
		vcpu->arch.hcr_el2 &= ~HCR_RW;

	/*
	 * TID3: trap feature register accesses that we virtualise.
	 * For now this is conditional, since no AArch32 feature regs
	 * are currently virtualised.
	 */
	if (!vcpu_el1_is_32bit(vcpu))
		vcpu->arch.hcr_el2 |= HCR_TID3;

	if (cpus_have_const_cap(ARM64_MISMATCHED_CACHE_TYPE) ||
	    vcpu_el1_is_32bit(vcpu))
		vcpu->arch.hcr_el2 |= HCR_TID2;
}

static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu->arch.hcr_el2;
}

static inline void vcpu_clear_wfx_traps(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 &= ~HCR_TWE;
	if (atomic_read(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count) ||
	    vcpu->kvm->arch.vgic.nassgireq)
		vcpu->arch.hcr_el2 &= ~HCR_TWI;
	else
		vcpu->arch.hcr_el2 |= HCR_TWI;
}

static inline void vcpu_set_wfx_traps(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 |= HCR_TWE;
	vcpu->arch.hcr_el2 |= HCR_TWI;
}

static inline void vcpu_ptrauth_enable(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK);
}

static inline void vcpu_ptrauth_disable(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK);
}

static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.vsesr_el2;
}

static inline void vcpu_set_vsesr(struct kvm_vcpu *vcpu, u64 vsesr)
{
	vcpu->arch.vsesr_el2 = vsesr;
}

static __always_inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->pc;
}

static __always_inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->pstate;
}

static __always_inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
{
	return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT);
}

static __always_inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return kvm_condition_valid32(vcpu);

	return true;
}

static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
{
	*vcpu_cpsr(vcpu) |= PSR_AA32_T_BIT;
}

/*
 * vcpu_get_reg and vcpu_set_reg should always be passed a register number
 * coming from a read of ESR_EL2. Otherwise, it may give the wrong result on
 * AArch32 with banked registers.
 */
static __always_inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
						  u8 reg_num)
{
	return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs[reg_num];
}

static __always_inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
					 unsigned long val)
{
	vcpu_gp_regs(vcpu)->regs[reg_num] = val;
}

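/*
 * Illustrative usage sketch, not part of the original header: the register
 * number given to vcpu_get_reg()/vcpu_set_reg() should be taken straight
 * from the ESR_EL2 syndrome, e.g. via kvm_vcpu_dabt_get_rd() or
 * kvm_vcpu_sys_get_rt(). Register 31 reads as zero, matching the XZR/WZR
 * convention. A trap handler emulating an access would roughly do:
 *
 *	int rt = kvm_vcpu_dabt_get_rd(vcpu);
 *	unsigned long val = vcpu_get_reg(vcpu, rt);	// source for a store
 *	vcpu_set_reg(vcpu, rt, emulated_data);		// destination of a load
 *
 * where 'emulated_data' is a placeholder for whatever the handler produced.
 */
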
/*
 * The layout of SPSR for an AArch32 state is different when observed from an
 * AArch64 SPSR_ELx or an AArch32 SPSR_*. This function generates the AArch32
 * view given an AArch64 view.
 *
 * In ARM DDI 0487E.a see:
 *
 * - The AArch64 view (SPSR_EL2) in section C5.2.18, page C5-426
 * - The AArch32 view (SPSR_abt) in section G8.2.126, page G8-6256
 * - The AArch32 view (SPSR_und) in section G8.2.132, page G8-6280
 *
 * Which show the following differences:
 *
 * | Bit | AA64 | AA32 | Notes                       |
 * +-----+------+------+-----------------------------+
 * | 24  | DIT  | J    | J is RES0 in ARMv8          |
 * | 21  | SS   | DIT  | SS doesn't exist in AArch32 |
 *
 * ... and all other bits are (currently) common.
 */
static inline unsigned long host_spsr_to_spsr32(unsigned long spsr)
{
	const unsigned long overlap = BIT(24) | BIT(21);
	unsigned long dit = !!(spsr & PSR_AA32_DIT_BIT);

	return (spsr & ~overlap) | dit << 21;
}

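/*
 * Worked example, not part of the original header: per the table above,
 * host_spsr_to_spsr32() keeps every bit the two views have in common,
 * clears bit 24 (J, RES0 in ARMv8) and bit 21, and re-inserts the DIT
 * value extracted from the source SPSR at bit 21, its AArch32 position:
 *
 *	unsigned long spsr32 = host_spsr_to_spsr32(spsr64);
 *	// spsr32 bit 24 is now 0; bit 21 carries DIT for AArch32 software.
 */
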
static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
{
	u32 mode;

	if (vcpu_mode_is_32bit(vcpu)) {
		mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK;
		return mode > PSR_AA32_MODE_USR;
	}

	mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;

	return mode != PSR_MODE_EL0t;
}

static __always_inline u32 kvm_vcpu_get_esr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.esr_el2;
}

static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_esr(vcpu);

	if (esr & ESR_ELx_CV)
		return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;

	return -1;
}

static __always_inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.far_el2;
}

static __always_inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
{
	return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
}

static inline u64 kvm_vcpu_get_disr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.disr_el1;
}

static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_xVC_IMM_MASK;
}

static __always_inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_ISV);
}

static inline unsigned long kvm_vcpu_dabt_iss_nisv_sanitized(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & (ESR_ELx_CM | ESR_ELx_WNR | ESR_ELx_FSC);
}

static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SSE);
}

static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SF);
}

static __always_inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
{
	return (kvm_vcpu_get_esr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
}

static __always_inline bool kvm_vcpu_abt_iss1tw(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_S1PTW);
}

/* Always check for S1PTW *before* using this. */
static __always_inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_WNR;
}

static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_CM);
}

static __always_inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
{
	return 1 << ((kvm_vcpu_get_esr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
}

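/*
 * Illustrative sketch, not part of the original header: an MMIO data abort
 * is typically decoded by combining the accessors above, checking S1PTW
 * before trusting the access direction (see kvm_vcpu_dabt_iswrite() above
 * and kvm_is_write_fault() below):
 *
 *	if (kvm_vcpu_dabt_isvalid(vcpu) && !kvm_vcpu_abt_iss1tw(vcpu)) {
 *		bool is_write = kvm_vcpu_dabt_iswrite(vcpu);
 *		unsigned int len = kvm_vcpu_dabt_get_as(vcpu);	// 1, 2, 4 or 8
 *		int rt = kvm_vcpu_dabt_get_rd(vcpu);
 *		// ...forward a 'len'-byte access at the fault IPA to the
 *		// device model...
 *	}
 */
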
/* This one is not specific to Data Abort */
static __always_inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_IL);
}

static __always_inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
{
	return ESR_ELx_EC(kvm_vcpu_get_esr(vcpu));
}

static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
}

static inline bool kvm_vcpu_trap_is_exec_fault(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_trap_is_iabt(vcpu) && !kvm_vcpu_abt_iss1tw(vcpu);
}

static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC;
}

static __always_inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC_TYPE;
}

static __always_inline u8 kvm_vcpu_trap_get_fault_level(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC_LEVEL;
}

static __always_inline bool kvm_vcpu_abt_issea(const struct kvm_vcpu *vcpu)
{
	switch (kvm_vcpu_trap_get_fault(vcpu)) {
	case FSC_SEA:
	case FSC_SEA_TTW0:
	case FSC_SEA_TTW1:
	case FSC_SEA_TTW2:
	case FSC_SEA_TTW3:
	case FSC_SECC:
	case FSC_SECC_TTW0:
	case FSC_SECC_TTW1:
	case FSC_SECC_TTW2:
	case FSC_SECC_TTW3:
		return true;
	default:
		return false;
	}
}

static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_esr(vcpu);
	return ESR_ELx_SYS64_ISS_RT(esr);
}

static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
{
	if (kvm_vcpu_abt_iss1tw(vcpu))
		return true;

	if (kvm_vcpu_trap_is_iabt(vcpu))
		return false;

	return kvm_vcpu_dabt_iswrite(vcpu);
}

static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
{
	return vcpu_read_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
}

static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu)) {
		*vcpu_cpsr(vcpu) |= PSR_AA32_E_BIT;
	} else {
		u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);

		sctlr |= (1 << 25);
		vcpu_write_sys_reg(vcpu, sctlr, SCTLR_EL1);
	}
}

static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return !!(*vcpu_cpsr(vcpu) & PSR_AA32_E_BIT);

	return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & (1 << 25));
}

static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return be16_to_cpu(data & 0xffff);
		case 4:
			return be32_to_cpu(data & 0xffffffff);
		default:
			return be64_to_cpu(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return le16_to_cpu(data & 0xffff);
		case 4:
			return le32_to_cpu(data & 0xffffffff);
		default:
			return le64_to_cpu(data);
		}
	}

	return data;		/* Leave LE untouched */
}

static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_be16(data & 0xffff);
		case 4:
			return cpu_to_be32(data & 0xffffffff);
		default:
			return cpu_to_be64(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_le16(data & 0xffff);
		case 4:
			return cpu_to_le32(data & 0xffffffff);
		default:
			return cpu_to_le64(data);
		}
	}

	return data;		/* Leave LE untouched */
}

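/*
 * Illustrative sketch, not part of the original header: the two helpers
 * above sit on either side of an emulated MMIO access. Register contents
 * are converted to host byte order before being handed to the device model
 * (essentially a no-op for a little-endian guest), and data coming back is
 * converted to the guest's current endianness before being written to the
 * destination register:
 *
 *	// store: guest register -> device model
 *	data = vcpu_data_guest_to_host(vcpu, vcpu_get_reg(vcpu, rt), len);
 *
 *	// load completion: device model -> guest register
 *	vcpu_set_reg(vcpu, rt, vcpu_data_host_to_guest(vcpu, data, len));
 *
 * where 'rt', 'len' and 'data' come from the data abort decode sketched
 * earlier.
 */
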
static __always_inline void kvm_incr_pc(struct kvm_vcpu *vcpu)
{
	vcpu->arch.flags |= KVM_ARM64_INCREMENT_PC;
}

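/*
 * Illustrative note, not part of the original header: emulation code does
 * not bump PC directly. Setting KVM_ARM64_INCREMENT_PC here makes the PC
 * update happen later, when the vcpu state is committed before the next
 * guest entry. A handler that has fully emulated the trapped instruction
 * would typically end with:
 *
 *	kvm_incr_pc(vcpu);
 *	return 1;	// resume the guest
 */
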
#endif /* __ARM64_KVM_EMULATE_H__ */