/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/kvm_emulate.h
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#ifndef __ARM64_KVM_EMULATE_H__
#define __ARM64_KVM_EMULATE_H__

#include <linux/kvm_host.h>

#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_hyp.h>
#include <asm/ptrace.h>
#include <asm/cputype.h>
#include <asm/virt.h>

unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num);
unsigned long vcpu_read_spsr32(const struct kvm_vcpu *vcpu);
void vcpu_write_spsr32(struct kvm_vcpu *vcpu, unsigned long v);

bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr);

void kvm_inject_undefined(struct kvm_vcpu *vcpu);
void kvm_inject_vabt(struct kvm_vcpu *vcpu);
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_undef32(struct kvm_vcpu *vcpu);
void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt32(struct kvm_vcpu *vcpu, unsigned long addr);

static inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
{
        return !(vcpu->arch.hcr_el2 & HCR_RW);
}

static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
        vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
        if (is_kernel_in_hyp_mode())
                vcpu->arch.hcr_el2 |= HCR_E2H;
        if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN)) {
                /* route synchronous external abort exceptions to EL2 */
                vcpu->arch.hcr_el2 |= HCR_TEA;
                /* trap error record accesses */
                vcpu->arch.hcr_el2 |= HCR_TERR;
        }

        if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) {
                vcpu->arch.hcr_el2 |= HCR_FWB;
        } else {
                /*
                 * For non-FWB CPUs, we trap VM ops (HCR_EL2.TVM) until M+C
                 * get set in SCTLR_EL1 such that we can detect when the guest
                 * MMU gets turned on and do the necessary cache maintenance
                 * then.
                 */
                vcpu->arch.hcr_el2 |= HCR_TVM;
        }

        if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
                vcpu->arch.hcr_el2 &= ~HCR_RW;

        /*
         * TID3: trap feature register accesses that we virtualise.
         * For now this is conditional, since no AArch32 feature regs
         * are currently virtualised.
         */
        if (!vcpu_el1_is_32bit(vcpu))
                vcpu->arch.hcr_el2 |= HCR_TID3;

        if (cpus_have_const_cap(ARM64_MISMATCHED_CACHE_TYPE) ||
            vcpu_el1_is_32bit(vcpu))
                vcpu->arch.hcr_el2 |= HCR_TID2;
}

static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu)
{
        return (unsigned long *)&vcpu->arch.hcr_el2;
}

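/*
 * Clearing the WFx traps stops trapping WFE, and only stops trapping WFI
 * when the vcpu has directly injected vLPIs mapped (GICv4), so the guest
 * can wait for those interrupts without exiting to the host.
 */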
static inline void vcpu_clear_wfx_traps(struct kvm_vcpu *vcpu)
{
        vcpu->arch.hcr_el2 &= ~HCR_TWE;
        if (atomic_read(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count))
                vcpu->arch.hcr_el2 &= ~HCR_TWI;
        else
                vcpu->arch.hcr_el2 |= HCR_TWI;
}

static inline void vcpu_set_wfx_traps(struct kvm_vcpu *vcpu)
{
        vcpu->arch.hcr_el2 |= HCR_TWE;
        vcpu->arch.hcr_el2 |= HCR_TWI;
}

static inline void vcpu_ptrauth_enable(struct kvm_vcpu *vcpu)
{
        vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK);
}

static inline void vcpu_ptrauth_disable(struct kvm_vcpu *vcpu)
{
        vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK);
}

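/*
 * Pointer authentication is handled lazily: a vcpu starts out with the
 * ptrauth traps set (HCR_EL2.API/APK cleared), and the traps are only
 * lifted via vcpu_ptrauth_enable() once the guest actually uses ptrauth.
 */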
static inline void vcpu_ptrauth_setup_lazy(struct kvm_vcpu *vcpu)
{
        if (vcpu_has_ptrauth(vcpu))
                vcpu_ptrauth_disable(vcpu);
}

static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.vsesr_el2;
}

static inline void vcpu_set_vsesr(struct kvm_vcpu *vcpu, u64 vsesr)
{
        vcpu->arch.vsesr_el2 = vsesr;
}

static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
{
        return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc;
}

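/*
 * The ELR/SPSR accessors below have to cope with the vcpu's EL1 sysregs
 * possibly being loaded on the physical CPU (VHE): in that case the hardware
 * register holds the up-to-date value, otherwise the in-memory context does.
 */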
static inline unsigned long *__vcpu_elr_el1(const struct kvm_vcpu *vcpu)
{
        return (unsigned long *)&vcpu_gp_regs(vcpu)->elr_el1;
}

static inline unsigned long vcpu_read_elr_el1(const struct kvm_vcpu *vcpu)
{
        if (vcpu->arch.sysregs_loaded_on_cpu)
                return read_sysreg_el1(SYS_ELR);
        else
                return *__vcpu_elr_el1(vcpu);
}

static inline void vcpu_write_elr_el1(const struct kvm_vcpu *vcpu, unsigned long v)
{
        if (vcpu->arch.sysregs_loaded_on_cpu)
                write_sysreg_el1(v, SYS_ELR);
        else
                *__vcpu_elr_el1(vcpu) = v;
}

static inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
{
        return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pstate;
}

static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
{
        return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT);
}

static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
{
        if (vcpu_mode_is_32bit(vcpu))
                return kvm_condition_valid32(vcpu);

        return true;
}

static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
{
        *vcpu_cpsr(vcpu) |= PSR_AA32_T_BIT;
}

/*
 * vcpu_get_reg and vcpu_set_reg should always be passed a register number
 * coming from a read of ESR_EL2. Otherwise, it may give the wrong result on
 * AArch32 with banked registers.
 */
static inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
                                         u8 reg_num)
{
        return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs.regs[reg_num];
}

static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
                                unsigned long val)
{
        if (reg_num != 31)
                vcpu_gp_regs(vcpu)->regs.regs[reg_num] = val;
}

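/*
 * Illustrative sketch (not a complete handler): an MMIO emulation path
 * typically extracts the transfer register from the decoded data abort
 * syndrome and then uses the accessors above, e.g.
 *
 *	int rt = kvm_vcpu_dabt_get_rd(vcpu);
 *	unsigned long data = vcpu_get_reg(vcpu, rt);	(guest store)
 *	vcpu_set_reg(vcpu, rt, data);			(completed guest load)
 */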
static inline unsigned long vcpu_read_spsr(const struct kvm_vcpu *vcpu)
{
        if (vcpu_mode_is_32bit(vcpu))
                return vcpu_read_spsr32(vcpu);

        if (vcpu->arch.sysregs_loaded_on_cpu)
                return read_sysreg_el1(SYS_SPSR);
        else
                return vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1];
}

static inline void vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long v)
{
        if (vcpu_mode_is_32bit(vcpu)) {
                vcpu_write_spsr32(vcpu, v);
                return;
        }

        if (vcpu->arch.sysregs_loaded_on_cpu)
                write_sysreg_el1(v, SYS_SPSR);
        else
                vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1] = v;
}

/*
 * The layout of SPSR for an AArch32 state is different when observed from an
 * AArch64 SPSR_ELx or an AArch32 SPSR_*. This function generates the AArch32
 * view given an AArch64 view.
 *
 * In ARM DDI 0487E.a see:
 *
 * - The AArch64 view (SPSR_EL2) in section C5.2.18, page C5-426
 * - The AArch32 view (SPSR_abt) in section G8.2.126, page G8-6256
 * - The AArch32 view (SPSR_und) in section G8.2.132, page G8-6280
 *
 * Which show the following differences:
 *
 * | Bit | AA64 | AA32 | Notes                       |
 * +-----+------+------+-----------------------------+
 * | 24  | DIT  | J    | J is RES0 in ARMv8          |
 * | 21  | SS   | DIT  | SS doesn't exist in AArch32 |
 *
 * ... and all other bits are (currently) common.
 */
static inline unsigned long host_spsr_to_spsr32(unsigned long spsr)
{
        const unsigned long overlap = BIT(24) | BIT(21);
        unsigned long dit = !!(spsr & PSR_AA32_DIT_BIT);

        spsr &= ~overlap;

        spsr |= dit << 21;

        return spsr;
}

static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
{
        u32 mode;

        if (vcpu_mode_is_32bit(vcpu)) {
                mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK;
                return mode > PSR_AA32_MODE_USR;
        }

        mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;

        return mode != PSR_MODE_EL0t;
}

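/*
 * The helpers below decode the exception syndrome (ESR_EL2, historically the
 * "HSR") and the fault addresses that were snapshotted into vcpu->arch.fault
 * on guest exit.
 */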
static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
{
        return vcpu->arch.fault.esr_el2;
}

static inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
{
        u32 esr = kvm_vcpu_get_hsr(vcpu);

        if (esr & ESR_ELx_CV)
                return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;

        return -1;
}

static inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
{
        return vcpu->arch.fault.far_el2;
}

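/*
 * HPFAR_EL2 holds bits [47:12] of the faulting IPA in its FIPA field;
 * masking and shifting left by 8 reconstructs the page-aligned intermediate
 * physical address of the stage-2 fault.
 */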
static inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
{
        return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
}

static inline u64 kvm_vcpu_get_disr(const struct kvm_vcpu *vcpu)
{
        return vcpu->arch.fault.disr_el1;
}

static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_xVC_IMM_MASK;
}

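/*
 * ESR_ELx.ISV set means the data abort carries a valid instruction syndrome
 * (register, access size, sign extension), which is what allows the access
 * to be emulated; without it KVM cannot decode the access itself.
 */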
static inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
{
        return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_ISV);
}

static inline unsigned long kvm_vcpu_dabt_iss_nisv_sanitized(const struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_get_hsr(vcpu) & (ESR_ELx_CM | ESR_ELx_WNR | ESR_ELx_FSC);
}

static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
{
        return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SSE);
}

static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
{
        return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SF);
}

static inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
{
        return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
}

static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
{
        return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW);
}

static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
{
        return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR) ||
                kvm_vcpu_dabt_iss1tw(vcpu); /* AF/DBM update */
}

static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
{
        return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_CM);
}

static inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
{
        return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
}

/* This one is not specific to Data Abort */
static inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
{
        return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_IL);
}

static inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
{
        return ESR_ELx_EC(kvm_vcpu_get_hsr(vcpu));
}

static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
}

static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC;
}

static inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC_TYPE;
}

static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
{
        /* Synchronous external abort and parity/ECC error fault codes */
        switch (kvm_vcpu_trap_get_fault(vcpu)) {
        case FSC_SEA:
        case FSC_SEA_TTW0:
        case FSC_SEA_TTW1:
        case FSC_SEA_TTW2:
        case FSC_SEA_TTW3:
        case FSC_SECC:
        case FSC_SECC_TTW0:
        case FSC_SECC_TTW1:
        case FSC_SECC_TTW2:
        case FSC_SECC_TTW3:
                return true;
        default:
                return false;
        }
}

static inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
{
        u32 esr = kvm_vcpu_get_hsr(vcpu);
        return ESR_ELx_SYS64_ISS_RT(esr);
}

static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
{
        if (kvm_vcpu_trap_is_iabt(vcpu))
                return false;

        return kvm_vcpu_dabt_iswrite(vcpu);
}

static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
{
        return vcpu_read_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
}

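/*
 * Per-vcpu state of the SSBD (ARM_SMCCC_ARCH_WORKAROUND_2) mitigation,
 * applied around guest entry and exit.
 */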
static inline bool kvm_arm_get_vcpu_workaround_2_flag(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG;
}

static inline void kvm_arm_set_vcpu_workaround_2_flag(struct kvm_vcpu *vcpu,
                                                      bool flag)
{
        if (flag)
                vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG;
        else
                vcpu->arch.workaround_flags &= ~VCPU_WORKAROUND_2_FLAG;
}

static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
{
        if (vcpu_mode_is_32bit(vcpu)) {
                *vcpu_cpsr(vcpu) |= PSR_AA32_E_BIT;
        } else {
                u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
                sctlr |= (1 << 25);
                vcpu_write_sys_reg(vcpu, sctlr, SCTLR_EL1);
        }
}

static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
{
        if (vcpu_mode_is_32bit(vcpu))
                return !!(*vcpu_cpsr(vcpu) & PSR_AA32_E_BIT);

        return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & (1 << 25));
}

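/*
 * MMIO data is exchanged with the emulation layer in host byte order; the
 * two helpers below byteswap it when the vcpu is running big-endian, for
 * guest stores and guest loads respectively.
 */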
static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
                                                    unsigned long data,
                                                    unsigned int len)
{
        if (kvm_vcpu_is_be(vcpu)) {
                switch (len) {
                case 1:
                        return data & 0xff;
                case 2:
                        return be16_to_cpu(data & 0xffff);
                case 4:
                        return be32_to_cpu(data & 0xffffffff);
                default:
                        return be64_to_cpu(data);
                }
        } else {
                switch (len) {
                case 1:
                        return data & 0xff;
                case 2:
                        return le16_to_cpu(data & 0xffff);
                case 4:
                        return le32_to_cpu(data & 0xffffffff);
                default:
                        return le64_to_cpu(data);
                }
        }

        return data;            /* Leave LE untouched */
}

static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
                                                    unsigned long data,
                                                    unsigned int len)
{
        if (kvm_vcpu_is_be(vcpu)) {
                switch (len) {
                case 1:
                        return data & 0xff;
                case 2:
                        return cpu_to_be16(data & 0xffff);
                case 4:
                        return cpu_to_be32(data & 0xffffffff);
                default:
                        return cpu_to_be64(data);
                }
        } else {
                switch (len) {
                case 1:
                        return data & 0xff;
                case 2:
                        return cpu_to_le16(data & 0xffff);
                case 4:
                        return cpu_to_le32(data & 0xffffffff);
                default:
                        return cpu_to_le64(data);
                }
        }

        return data;            /* Leave LE untouched */
}

static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
{
        if (vcpu_mode_is_32bit(vcpu))
                kvm_skip_instr32(vcpu, is_wide_instr);
        else
                *vcpu_pc(vcpu) += 4;

        /* advance the singlestep state machine */
        *vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;
}

/*
 * Skip an instruction which has been emulated at hyp while most guest sysregs
 * are live.
 */
static inline void __hyp_text __kvm_skip_instr(struct kvm_vcpu *vcpu)
{
        *vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR);
        vcpu->arch.ctxt.gp_regs.regs.pstate = read_sysreg_el2(SYS_SPSR);

        kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));

        write_sysreg_el2(vcpu->arch.ctxt.gp_regs.regs.pstate, SYS_SPSR);
        write_sysreg_el2(*vcpu_pc(vcpu), SYS_ELR);
}

#endif /* __ARM64_KVM_EMULATE_H__ */