/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/kvm_emulate.h
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#ifndef __ARM64_KVM_EMULATE_H__
#define __ARM64_KVM_EMULATE_H__

#include <linux/kvm_host.h>

#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmio.h>
#include <asm/ptrace.h>
#include <asm/cputype.h>
#include <asm/virt.h>
unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num);
unsigned long vcpu_read_spsr32(const struct kvm_vcpu *vcpu);
void vcpu_write_spsr32(struct kvm_vcpu *vcpu, unsigned long v);

bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr);

void kvm_inject_undefined(struct kvm_vcpu *vcpu);
void kvm_inject_vabt(struct kvm_vcpu *vcpu);
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_undef32(struct kvm_vcpu *vcpu);
void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt32(struct kvm_vcpu *vcpu, unsigned long addr);
static inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.hcr_el2 & HCR_RW);
}

static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
	if (is_kernel_in_hyp_mode())
		vcpu->arch.hcr_el2 |= HCR_E2H;
	if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN)) {
		/* route synchronous external abort exceptions to EL2 */
		vcpu->arch.hcr_el2 |= HCR_TEA;
		/* trap error record accesses */
		vcpu->arch.hcr_el2 |= HCR_TERR;
	}

	if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
		vcpu->arch.hcr_el2 |= HCR_FWB;

	if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
		vcpu->arch.hcr_el2 &= ~HCR_RW;

	/*
	 * TID3: trap feature register accesses that we virtualise.
	 * For now this is conditional, since no AArch32 feature regs
	 * are currently virtualised.
	 */
	if (!vcpu_el1_is_32bit(vcpu))
		vcpu->arch.hcr_el2 |= HCR_TID3;

	if (cpus_have_const_cap(ARM64_MISMATCHED_CACHE_TYPE) ||
	    vcpu_el1_is_32bit(vcpu))
		vcpu->arch.hcr_el2 |= HCR_TID2;
}
static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu->arch.hcr_el2;
}

static inline void vcpu_clear_wfe_traps(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 &= ~HCR_TWE;
}

static inline void vcpu_set_wfe_traps(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 |= HCR_TWE;
}

static inline void vcpu_ptrauth_enable(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK);
}

static inline void vcpu_ptrauth_disable(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK);
}

static inline void vcpu_ptrauth_setup_lazy(struct kvm_vcpu *vcpu)
{
	if (vcpu_has_ptrauth(vcpu))
		vcpu_ptrauth_disable(vcpu);
}

static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.vsesr_el2;
}

static inline void vcpu_set_vsesr(struct kvm_vcpu *vcpu, u64 vsesr)
{
	vcpu->arch.vsesr_el2 = vsesr;
}

static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc;
}
static inline unsigned long *__vcpu_elr_el1(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->elr_el1;
}

static inline unsigned long vcpu_read_elr_el1(const struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.sysregs_loaded_on_cpu)
		return read_sysreg_el1(SYS_ELR);

	return *__vcpu_elr_el1(vcpu);
}

static inline void vcpu_write_elr_el1(const struct kvm_vcpu *vcpu, unsigned long v)
{
	if (vcpu->arch.sysregs_loaded_on_cpu)
		write_sysreg_el1(v, SYS_ELR);
	else
		*__vcpu_elr_el1(vcpu) = v;
}

static inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pstate;
}
static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
{
	return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT);
}

static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return kvm_condition_valid32(vcpu);

	return true;
}

static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
{
	*vcpu_cpsr(vcpu) |= PSR_AA32_T_BIT;
}
/*
 * vcpu_get_reg and vcpu_set_reg should always be passed a register number
 * coming from a read of ESR_EL2. Otherwise, it may give the wrong result on
 * AArch32 with banked registers.
 */
static inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
					 u8 reg_num)
{
	return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs.regs[reg_num];
}

static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
				unsigned long val)
{
	if (reg_num != 31)
		vcpu_gp_regs(vcpu)->regs.regs[reg_num] = val;
}
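
/*
 * Illustrative sketch, not part of this header: an MMIO emulation path
 * would pull the transfer register out of the ESR_EL2 syndrome (e.g. via
 * kvm_vcpu_dabt_get_rd() below) and only then go through these accessors:
 *
 *	int rt = kvm_vcpu_dabt_get_rd(vcpu);
 *	unsigned long val = vcpu_get_reg(vcpu, rt);	(guest store)
 *	vcpu_set_reg(vcpu, rt, data);			(guest load)
 *
 * Feeding in a register number decoded by hand from the instruction
 * stream would break AArch32 guests, whose banked registers make the
 * mapping mode-dependent.
 */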
static inline unsigned long vcpu_read_spsr(const struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return vcpu_read_spsr32(vcpu);

	if (vcpu->arch.sysregs_loaded_on_cpu)
		return read_sysreg_el1(SYS_SPSR);

	return vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1];
}

static inline void vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long v)
{
	if (vcpu_mode_is_32bit(vcpu)) {
		vcpu_write_spsr32(vcpu, v);
		return;
	}

	if (vcpu->arch.sysregs_loaded_on_cpu)
		write_sysreg_el1(v, SYS_SPSR);
	else
		vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1] = v;
}
static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
{
	u32 mode;

	if (vcpu_mode_is_32bit(vcpu)) {
		mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK;
		return mode > PSR_AA32_MODE_USR;
	}

	mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;

	return mode != PSR_MODE_EL0t;
}
static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.esr_el2;
}

static inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_hsr(vcpu);

	if (esr & ESR_ELx_CV)
		return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;

	return -1;
}
static inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.far_el2;
}

static inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
{
	return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
}

static inline u64 kvm_vcpu_get_disr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.disr_el1;
}

static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_xVC_IMM_MASK;
}

static inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_ISV);
}

static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SSE);
}

static inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
{
	return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
}

static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW);
}

static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR) ||
		kvm_vcpu_dabt_iss1tw(vcpu); /* AF/DBM update */
}

static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_CM);
}

static inline int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
{
	return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
}
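
/*
 * Illustrative sketch, not part of this header: when ESR_ELx.ISV is set,
 * the accessors above are enough to reconstruct a guest access for MMIO
 * emulation without ever fetching the trapped instruction:
 *
 *	if (!kvm_vcpu_dabt_isvalid(vcpu))
 *		return -ENOSYS;				(no usable syndrome)
 *	bool is_write = kvm_vcpu_dabt_iswrite(vcpu);
 *	unsigned int len = kvm_vcpu_dabt_get_as(vcpu);	(1, 2, 4 or 8 bytes)
 *	int rt = kvm_vcpu_dabt_get_rd(vcpu);
 */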
/* This one is not specific to Data Abort */
static inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_IL);
}

static inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
{
	return ESR_ELx_EC(kvm_vcpu_get_hsr(vcpu));
}

static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
}

static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC;
}

static inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC_TYPE;
}
static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
{
	switch (kvm_vcpu_trap_get_fault(vcpu)) {
	case FSC_SEA:
	case FSC_SEA_TTW0:
	case FSC_SEA_TTW1:
	case FSC_SEA_TTW2:
	case FSC_SEA_TTW3:
	case FSC_SECC:
	case FSC_SECC_TTW0:
	case FSC_SECC_TTW1:
	case FSC_SECC_TTW2:
	case FSC_SECC_TTW3:
		return true;
	default:
		return false;
	}
}
static inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_hsr(vcpu);

	return ESR_ELx_SYS64_ISS_RT(esr);
}

static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
{
	if (kvm_vcpu_trap_is_iabt(vcpu))
		return false;

	return kvm_vcpu_dabt_iswrite(vcpu);
}

static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
{
	return vcpu_read_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
}
static inline bool kvm_arm_get_vcpu_workaround_2_flag(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG;
}

static inline void kvm_arm_set_vcpu_workaround_2_flag(struct kvm_vcpu *vcpu,
						      bool flag)
{
	if (flag)
		vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG;
	else
		vcpu->arch.workaround_flags &= ~VCPU_WORKAROUND_2_FLAG;
}
static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu)) {
		*vcpu_cpsr(vcpu) |= PSR_AA32_E_BIT;
	} else {
		u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);

		sctlr |= (1 << 25);
		vcpu_write_sys_reg(vcpu, sctlr, SCTLR_EL1);
	}
}

static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return !!(*vcpu_cpsr(vcpu) & PSR_AA32_E_BIT);

	return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & (1 << 25));
}
static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return be16_to_cpu(data & 0xffff);
		case 4:
			return be32_to_cpu(data & 0xffffffff);
		default:
			return be64_to_cpu(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return le16_to_cpu(data & 0xffff);
		case 4:
			return le32_to_cpu(data & 0xffffffff);
		default:
			return le64_to_cpu(data);
		}
	}

	return data;		/* Leave LE untouched */
}
static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_be16(data & 0xffff);
		case 4:
			return cpu_to_be32(data & 0xffffffff);
		default:
			return cpu_to_be64(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_le16(data & 0xffff);
		case 4:
			return cpu_to_le32(data & 0xffffffff);
		default:
			return cpu_to_le64(data);
		}
	}

	return data;		/* Leave LE untouched */
}
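
/*
 * Illustrative sketch, not part of this header: a trapped guest store
 * converts the register value to host byte order on the way out, and a
 * load converts the backend's reply on the way back in:
 *
 *	data = vcpu_data_guest_to_host(vcpu, vcpu_get_reg(vcpu, rt), len);
 *	...
 *	vcpu_set_reg(vcpu, rt, vcpu_data_host_to_guest(vcpu, data, len));
 *
 * For a little-endian guest both helpers are effectively no-ops, since
 * the arm64 host always runs little-endian.
 */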
static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
{
	if (vcpu_mode_is_32bit(vcpu))
		kvm_skip_instr32(vcpu, is_wide_instr);
	else
		*vcpu_pc(vcpu) += 4;

	/* advance the singlestep state machine */
	*vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;
}
/*
 * Skip an instruction which has been emulated at hyp while most guest sysregs
 * are live.
 */
static inline void __hyp_text __kvm_skip_instr(struct kvm_vcpu *vcpu)
{
	*vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR);
	vcpu->arch.ctxt.gp_regs.regs.pstate = read_sysreg_el2(SYS_SPSR);

	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));

	write_sysreg_el2(vcpu->arch.ctxt.gp_regs.regs.pstate, SYS_SPSR);
	write_sysreg_el2(*vcpu_pc(vcpu), SYS_ELR);
}

#endif /* __ARM64_KVM_EMULATE_H__ */