/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/kvm_emulate.h
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#ifndef __ARM64_KVM_EMULATE_H__
#define __ARM64_KVM_EMULATE_H__

#include <linux/bitfield.h>
#include <linux/kvm_host.h>

#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_nested.h>
#include <asm/ptrace.h>
#include <asm/cputype.h>
#include <asm/virt.h>
#define CURRENT_EL_SP_EL0_VECTOR	0x0
#define CURRENT_EL_SP_ELx_VECTOR	0x200
#define LOWER_EL_AArch64_VECTOR		0x400
#define LOWER_EL_AArch32_VECTOR		0x600

enum exception_type {
	except_type_sync	= 0,
	except_type_irq		= 0x80,
	except_type_fiq		= 0x100,
	except_type_serror	= 0x180,
};

#define kvm_exception_type_names		\
	{ except_type_sync,	"SYNC"   },	\
	{ except_type_irq,	"IRQ"    },	\
	{ except_type_fiq,	"FIQ"    },	\
	{ except_type_serror,	"SERROR" }
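
/*
 * Illustrative note (added here, not from the upstream header): a vector
 * address is the sum of a base offset and an exception-type offset. For
 * example, an SError taken from a lower EL running AArch64 is injected at
 *
 *	VBAR_ELx + LOWER_EL_AArch64_VECTOR + except_type_serror
 *	= VBAR_ELx + 0x400 + 0x180 = VBAR_ELx + 0x580
 */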
bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
void kvm_skip_instr32(struct kvm_vcpu *vcpu);

void kvm_inject_undefined(struct kvm_vcpu *vcpu);
void kvm_inject_vabt(struct kvm_vcpu *vcpu);
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_size_fault(struct kvm_vcpu *vcpu);

void kvm_vcpu_wfi(struct kvm_vcpu *vcpu);

void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu);
int kvm_inject_nested_sync(struct kvm_vcpu *vcpu, u64 esr_el2);
int kvm_inject_nested_irq(struct kvm_vcpu *vcpu);
static inline void kvm_inject_nested_sve_trap(struct kvm_vcpu *vcpu)
{
	u64 esr = FIELD_PREP(ESR_ELx_EC_MASK, ESR_ELx_EC_SVE) |
		  ESR_ELx_IL;

	kvm_inject_nested_sync(vcpu, esr);
}
#if defined(__KVM_VHE_HYPERVISOR__) || defined(__KVM_NVHE_HYPERVISOR__)
static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.hcr_el2 & HCR_RW);
}
#else
static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
{
	return vcpu_has_feature(vcpu, KVM_ARM_VCPU_EL1_32BIT);
}
#endif
static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
	if (!vcpu_has_run_once(vcpu))
		vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;

	/*
	 * For non-FWB CPUs, we trap VM ops (HCR_EL2.TVM) until M+C
	 * get set in SCTLR_EL1 such that we can detect when the guest
	 * MMU gets turned on and do the necessary cache maintenance
	 * then.
	 */
	if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
		vcpu->arch.hcr_el2 |= HCR_TVM;
}
static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu->arch.hcr_el2;
}
static inline void vcpu_clear_wfx_traps(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 &= ~HCR_TWE;
	if (atomic_read(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count) ||
	    vcpu->kvm->arch.vgic.nassgireq)
		vcpu->arch.hcr_el2 &= ~HCR_TWI;
	else
		vcpu->arch.hcr_el2 |= HCR_TWI;
}
static inline void vcpu_set_wfx_traps(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 |= HCR_TWE;
	vcpu->arch.hcr_el2 |= HCR_TWI;
}
static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.vsesr_el2;
}

static inline void vcpu_set_vsesr(struct kvm_vcpu *vcpu, u64 vsesr)
{
	vcpu->arch.vsesr_el2 = vsesr;
}
static __always_inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->pc;
}

static __always_inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->pstate;
}
static __always_inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
{
	return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT);
}

static __always_inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return kvm_condition_valid32(vcpu);

	return true;
}

static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
{
	*vcpu_cpsr(vcpu) |= PSR_AA32_T_BIT;
}
/*
 * vcpu_get_reg and vcpu_set_reg should always be passed a register number
 * coming from a read of ESR_EL2. Otherwise, it may give the wrong result on
 * AArch32 with banked registers.
 */
static __always_inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
						  u8 reg_num)
{
	return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs[reg_num];
}

static __always_inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
					 unsigned long val)
{
	if (reg_num != 31)
		vcpu_gp_regs(vcpu)->regs[reg_num] = val;
}
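
/*
 * Usage sketch (illustrative, not part of this header): completing a trapped
 * MMIO read by storing the emulated value into the register named by the
 * syndrome. "rt" must come from a decode of ESR_EL2, e.g. via
 * kvm_vcpu_dabt_get_rd() further down in this file:
 *
 *	int rt = kvm_vcpu_dabt_get_rd(vcpu);
 *	unsigned long data = ...;	// value produced by device emulation
 *	vcpu_set_reg(vcpu, rt, data);	// writes to reg 31 (XZR) are dropped
 */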
static inline bool vcpu_is_el2_ctxt(const struct kvm_cpu_context *ctxt)
{
	switch (ctxt->regs.pstate & (PSR_MODE32_BIT | PSR_MODE_MASK)) {
	case PSR_MODE_EL2h:
	case PSR_MODE_EL2t:
		return true;
	default:
		return false;
	}
}

static inline bool vcpu_is_el2(const struct kvm_vcpu *vcpu)
{
	return vcpu_is_el2_ctxt(&vcpu->arch.ctxt);
}
static inline bool __vcpu_el2_e2h_is_set(const struct kvm_cpu_context *ctxt)
{
	return (!cpus_have_final_cap(ARM64_HAS_HCR_NV1) ||
		(ctxt_sys_reg(ctxt, HCR_EL2) & HCR_E2H));
}

static inline bool vcpu_el2_e2h_is_set(const struct kvm_vcpu *vcpu)
{
	return __vcpu_el2_e2h_is_set(&vcpu->arch.ctxt);
}

static inline bool __vcpu_el2_tge_is_set(const struct kvm_cpu_context *ctxt)
{
	return ctxt_sys_reg(ctxt, HCR_EL2) & HCR_TGE;
}

static inline bool vcpu_el2_tge_is_set(const struct kvm_vcpu *vcpu)
{
	return __vcpu_el2_tge_is_set(&vcpu->arch.ctxt);
}
static inline bool __is_hyp_ctxt(const struct kvm_cpu_context *ctxt)
{
	/*
	 * We are in a hypervisor context if the vcpu mode is EL2 or
	 * E2H and TGE bits are set. The latter means we are in the user space
	 * of the VHE kernel. ARMv8.1 ARM describes this as 'InHost'.
	 *
	 * Note that the HCR_EL2.{E2H,TGE}={0,1} isn't really handled in the
	 * rest of the KVM code, and will result in a misbehaving guest.
	 */
	return vcpu_is_el2_ctxt(ctxt) ||
		(__vcpu_el2_e2h_is_set(ctxt) && __vcpu_el2_tge_is_set(ctxt)) ||
		__vcpu_el2_tge_is_set(ctxt);
}

static inline bool is_hyp_ctxt(const struct kvm_vcpu *vcpu)
{
	return vcpu_has_nv(vcpu) && __is_hyp_ctxt(&vcpu->arch.ctxt);
}
/*
 * The layout of SPSR for an AArch32 state is different when observed from an
 * AArch64 SPSR_ELx or an AArch32 SPSR_*. This function generates the AArch32
 * view given an AArch64 view.
 *
 * In ARM DDI 0487E.a see:
 *
 * - The AArch64 view (SPSR_EL2) in section C5.2.18, page C5-426
 * - The AArch32 view (SPSR_abt) in section G8.2.126, page G8-6256
 * - The AArch32 view (SPSR_und) in section G8.2.132, page G8-6280
 *
 * Which show the following differences:
 *
 * | Bit | AA64 | AA32 | Notes                       |
 * +-----+------+------+-----------------------------+
 * | 24  | DIT  | J    | J is RES0 in ARMv8          |
 * | 21  | SS   | DIT  | SS doesn't exist in AArch32 |
 *
 * ... and all other bits are (currently) common.
 */
static inline unsigned long host_spsr_to_spsr32(unsigned long spsr)
{
	const unsigned long overlap = BIT(24) | BIT(21);
	unsigned long dit = !!(spsr & PSR_AA32_DIT_BIT);

	spsr &= ~overlap;

	spsr |= dit << 21;

	return spsr;
}
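
/*
 * Worked example (illustrative, added for clarity): for a host SPSR with the
 * DIT flag set, both overlapping bits (24 and 21) are cleared, then DIT is
 * re-inserted at bit 21, its AArch32 position, matching the table above.
 */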
static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
{
	u32 mode;

	if (vcpu_mode_is_32bit(vcpu)) {
		mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK;
		return mode > PSR_AA32_MODE_USR;
	}

	mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;

	return mode != PSR_MODE_EL0t;
}
static __always_inline u64 kvm_vcpu_get_esr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.esr_el2;
}
static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
{
	u64 esr = kvm_vcpu_get_esr(vcpu);

	if (esr & ESR_ELx_CV)
		return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;

	return -1;
}
static __always_inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.far_el2;
}

static __always_inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
{
	return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
}
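
/*
 * Note (added for clarity): HPFAR_EL2 holds the faulting IPA shifted right
 * by 12 in its FIPA field, which starts at register bit 4; masking and
 * shifting left by 8 therefore reconstructs the page-aligned IPA. The page
 * offset, when needed, must come from FAR_EL2.
 */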
static inline u64 kvm_vcpu_get_disr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.disr_el1;
}

static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_xVC_IMM_MASK;
}
static __always_inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_ISV);
}

static inline unsigned long kvm_vcpu_dabt_iss_nisv_sanitized(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & (ESR_ELx_CM | ESR_ELx_WNR | ESR_ELx_FSC);
}

static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SSE);
}

static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SF);
}

static __always_inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
{
	return (kvm_vcpu_get_esr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
}

static __always_inline bool kvm_vcpu_abt_iss1tw(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_S1PTW);
}

/* Always check for S1PTW *before* using this. */
static __always_inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_WNR;
}

static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_CM);
}

static __always_inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
{
	return 1 << ((kvm_vcpu_get_esr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
}
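
/*
 * Worked example (illustrative, added for clarity): for a trapped 32-bit
 * access, ESR_ELx.SAS is 0b10, so the expression above evaluates to
 * 1 << 2 = 4 bytes; SAS values 0b00, 0b01 and 0b11 decode to 1, 2 and 8
 * bytes respectively.
 */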
/* This one is not specific to Data Abort */
static __always_inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_IL);
}

static __always_inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
{
	return ESR_ELx_EC(kvm_vcpu_get_esr(vcpu));
}

static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
}

static inline bool kvm_vcpu_trap_is_exec_fault(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_trap_is_iabt(vcpu) && !kvm_vcpu_abt_iss1tw(vcpu);
}

static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC;
}
static inline
bool kvm_vcpu_trap_is_permission_fault(const struct kvm_vcpu *vcpu)
{
	return esr_fsc_is_permission_fault(kvm_vcpu_get_esr(vcpu));
}

static inline
bool kvm_vcpu_trap_is_translation_fault(const struct kvm_vcpu *vcpu)
{
	return esr_fsc_is_translation_fault(kvm_vcpu_get_esr(vcpu));
}

static inline
u64 kvm_vcpu_trap_get_perm_fault_granule(const struct kvm_vcpu *vcpu)
{
	unsigned long esr = kvm_vcpu_get_esr(vcpu);

	BUG_ON(!esr_fsc_is_permission_fault(esr));
	return BIT(ARM64_HW_PGTABLE_LEVEL_SHIFT(esr & ESR_ELx_FSC_LEVEL));
}
static __always_inline bool kvm_vcpu_abt_issea(const struct kvm_vcpu *vcpu)
{
	switch (kvm_vcpu_trap_get_fault(vcpu)) {
	case ESR_ELx_FSC_EXTABT:
	case ESR_ELx_FSC_SEA_TTW(-1) ... ESR_ELx_FSC_SEA_TTW(3):
	case ESR_ELx_FSC_SECC:
	case ESR_ELx_FSC_SECC_TTW(-1) ... ESR_ELx_FSC_SECC_TTW(3):
		return true;
	default:
		return false;
	}
}
static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
{
	u64 esr = kvm_vcpu_get_esr(vcpu);
	return ESR_ELx_SYS64_ISS_RT(esr);
}
static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
{
	if (kvm_vcpu_abt_iss1tw(vcpu)) {
		/*
		 * Only a permission fault on a S1PTW should be
		 * considered as a write. Otherwise, page tables baked
		 * in a read-only memslot will result in an exception
		 * being delivered in the guest.
		 *
		 * The drawback is that we end-up faulting twice if the
		 * guest is using any of HW AF/DB: a translation fault
		 * to map the page containing the PT (read only at
		 * first), then a permission fault to allow the flags
		 * to be set.
		 */
		return kvm_vcpu_trap_is_permission_fault(vcpu);
	}

	if (kvm_vcpu_trap_is_iabt(vcpu))
		return false;

	return kvm_vcpu_dabt_iswrite(vcpu);
}
static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
{
	return __vcpu_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
}
static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu)) {
		*vcpu_cpsr(vcpu) |= PSR_AA32_E_BIT;
	} else {
		u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
		sctlr |= SCTLR_ELx_EE;
		vcpu_write_sys_reg(vcpu, sctlr, SCTLR_EL1);
	}
}
static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return !!(*vcpu_cpsr(vcpu) & PSR_AA32_E_BIT);

	if (vcpu_mode_priv(vcpu))
		return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & SCTLR_ELx_EE);
	else
		return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & SCTLR_EL1_E0E);
}
static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return be16_to_cpu(data & 0xffff);
		case 4:
			return be32_to_cpu(data & 0xffffffff);
		default:
			return be64_to_cpu(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return le16_to_cpu(data & 0xffff);
		case 4:
			return le32_to_cpu(data & 0xffffffff);
		default:
			return le64_to_cpu(data);
		}
	}

	return data;		/* Leave LE untouched */
}
static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_be16(data & 0xffff);
		case 4:
			return cpu_to_be32(data & 0xffffffff);
		default:
			return cpu_to_be64(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_le16(data & 0xffff);
		case 4:
			return cpu_to_le32(data & 0xffffffff);
		default:
			return cpu_to_le64(data);
		}
	}

	return data;		/* Leave LE untouched */
}
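
/*
 * Usage sketch (illustrative, not part of this header): an MMIO read handler
 * would convert the host-endian value produced by device emulation into the
 * guest's current data endianness before writing it back to the register:
 *
 *	unsigned long data = emulated_mmio_read(...);	// hypothetical helper
 *	data = vcpu_data_host_to_guest(vcpu, data, len);
 *	vcpu_set_reg(vcpu, kvm_vcpu_dabt_get_rd(vcpu), data);
 *
 * vcpu_data_guest_to_host() performs the inverse conversion for writes.
 */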
static __always_inline void kvm_incr_pc(struct kvm_vcpu *vcpu)
{
	WARN_ON(vcpu_get_flag(vcpu, PENDING_EXCEPTION));
	vcpu_set_flag(vcpu, INCREMENT_PC);
}

#define kvm_pend_exception(v, e)					\
	do {								\
		WARN_ON(vcpu_get_flag((v), INCREMENT_PC));		\
		vcpu_set_flag((v), PENDING_EXCEPTION);			\
		vcpu_set_flag((v), e);					\
	} while (0)
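
/*
 * Usage note (illustrative): PENDING_EXCEPTION and INCREMENT_PC are mutually
 * exclusive, hence the WARN_ON()s above. An injection path would typically
 * do something like
 *
 *	kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SYNC);
 *
 * and let the exception code fix up the guest state on the next entry.
 */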
#define __build_check_all_or_none(r, bits)				\
	BUILD_BUG_ON(((r) & (bits)) && ((r) & (bits)) != (bits))

#define __cpacr_to_cptr_clr(clr, set)					\
	({								\
		u64 cptr = 0;						\
									\
		if ((set) & CPACR_ELx_FPEN)				\
			cptr |= CPTR_EL2_TFP;				\
		if ((set) & CPACR_ELx_ZEN)				\
			cptr |= CPTR_EL2_TZ;				\
		if ((set) & CPACR_ELx_SMEN)				\
			cptr |= CPTR_EL2_TSM;				\
		if ((clr) & CPACR_ELx_TTA)				\
			cptr |= CPTR_EL2_TTA;				\
		if ((clr) & CPTR_EL2_TAM)				\
			cptr |= CPTR_EL2_TAM;				\
		if ((clr) & CPTR_EL2_TCPAC)				\
			cptr |= CPTR_EL2_TCPAC;				\
									\
		cptr;							\
	})

#define __cpacr_to_cptr_set(clr, set)					\
	({								\
		u64 cptr = 0;						\
									\
		if ((clr) & CPACR_ELx_FPEN)				\
			cptr |= CPTR_EL2_TFP;				\
		if ((clr) & CPACR_ELx_ZEN)				\
			cptr |= CPTR_EL2_TZ;				\
		if ((clr) & CPACR_ELx_SMEN)				\
			cptr |= CPTR_EL2_TSM;				\
		if ((set) & CPACR_ELx_TTA)				\
			cptr |= CPTR_EL2_TTA;				\
		if ((set) & CPTR_EL2_TAM)				\
			cptr |= CPTR_EL2_TAM;				\
		if ((set) & CPTR_EL2_TCPAC)				\
			cptr |= CPTR_EL2_TCPAC;				\
									\
		cptr;							\
	})

#define cpacr_clear_set(clr, set)					\
	do {								\
		BUILD_BUG_ON((set) & CPTR_VHE_EL2_RES0);		\
		BUILD_BUG_ON((clr) & CPACR_ELx_E0POE);			\
		__build_check_all_or_none((clr), CPACR_ELx_FPEN);	\
		__build_check_all_or_none((set), CPACR_ELx_FPEN);	\
		__build_check_all_or_none((clr), CPACR_ELx_ZEN);	\
		__build_check_all_or_none((set), CPACR_ELx_ZEN);	\
		__build_check_all_or_none((clr), CPACR_ELx_SMEN);	\
		__build_check_all_or_none((set), CPACR_ELx_SMEN);	\
									\
		if (has_vhe() || has_hvhe())				\
			sysreg_clear_set(cpacr_el1, clr, set);		\
		else							\
			sysreg_clear_set(cptr_el2,			\
					 __cpacr_to_cptr_clr(clr, set),	\
					 __cpacr_to_cptr_set(clr, set));\
	} while (0)
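
/*
 * Usage sketch (illustrative): enabling FP/SIMD access for the guest,
 * expressed in the CPACR_EL1 layout and transparently translated to the
 * CPTR_EL2 trap encoding on nVHE:
 *
 *	cpacr_clear_set(0, CPACR_ELx_FPEN);
 *
 * The __build_check_all_or_none() checks reject a mask that names only half
 * of a two-bit xEN field.
 */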
static __always_inline void kvm_write_cptr_el2(u64 val)
{
	if (has_vhe() || has_hvhe())
		write_sysreg(val, cpacr_el1);
	else
		write_sysreg(val, cptr_el2);
}
static __always_inline u64 kvm_get_reset_cptr_el2(struct kvm_vcpu *vcpu)
{
	u64 val;

	if (has_vhe()) {
		val = (CPACR_ELx_FPEN | CPACR_EL1_ZEN_EL1EN);
		if (cpus_have_final_cap(ARM64_SME))
			val |= CPACR_EL1_SMEN_EL1EN;
	} else if (has_hvhe()) {
		val = CPACR_ELx_FPEN;

		if (!vcpu_has_sve(vcpu) || !guest_owns_fp_regs())
			val |= CPACR_ELx_ZEN;
		if (cpus_have_final_cap(ARM64_SME))
			val |= CPACR_ELx_SMEN;
	} else {
		val = CPTR_NVHE_EL2_RES1;

		if (vcpu_has_sve(vcpu) && guest_owns_fp_regs())
			val |= CPTR_EL2_TZ;
		if (cpus_have_final_cap(ARM64_SME))
			val &= ~CPTR_EL2_TSM;
	}

	return val;
}
static __always_inline void kvm_reset_cptr_el2(struct kvm_vcpu *vcpu)
{
	u64 val = kvm_get_reset_cptr_el2(vcpu);

	kvm_write_cptr_el2(val);
}
/*
 * Returns a 'sanitised' view of CPTR_EL2, translating from nVHE to the VHE
 * format if E2H isn't set.
 */
static inline u64 vcpu_sanitised_cptr_el2(const struct kvm_vcpu *vcpu)
{
	u64 cptr = __vcpu_sys_reg(vcpu, CPTR_EL2);

	if (!vcpu_el2_e2h_is_set(vcpu))
		cptr = translate_cptr_el2_to_cpacr_el1(cptr);

	return cptr;
}
static inline bool ____cptr_xen_trap_enabled(const struct kvm_vcpu *vcpu,
					     unsigned int xen)
{
	switch (xen) {
	case 0b00:
	case 0b10:
		return true;
	case 0b01:
		return vcpu_el2_tge_is_set(vcpu) && !vcpu_is_el2(vcpu);
	case 0b11:
	default:
		return false;
	}
}

#define __guest_hyp_cptr_xen_trap_enabled(vcpu, xen)			\
	(!vcpu_has_nv(vcpu) ? false :					\
	 ____cptr_xen_trap_enabled(vcpu,				\
				   SYS_FIELD_GET(CPACR_ELx, xen,	\
						 vcpu_sanitised_cptr_el2(vcpu))))
static inline bool guest_hyp_fpsimd_traps_enabled(const struct kvm_vcpu *vcpu)
{
	return __guest_hyp_cptr_xen_trap_enabled(vcpu, FPEN);
}

static inline bool guest_hyp_sve_traps_enabled(const struct kvm_vcpu *vcpu)
{
	return __guest_hyp_cptr_xen_trap_enabled(vcpu, ZEN);
}

#endif /* __ARM64_KVM_EMULATE_H__ */