// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/guest.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */
#include <linux/bits.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/nospec.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

#include <kvm/arm_psci.h>
#include <asm/cputype.h>
#include <linux/uaccess.h>
#include <asm/fpsimd.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_host.h>
#include <asm/sigcontext.h>
#define VM_STAT(x) { #x, offsetof(struct kvm, stat.x), KVM_STAT_VM }
#define VCPU_STAT(x) { #x, offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU }
struct kvm_stats_debugfs_item debugfs_entries[] = {
	VCPU_STAT(halt_successful_poll),
	VCPU_STAT(halt_attempted_poll),
	VCPU_STAT(halt_poll_invalid),
	VCPU_STAT(halt_wakeup),
	VCPU_STAT(hvc_exit_stat),
	VCPU_STAT(wfe_exit_stat),
	VCPU_STAT(wfi_exit_stat),
	VCPU_STAT(mmio_exit_user),
	VCPU_STAT(mmio_exit_kernel),
	{ NULL }
};
static bool core_reg_offset_is_vreg(u64 off)
{
	return off >= KVM_REG_ARM_CORE_REG(fp_regs.vregs) &&
	       off < KVM_REG_ARM_CORE_REG(fp_regs.fpsr);
}
static u64 core_reg_offset_from_id(u64 id)
{
	return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
}
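
/*
 * Note: a core register ID combines KVM_REG_ARM64 | KVM_REG_ARM_CORE, a
 * KVM_REG_SIZE_* field and an index into struct kvm_regs counted in 32-bit
 * words; core_reg_offset_from_id() strips everything but that index.
 */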
static int core_reg_size_from_offset(const struct kvm_vcpu *vcpu, u64 off)
{
	int size;

	switch (off) {
	case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
	     KVM_REG_ARM_CORE_REG(regs.regs[30]):
	case KVM_REG_ARM_CORE_REG(regs.sp):
	case KVM_REG_ARM_CORE_REG(regs.pc):
	case KVM_REG_ARM_CORE_REG(regs.pstate):
	case KVM_REG_ARM_CORE_REG(sp_el1):
	case KVM_REG_ARM_CORE_REG(elr_el1):
	case KVM_REG_ARM_CORE_REG(spsr[0]) ...
	     KVM_REG_ARM_CORE_REG(spsr[KVM_NR_SPSR - 1]):
		size = sizeof(__u64);
		break;

	case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
	     KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
		size = sizeof(__uint128_t);
		break;

	case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
	case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
		size = sizeof(__u32);
		break;

	default:
		return -EINVAL;
	}

	if (!IS_ALIGNED(off, size / sizeof(__u32)))
		return -EINVAL;

	/*
	 * The KVM_REG_ARM64_SVE regs must be used instead of
	 * KVM_REG_ARM_CORE for accessing the FPSIMD V-registers on
	 * SVE-enabled vcpus:
	 */
	if (vcpu_has_sve(vcpu) && core_reg_offset_is_vreg(off))
		return -EINVAL;

	return size;
}
static int validate_core_offset(const struct kvm_vcpu *vcpu,
				const struct kvm_one_reg *reg)
{
	u64 off = core_reg_offset_from_id(reg->id);
	int size = core_reg_size_from_offset(vcpu, off);

	if (size < 0)
		return -EINVAL;

	if (KVM_REG_SIZE(reg->id) != size)
		return -EINVAL;

	return 0;
}
static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	/*
	 * Because the kvm_regs structure is a mix of 32, 64 and
	 * 128bit fields, we index it as if it was a 32bit
	 * array. Hence below, nr_regs is the number of entries, and
	 * off the index in the "array".
	 */
	__u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
	struct kvm_regs *regs = vcpu_gp_regs(vcpu);
	int nr_regs = sizeof(*regs) / sizeof(__u32);
	u32 off;

	/* Our ID is an index into the kvm_regs struct. */
	off = core_reg_offset_from_id(reg->id);
	if (off >= nr_regs ||
	    (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
		return -ENOENT;

	if (validate_core_offset(vcpu, reg))
		return -EINVAL;

	if (copy_to_user(uaddr, ((u32 *)regs) + off, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}
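
/*
 * Illustrative userspace usage (not part of this file): reading the guest PC
 * with KVM_GET_ONE_REG, which lands in get_core_reg() above:
 *
 *	__u64 pc;
 *	struct kvm_one_reg r = {
 *		.id   = KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE |
 *			KVM_REG_ARM_CORE_REG(regs.pc),
 *		.addr = (__u64)&pc,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &r);
 */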
static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	__u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
	struct kvm_regs *regs = vcpu_gp_regs(vcpu);
	int nr_regs = sizeof(*regs) / sizeof(__u32);
	__uint128_t tmp;
	void *valp = &tmp;
	u64 off;
	int err = 0;

	/* Our ID is an index into the kvm_regs struct. */
	off = core_reg_offset_from_id(reg->id);
	if (off >= nr_regs ||
	    (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
		return -ENOENT;

	if (validate_core_offset(vcpu, reg))
		return -EINVAL;

	if (KVM_REG_SIZE(reg->id) > sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(valp, uaddr, KVM_REG_SIZE(reg->id))) {
		err = -EFAULT;
		goto out;
	}

	if (off == KVM_REG_ARM_CORE_REG(regs.pstate)) {
		u64 mode = (*(u64 *)valp) & PSR_AA32_MODE_MASK;
		switch (mode) {
		case PSR_AA32_MODE_USR:
			if (!system_supports_32bit_el0())
				return -EINVAL;
			break;
		case PSR_AA32_MODE_FIQ:
		case PSR_AA32_MODE_IRQ:
		case PSR_AA32_MODE_SVC:
		case PSR_AA32_MODE_ABT:
		case PSR_AA32_MODE_UND:
			if (!vcpu_el1_is_32bit(vcpu))
				return -EINVAL;
			break;
		case PSR_MODE_EL0t:
		case PSR_MODE_EL1t:
		case PSR_MODE_EL1h:
			if (vcpu_el1_is_32bit(vcpu))
				return -EINVAL;
			break;
		default:
			err = -EINVAL;
			goto out;
		}
	}

	memcpy((u32 *)regs + off, valp, KVM_REG_SIZE(reg->id));
out:
	return err;
}
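
/*
 * The PSTATE check above means userspace can only install an execution mode
 * the vcpu can actually run: AArch32 EL0 needs 32-bit EL0 support on the
 * host, AArch32 EL1 modes need a 32-bit EL1 vcpu, and the AArch64
 * EL0t/EL1t/EL1h modes are rejected for a 32-bit EL1 vcpu.
 */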
#define vq_word(vq) (((vq) - SVE_VQ_MIN) / 64)
#define vq_mask(vq) ((u64)1 << ((vq) - SVE_VQ_MIN) % 64)
#define vq_present(vqs, vq) (!!((vqs)[vq_word(vq)] & vq_mask(vq)))
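
/*
 * vqs[] is a bitmap of supported vector lengths, indexed by vector quadword
 * count: bit (vq - SVE_VQ_MIN) % 64 of word (vq - SVE_VQ_MIN) / 64 is set
 * when a vector length of vq * 16 bytes is available.
 */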
static int get_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	unsigned int max_vq, vq;
	u64 vqs[KVM_ARM64_SVE_VLS_WORDS];

	if (!vcpu_has_sve(vcpu))
		return -ENOENT;

	if (WARN_ON(!sve_vl_valid(vcpu->arch.sve_max_vl)))
		return -EINVAL;

	memset(vqs, 0, sizeof(vqs));

	max_vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);
	for (vq = SVE_VQ_MIN; vq <= max_vq; ++vq)
		if (sve_vq_available(vq))
			vqs[vq_word(vq)] |= vq_mask(vq);

	if (copy_to_user((void __user *)reg->addr, vqs, sizeof(vqs)))
		return -EFAULT;

	return 0;
}
static int set_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	unsigned int max_vq, vq;
	u64 vqs[KVM_ARM64_SVE_VLS_WORDS];

	if (!vcpu_has_sve(vcpu))
		return -ENOENT;

	if (kvm_arm_vcpu_sve_finalized(vcpu))
		return -EPERM; /* too late! */

	if (WARN_ON(vcpu->arch.sve_state))
		return -EINVAL;

	if (copy_from_user(vqs, (const void __user *)reg->addr, sizeof(vqs)))
		return -EFAULT;

	max_vq = 0;
	for (vq = SVE_VQ_MIN; vq <= SVE_VQ_MAX; ++vq)
		if (vq_present(vqs, vq))
			max_vq = vq;

	if (max_vq > sve_vq_from_vl(kvm_sve_max_vl))
		return -EINVAL;

	/*
	 * Vector lengths supported by the host can't currently be
	 * hidden from the guest individually: instead we can only set a
	 * maximum via ZCR_EL2.LEN. So, make sure the available vector
	 * lengths match the set requested exactly up to the requested
	 * maximum:
	 */
	for (vq = SVE_VQ_MIN; vq <= max_vq; ++vq)
		if (vq_present(vqs, vq) != sve_vq_available(vq))
			return -EINVAL;

	/* Can't run with no vector lengths at all: */
	if (max_vq < SVE_VQ_MIN)
		return -EINVAL;

	/* vcpu->arch.sve_state will be alloc'd by kvm_vcpu_finalize_sve() */
	vcpu->arch.sve_max_vl = sve_vl_from_vq(max_vq);

	return 0;
}
#define SVE_REG_SLICE_SHIFT	0
#define SVE_REG_SLICE_BITS	5
#define SVE_REG_ID_SHIFT	(SVE_REG_SLICE_SHIFT + SVE_REG_SLICE_BITS)
#define SVE_REG_ID_BITS		5

#define SVE_REG_SLICE_MASK					\
	GENMASK(SVE_REG_SLICE_SHIFT + SVE_REG_SLICE_BITS - 1,	\
		SVE_REG_SLICE_SHIFT)
#define SVE_REG_ID_MASK							\
	GENMASK(SVE_REG_ID_SHIFT + SVE_REG_ID_BITS - 1, SVE_REG_ID_SHIFT)

#define SVE_NUM_SLICES (1 << SVE_REG_SLICE_BITS)

#define KVM_SVE_ZREG_SIZE KVM_REG_SIZE(KVM_REG_ARM64_SVE_ZREG(0, 0))
#define KVM_SVE_PREG_SIZE KVM_REG_SIZE(KVM_REG_ARM64_SVE_PREG(0, 0))
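
/*
 * With the values above, an SVE register ID carries the slice number in
 * bits [4:0] and the register number in bits [9:5] of its low bits; only
 * slice 0 is ever accepted for now (see vcpu_sve_slices() below).
 */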
/*
 * Number of register slices required to cover each whole SVE register.
 * NOTE: Only the first slice ever exists, for now.
 * If you are tempted to modify this, you must also rework sve_reg_to_region()
 * to match:
 */
#define vcpu_sve_slices(vcpu) 1
/* Bounds of a single SVE register slice within vcpu->arch.sve_state */
struct sve_state_reg_region {
	unsigned int koffset;	/* offset into sve_state in kernel memory */
	unsigned int klen;	/* length in kernel memory */
	unsigned int upad;	/* extra trailing padding in user memory */
};
/*
 * Validate SVE register ID and get sanitised bounds for user/kernel SVE
 * register copy
 */
static int sve_reg_to_region(struct sve_state_reg_region *region,
			     struct kvm_vcpu *vcpu,
			     const struct kvm_one_reg *reg)
{
	/* reg ID ranges for Z- registers */
	const u64 zreg_id_min = KVM_REG_ARM64_SVE_ZREG(0, 0);
	const u64 zreg_id_max = KVM_REG_ARM64_SVE_ZREG(SVE_NUM_ZREGS - 1,
						       SVE_NUM_SLICES - 1);

	/* reg ID ranges for P- registers and FFR (which are contiguous) */
	const u64 preg_id_min = KVM_REG_ARM64_SVE_PREG(0, 0);
	const u64 preg_id_max = KVM_REG_ARM64_SVE_FFR(SVE_NUM_SLICES - 1);

	unsigned int vq;
	unsigned int reg_num;

	unsigned int reqoffset, reqlen; /* User-requested offset and length */
	unsigned int maxlen; /* Maximum permitted length */

	size_t sve_state_size;

	const u64 last_preg_id = KVM_REG_ARM64_SVE_PREG(SVE_NUM_PREGS - 1,
							SVE_NUM_SLICES - 1);

	/* Verify that the P-regs and FFR really do have contiguous IDs: */
	BUILD_BUG_ON(KVM_REG_ARM64_SVE_FFR(0) != last_preg_id + 1);

	/* Verify that we match the UAPI header: */
	BUILD_BUG_ON(SVE_NUM_SLICES != KVM_ARM64_SVE_MAX_SLICES);

	reg_num = (reg->id & SVE_REG_ID_MASK) >> SVE_REG_ID_SHIFT;

	if (reg->id >= zreg_id_min && reg->id <= zreg_id_max) {
		if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
			return -ENOENT;

		vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);

		reqoffset = SVE_SIG_ZREG_OFFSET(vq, reg_num) -
				SVE_SIG_REGS_OFFSET;
		reqlen = KVM_SVE_ZREG_SIZE;
		maxlen = SVE_SIG_ZREG_SIZE(vq);
	} else if (reg->id >= preg_id_min && reg->id <= preg_id_max) {
		if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
			return -ENOENT;

		vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);

		reqoffset = SVE_SIG_PREG_OFFSET(vq, reg_num) -
				SVE_SIG_REGS_OFFSET;
		reqlen = KVM_SVE_PREG_SIZE;
		maxlen = SVE_SIG_PREG_SIZE(vq);
	} else {
		return -EINVAL;
	}

	sve_state_size = vcpu_sve_state_size(vcpu);
	if (WARN_ON(!sve_state_size))
		return -EINVAL;

	region->koffset = array_index_nospec(reqoffset, sve_state_size);
	region->klen = min(maxlen, reqlen);
	region->upad = reqlen - region->klen;

	return 0;
}
static int get_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	int ret;
	struct sve_state_reg_region region;
	char __user *uptr = (char __user *)reg->addr;

	/* Handle the KVM_REG_ARM64_SVE_VLS pseudo-reg as a special case: */
	if (reg->id == KVM_REG_ARM64_SVE_VLS)
		return get_sve_vls(vcpu, reg);

	/* Try to interpret reg ID as an architectural SVE register... */
	ret = sve_reg_to_region(&region, vcpu, reg);
	if (ret)
		return ret;

	if (!kvm_arm_vcpu_sve_finalized(vcpu))
		return -EPERM;

	if (copy_to_user(uptr, vcpu->arch.sve_state + region.koffset,
			 region.klen) ||
	    clear_user(uptr + region.klen, region.upad))
		return -EFAULT;

	return 0;
}
static int set_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	int ret;
	struct sve_state_reg_region region;
	const char __user *uptr = (const char __user *)reg->addr;

	/* Handle the KVM_REG_ARM64_SVE_VLS pseudo-reg as a special case: */
	if (reg->id == KVM_REG_ARM64_SVE_VLS)
		return set_sve_vls(vcpu, reg);

	/* Try to interpret reg ID as an architectural SVE register... */
	ret = sve_reg_to_region(&region, vcpu, reg);
	if (ret)
		return ret;

	if (!kvm_arm_vcpu_sve_finalized(vcpu))
		return -EPERM;

	if (copy_from_user(vcpu->arch.sve_state + region.koffset, uptr,
			   region.klen))
		return -EFAULT;

	return 0;
}
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	return -EINVAL;
}
static int copy_core_reg_indices(const struct kvm_vcpu *vcpu,
				 u64 __user *uindices)
{
	unsigned int i;
	int n = 0;

	for (i = 0; i < sizeof(struct kvm_regs) / sizeof(__u32); i++) {
		u64 reg = KVM_REG_ARM64 | KVM_REG_ARM_CORE | i;
		int size = core_reg_size_from_offset(vcpu, i);

		if (size < 0)
			continue;

		switch (size) {
		case sizeof(__u32):
			reg |= KVM_REG_SIZE_U32;
			break;

		case sizeof(__u64):
			reg |= KVM_REG_SIZE_U64;
			break;

		case sizeof(__uint128_t):
			reg |= KVM_REG_SIZE_U128;
			break;

		default:
			WARN_ON(1);
			continue;
		}

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}

		n++;
	}

	return n;
}
static unsigned long num_core_regs(const struct kvm_vcpu *vcpu)
{
	return copy_core_reg_indices(vcpu, NULL);
}
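
/*
 * copy_core_reg_indices() doubles as a counter: when uindices is NULL it
 * writes nothing and just returns the number of core registers exposed,
 * which is what num_core_regs() relies on.
 */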
/**
 * ARM64 versions of the TIMER registers, always available on arm64
 */

#define NUM_TIMER_REGS 3
static bool is_timer_reg(u64 index)
{
	switch (index) {
	case KVM_REG_ARM_TIMER_CTL:
	case KVM_REG_ARM_TIMER_CNT:
	case KVM_REG_ARM_TIMER_CVAL:
		return true;
	}
	return false;
}
static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	if (put_user(KVM_REG_ARM_TIMER_CTL, uindices))
		return -EFAULT;
	uindices++;
	if (put_user(KVM_REG_ARM_TIMER_CNT, uindices))
		return -EFAULT;
	uindices++;
	if (put_user(KVM_REG_ARM_TIMER_CVAL, uindices))
		return -EFAULT;

	return 0;
}
static int set_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(long)reg->addr;
	u64 val;
	int ret;

	ret = copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id));
	if (ret != 0)
		return -EFAULT;

	return kvm_arm_timer_set_reg(vcpu, reg->id, val);
}
static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(long)reg->addr;
	u64 val;

	val = kvm_arm_timer_get_reg(vcpu, reg->id);
	return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? -EFAULT : 0;
}
static unsigned long num_sve_regs(const struct kvm_vcpu *vcpu)
{
	const unsigned int slices = vcpu_sve_slices(vcpu);

	if (!vcpu_has_sve(vcpu))
		return 0;

	/* Policed by KVM_GET_REG_LIST: */
	WARN_ON(!kvm_arm_vcpu_sve_finalized(vcpu));

	return slices * (SVE_NUM_PREGS + SVE_NUM_ZREGS + 1 /* FFR */)
		+ 1; /* KVM_REG_ARM64_SVE_VLS */
}
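
/*
 * Illustrative count, assuming the usual single slice: 32 Z-registers,
 * 16 P-registers and FFR give 49 architectural SVE regs, plus the
 * KVM_REG_ARM64_SVE_VLS pseudo-register for a total of 50.
 */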
static int copy_sve_reg_indices(const struct kvm_vcpu *vcpu,
				u64 __user *uindices)
{
	const unsigned int slices = vcpu_sve_slices(vcpu);
	u64 reg;
	unsigned int i, n;
	int num_regs = 0;

	if (!vcpu_has_sve(vcpu))
		return 0;

	/* Policed by KVM_GET_REG_LIST: */
	WARN_ON(!kvm_arm_vcpu_sve_finalized(vcpu));

	/*
	 * Enumerate this first, so that userspace can save/restore in
	 * the order reported by KVM_GET_REG_LIST:
	 */
	reg = KVM_REG_ARM64_SVE_VLS;
	if (put_user(reg, uindices++))
		return -EFAULT;
	++num_regs;

	for (i = 0; i < slices; i++) {
		for (n = 0; n < SVE_NUM_ZREGS; n++) {
			reg = KVM_REG_ARM64_SVE_ZREG(n, i);
			if (put_user(reg, uindices++))
				return -EFAULT;
			num_regs++;
		}

		for (n = 0; n < SVE_NUM_PREGS; n++) {
			reg = KVM_REG_ARM64_SVE_PREG(n, i);
			if (put_user(reg, uindices++))
				return -EFAULT;
			num_regs++;
		}

		reg = KVM_REG_ARM64_SVE_FFR(i);
		if (put_user(reg, uindices++))
			return -EFAULT;
		num_regs++;
	}

	return num_regs;
}
/**
 * kvm_arm_num_regs - how many registers do we present via KVM_GET_ONE_REG
 *
 * This is for all registers.
 */
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
{
	unsigned long res = 0;

	res += num_core_regs(vcpu);
	res += num_sve_regs(vcpu);
	res += kvm_arm_num_sys_reg_descs(vcpu);
	res += kvm_arm_get_fw_num_regs(vcpu);
	res += NUM_TIMER_REGS;

	return res;
}
/**
 * kvm_arm_copy_reg_indices - get indices of all registers.
 *
 * We do core registers right here, then we append system regs.
 */
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	int ret;

	ret = copy_core_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_sve_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = kvm_arm_copy_fw_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += kvm_arm_get_fw_num_regs(vcpu);

	ret = copy_timer_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += NUM_TIMER_REGS;

	return kvm_arm_copy_sys_reg_indices(vcpu, uindices);
}
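
/*
 * The enumeration order above (core, SVE, firmware, timer, then sys regs)
 * is what userspace sees from KVM_GET_REG_LIST, and the counts must stay
 * in sync with kvm_arm_num_regs().
 */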
int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	/* We currently use nothing arch-specific in upper 32 bits */
	if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32)
		return -EINVAL;

	switch (reg->id & KVM_REG_ARM_COPROC_MASK) {
	case KVM_REG_ARM_CORE:	return get_core_reg(vcpu, reg);
	case KVM_REG_ARM_FW:	return kvm_arm_get_fw_reg(vcpu, reg);
	case KVM_REG_ARM64_SVE:	return get_sve_reg(vcpu, reg);
	}

	if (is_timer_reg(reg->id))
		return get_timer_reg(vcpu, reg);

	return kvm_arm_sys_reg_get_reg(vcpu, reg);
}
int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	/* We currently use nothing arch-specific in upper 32 bits */
	if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32)
		return -EINVAL;

	switch (reg->id & KVM_REG_ARM_COPROC_MASK) {
	case KVM_REG_ARM_CORE:	return set_core_reg(vcpu, reg);
	case KVM_REG_ARM_FW:	return kvm_arm_set_fw_reg(vcpu, reg);
	case KVM_REG_ARM64_SVE:	return set_sve_reg(vcpu, reg);
	}

	if (is_timer_reg(reg->id))
		return set_timer_reg(vcpu, reg);

	return kvm_arm_sys_reg_set_reg(vcpu, reg);
}
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -EINVAL;
}
int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events)
{
	events->exception.serror_pending = !!(vcpu->arch.hcr_el2 & HCR_VSE);
	events->exception.serror_has_esr = cpus_have_const_cap(ARM64_HAS_RAS_EXTN);

	if (events->exception.serror_pending && events->exception.serror_has_esr)
		events->exception.serror_esr = vcpu_get_vsesr(vcpu);

	/*
	 * We never return a pending ext_dabt here because we deliver it to
	 * the virtual CPU directly when setting the event and it's no longer
	 * 'pending' at this point.
	 */

	return 0;
}
int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events)
{
	bool serror_pending = events->exception.serror_pending;
	bool has_esr = events->exception.serror_has_esr;
	bool ext_dabt_pending = events->exception.ext_dabt_pending;

	if (serror_pending && has_esr) {
		if (!cpus_have_const_cap(ARM64_HAS_RAS_EXTN))
			return -EINVAL;

		if (!((events->exception.serror_esr) & ~ESR_ELx_ISS_MASK))
			kvm_set_sei_esr(vcpu, events->exception.serror_esr);
		else
			return -EINVAL;
	} else if (serror_pending) {
		kvm_inject_vabt(vcpu);
	}

	if (ext_dabt_pending)
		kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));

	return 0;
}
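
/*
 * SError injection summary: with a valid ISS and the RAS extension present,
 * the specified ESR is installed via kvm_set_sei_esr(); a pending SError
 * without an ESR falls back to kvm_inject_vabt(); an external data abort is
 * injected directly with kvm_inject_dabt().
 */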
int __attribute_const__ kvm_target_cpu(void)
{
	unsigned long implementor = read_cpuid_implementor();
	unsigned long part_number = read_cpuid_part_number();

	switch (implementor) {
	case ARM_CPU_IMP_ARM:
		switch (part_number) {
		case ARM_CPU_PART_AEM_V8:
			return KVM_ARM_TARGET_AEM_V8;
		case ARM_CPU_PART_FOUNDATION:
			return KVM_ARM_TARGET_FOUNDATION_V8;
		case ARM_CPU_PART_CORTEX_A53:
			return KVM_ARM_TARGET_CORTEX_A53;
		case ARM_CPU_PART_CORTEX_A57:
			return KVM_ARM_TARGET_CORTEX_A57;
		}
		break;
	case ARM_CPU_IMP_APM:
		switch (part_number) {
		case APM_CPU_PART_POTENZA:
			return KVM_ARM_TARGET_XGENE_POTENZA;
		}
		break;
	}

	/* Return a default generic target */
	return KVM_ARM_TARGET_GENERIC_V8;
}
int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init)
{
	int target = kvm_target_cpu();

	if (target < 0)
		return -ENODEV;

	memset(init, 0, sizeof(*init));

	/*
	 * For now, we don't return any features.
	 * In future, we might use features to return target
	 * specific features available for the preferred
	 * target type.
	 */
	init->target = (__u32)target;

	return 0;
}
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL;
}
#define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE |    \
			    KVM_GUESTDBG_USE_SW_BP | \
			    KVM_GUESTDBG_USE_HW | \
			    KVM_GUESTDBG_SINGLESTEP)
/**
 * kvm_arch_vcpu_ioctl_set_guest_debug - set up guest debugging
 * @kvm:	pointer to the KVM struct
 * @kvm_guest_debug: the ioctl data buffer
 *
 * This sets up and enables the VM for guest debugging. Userspace
 * passes in a control flag to enable different debug types and
 * potentially other architecture specific information in the rest of
 * the structure.
 */
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int ret = 0;

	trace_kvm_set_guest_debug(vcpu, dbg->control);

	if (dbg->control & ~KVM_GUESTDBG_VALID_MASK) {
		ret = -EINVAL;
		goto out;
	}

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;

		/* Hardware assisted Break and Watch points */
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW) {
			vcpu->arch.external_debug_state = dbg->arch;
		}

	} else {
		/* If not enabled clear all flags */
		vcpu->guest_debug = 0;
	}

out:
	return ret;
}
int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_ARM_VCPU_PMU_V3_CTRL:
		ret = kvm_arm_pmu_v3_set_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_TIMER_CTRL:
		ret = kvm_arm_timer_set_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_PVTIME_CTRL:
		ret = kvm_arm_pvtime_set_attr(vcpu, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}
int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_ARM_VCPU_PMU_V3_CTRL:
		ret = kvm_arm_pmu_v3_get_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_TIMER_CTRL:
		ret = kvm_arm_timer_get_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_PVTIME_CTRL:
		ret = kvm_arm_pvtime_get_attr(vcpu, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}
int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_ARM_VCPU_PMU_V3_CTRL:
		ret = kvm_arm_pmu_v3_has_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_TIMER_CTRL:
		ret = kvm_arm_timer_has_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_PVTIME_CTRL:
		ret = kvm_arm_pvtime_has_attr(vcpu, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}