// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/guest.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/bits.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/nospec.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <kvm/arm_psci.h>
#include <asm/cputype.h>
#include <linux/uaccess.h>
#include <asm/fpsimd.h>
#include <asm/kvm.h>
#include <asm/kvm_emulate.h>
#include <asm/sigcontext.h>

#include "trace.h"

struct kvm_stats_debugfs_item debugfs_entries[] = {
	VCPU_STAT("halt_successful_poll", halt_successful_poll),
	VCPU_STAT("halt_attempted_poll", halt_attempted_poll),
	VCPU_STAT("halt_poll_invalid", halt_poll_invalid),
	VCPU_STAT("halt_wakeup", halt_wakeup),
	VCPU_STAT("hvc_exit_stat", hvc_exit_stat),
	VCPU_STAT("wfe_exit_stat", wfe_exit_stat),
	VCPU_STAT("wfi_exit_stat", wfi_exit_stat),
	VCPU_STAT("mmio_exit_user", mmio_exit_user),
	VCPU_STAT("mmio_exit_kernel", mmio_exit_kernel),
	VCPU_STAT("exits", exits),
	VCPU_STAT("halt_poll_success_ns", halt_poll_success_ns),
	VCPU_STAT("halt_poll_fail_ns", halt_poll_fail_ns),
	{ NULL }
};

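/*
 * Core-register helpers: a KVM_REG_ARM_CORE register ID encodes an index
 * into struct kvm_regs, measured in 32-bit words.
 */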
static bool core_reg_offset_is_vreg(u64 off)
{
	return off >= KVM_REG_ARM_CORE_REG(fp_regs.vregs) &&
		off < KVM_REG_ARM_CORE_REG(fp_regs.fpsr);
}

static u64 core_reg_offset_from_id(u64 id)
{
	return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
}

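/*
 * Return the size in bytes of the core register identified by @off, or
 * -EINVAL if the offset is unknown, misaligned, or names an FPSIMD
 * V-register on an SVE-enabled vcpu.
 */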
static int core_reg_size_from_offset(const struct kvm_vcpu *vcpu, u64 off)
{
	int size;

	switch (off) {
	case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
	     KVM_REG_ARM_CORE_REG(regs.regs[30]):
	case KVM_REG_ARM_CORE_REG(regs.sp):
	case KVM_REG_ARM_CORE_REG(regs.pc):
	case KVM_REG_ARM_CORE_REG(regs.pstate):
	case KVM_REG_ARM_CORE_REG(sp_el1):
	case KVM_REG_ARM_CORE_REG(elr_el1):
	case KVM_REG_ARM_CORE_REG(spsr[0]) ...
	     KVM_REG_ARM_CORE_REG(spsr[KVM_NR_SPSR - 1]):
		size = sizeof(__u64);
		break;

	case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
	     KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
		size = sizeof(__uint128_t);
		break;

	case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
	case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
		size = sizeof(__u32);
		break;

	default:
		return -EINVAL;
	}

	if (!IS_ALIGNED(off, size / sizeof(__u32)))
		return -EINVAL;

	/*
	 * The KVM_REG_ARM64_SVE regs must be used instead of
	 * KVM_REG_ARM_CORE for accessing the FPSIMD V-registers on
	 * SVE-enabled vcpus:
	 */
	if (vcpu_has_sve(vcpu) && core_reg_offset_is_vreg(off))
		return -EINVAL;

	return size;
}

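/*
 * Translate a validated core register ID into a pointer to the backing
 * field in the vcpu context, or NULL if the ID or its size is invalid.
 */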
static void *core_reg_addr(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	u64 off = core_reg_offset_from_id(reg->id);
	int size = core_reg_size_from_offset(vcpu, off);

	if (size < 0)
		return NULL;

	if (KVM_REG_SIZE(reg->id) != size)
		return NULL;

	switch (off) {
	case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
	     KVM_REG_ARM_CORE_REG(regs.regs[30]):
		off -= KVM_REG_ARM_CORE_REG(regs.regs[0]);
		off /= 2;
		return &vcpu->arch.ctxt.regs.regs[off];

	case KVM_REG_ARM_CORE_REG(regs.sp):
		return &vcpu->arch.ctxt.regs.sp;

	case KVM_REG_ARM_CORE_REG(regs.pc):
		return &vcpu->arch.ctxt.regs.pc;

	case KVM_REG_ARM_CORE_REG(regs.pstate):
		return &vcpu->arch.ctxt.regs.pstate;

	case KVM_REG_ARM_CORE_REG(sp_el1):
		return __ctxt_sys_reg(&vcpu->arch.ctxt, SP_EL1);

	case KVM_REG_ARM_CORE_REG(elr_el1):
		return __ctxt_sys_reg(&vcpu->arch.ctxt, ELR_EL1);

	case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_EL1]):
		return __ctxt_sys_reg(&vcpu->arch.ctxt, SPSR_EL1);

	case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_ABT]):
		return &vcpu->arch.ctxt.spsr_abt;

	case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_UND]):
		return &vcpu->arch.ctxt.spsr_und;

	case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_IRQ]):
		return &vcpu->arch.ctxt.spsr_irq;

	case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_FIQ]):
		return &vcpu->arch.ctxt.spsr_fiq;

	case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
	     KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
		off -= KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]);
		off /= 4;
		return &vcpu->arch.ctxt.fp_regs.vregs[off];

	case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
		return &vcpu->arch.ctxt.fp_regs.fpsr;

	case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
		return &vcpu->arch.ctxt.fp_regs.fpcr;

	default:
		return NULL;
	}
}

static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	/*
	 * Because the kvm_regs structure is a mix of 32, 64 and
	 * 128bit fields, we index it as if it was a 32bit
	 * array. Hence below, nr_regs is the number of entries, and
	 * off the index in the "array".
	 */
	__u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
	int nr_regs = sizeof(struct kvm_regs) / sizeof(__u32);
	void *addr;
	u32 off;

	/* Our ID is an index into the kvm_regs struct. */
	off = core_reg_offset_from_id(reg->id);
	if (off >= nr_regs ||
	    (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
		return -ENOENT;

	addr = core_reg_addr(vcpu, reg);
	if (!addr)
		return -EINVAL;

	if (copy_to_user(uaddr, addr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

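/*
 * Write a core register from userspace, validating PSTATE mode changes
 * and narrowing the GP registers to 32 bits when entering an AArch32 mode.
 */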
static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	__u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
	int nr_regs = sizeof(struct kvm_regs) / sizeof(__u32);
	__uint128_t tmp;
	void *valp = &tmp, *addr;
	u64 off;
	int err = 0;

	/* Our ID is an index into the kvm_regs struct. */
	off = core_reg_offset_from_id(reg->id);
	if (off >= nr_regs ||
	    (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
		return -ENOENT;

	addr = core_reg_addr(vcpu, reg);
	if (!addr)
		return -EINVAL;

	if (KVM_REG_SIZE(reg->id) > sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(valp, uaddr, KVM_REG_SIZE(reg->id))) {
		err = -EFAULT;
		goto out;
	}

	if (off == KVM_REG_ARM_CORE_REG(regs.pstate)) {
		u64 mode = (*(u64 *)valp) & PSR_AA32_MODE_MASK;
		switch (mode) {
		case PSR_AA32_MODE_USR:
			if (!system_supports_32bit_el0())
				return -EINVAL;
			break;
		case PSR_AA32_MODE_FIQ:
		case PSR_AA32_MODE_IRQ:
		case PSR_AA32_MODE_SVC:
		case PSR_AA32_MODE_ABT:
		case PSR_AA32_MODE_UND:
			if (!vcpu_el1_is_32bit(vcpu))
				return -EINVAL;
			break;
		case PSR_MODE_EL0t:
		case PSR_MODE_EL1t:
		case PSR_MODE_EL1h:
			if (vcpu_el1_is_32bit(vcpu))
				return -EINVAL;
			break;
		default:
			err = -EINVAL;
			goto out;
		}
	}

	memcpy(addr, valp, KVM_REG_SIZE(reg->id));

	if (*vcpu_cpsr(vcpu) & PSR_MODE32_BIT) {
		int i, nr_reg;

		switch (*vcpu_cpsr(vcpu)) {
		/*
		 * Either we are dealing with user mode, and only the
		 * first 15 registers (+ PC) must be narrowed to 32bit.
		 * AArch32 r0-r14 conveniently map to AArch64 x0-x14.
		 */
		case PSR_AA32_MODE_USR:
		case PSR_AA32_MODE_SYS:
			nr_reg = 15;
			break;

		/*
		 * Otherwise, this is a privileged mode, and *all* the
		 * registers must be narrowed to 32bit.
		 */
		default:
			nr_reg = 31;
		}

		for (i = 0; i < nr_reg; i++)
			vcpu_set_reg(vcpu, i, (u32)vcpu_get_reg(vcpu, i));

		*vcpu_pc(vcpu) = (u32)*vcpu_pc(vcpu);
	}
out:
	return err;
}

#define vq_word(vq) (((vq) - SVE_VQ_MIN) / 64)
#define vq_mask(vq) ((u64)1 << ((vq) - SVE_VQ_MIN) % 64)
#define vq_present(vqs, vq) (!!((vqs)[vq_word(vq)] & vq_mask(vq)))

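/*
 * KVM_REG_ARM64_SVE_VLS is a bitmap of supported vector lengths, one bit
 * per vector quadword count (VQ), packed into 64-bit words as above.
 */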
static int get_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	unsigned int max_vq, vq;
	u64 vqs[KVM_ARM64_SVE_VLS_WORDS];

	if (!vcpu_has_sve(vcpu))
		return -ENOENT;

	if (WARN_ON(!sve_vl_valid(vcpu->arch.sve_max_vl)))
		return -EINVAL;

	memset(vqs, 0, sizeof(vqs));

	max_vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);
	for (vq = SVE_VQ_MIN; vq <= max_vq; ++vq)
		if (sve_vq_available(vq))
			vqs[vq_word(vq)] |= vq_mask(vq);

	if (copy_to_user((void __user *)reg->addr, vqs, sizeof(vqs)))
		return -EFAULT;

	return 0;
}

static int set_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	unsigned int max_vq, vq;
	u64 vqs[KVM_ARM64_SVE_VLS_WORDS];

	if (!vcpu_has_sve(vcpu))
		return -ENOENT;

	if (kvm_arm_vcpu_sve_finalized(vcpu))
		return -EPERM; /* too late! */

	if (WARN_ON(vcpu->arch.sve_state))
		return -EINVAL;

	if (copy_from_user(vqs, (const void __user *)reg->addr, sizeof(vqs)))
		return -EFAULT;

	max_vq = 0;
	for (vq = SVE_VQ_MIN; vq <= SVE_VQ_MAX; ++vq)
		if (vq_present(vqs, vq))
			max_vq = vq;

	if (max_vq > sve_vq_from_vl(kvm_sve_max_vl))
		return -EINVAL;

	/*
	 * Vector lengths supported by the host can't currently be
	 * hidden from the guest individually: instead we can only set a
	 * maximum via ZCR_EL2.LEN. So, make sure the available vector
	 * lengths match the set requested exactly up to the requested
	 * maximum:
	 */
	for (vq = SVE_VQ_MIN; vq <= max_vq; ++vq)
		if (vq_present(vqs, vq) != sve_vq_available(vq))
			return -EINVAL;

	/* Can't run with no vector lengths at all: */
	if (max_vq < SVE_VQ_MIN)
		return -EINVAL;

	/* vcpu->arch.sve_state will be alloc'd by kvm_vcpu_finalize_sve() */
	vcpu->arch.sve_max_vl = sve_vl_from_vq(max_vq);

	return 0;
}

#define SVE_REG_SLICE_SHIFT	0
#define SVE_REG_SLICE_BITS	5
#define SVE_REG_ID_SHIFT	(SVE_REG_SLICE_SHIFT + SVE_REG_SLICE_BITS)
#define SVE_REG_ID_BITS		5

#define SVE_REG_SLICE_MASK					\
	GENMASK(SVE_REG_SLICE_SHIFT + SVE_REG_SLICE_BITS - 1,	\
		SVE_REG_SLICE_SHIFT)
#define SVE_REG_ID_MASK							\
	GENMASK(SVE_REG_ID_SHIFT + SVE_REG_ID_BITS - 1, SVE_REG_ID_SHIFT)

#define SVE_NUM_SLICES (1 << SVE_REG_SLICE_BITS)

#define KVM_SVE_ZREG_SIZE KVM_REG_SIZE(KVM_REG_ARM64_SVE_ZREG(0, 0))
#define KVM_SVE_PREG_SIZE KVM_REG_SIZE(KVM_REG_ARM64_SVE_PREG(0, 0))

/*
 * Number of register slices required to cover each whole SVE register.
 * NOTE: Only the first slice ever exists, for now.
 * If you are tempted to modify this, you must also rework sve_reg_to_region()
 * to match:
 */
#define vcpu_sve_slices(vcpu) 1

/* Bounds of a single SVE register slice within vcpu->arch.sve_state */
struct sve_state_reg_region {
	unsigned int koffset;	/* offset into sve_state in kernel memory */
	unsigned int klen;	/* length in kernel memory */
	unsigned int upad;	/* extra trailing padding in user memory */
};

/*
 * Validate SVE register ID and get sanitised bounds for user/kernel SVE
 * register copy
 */
static int sve_reg_to_region(struct sve_state_reg_region *region,
			     struct kvm_vcpu *vcpu,
			     const struct kvm_one_reg *reg)
{
	/* reg ID ranges for Z- registers */
	const u64 zreg_id_min = KVM_REG_ARM64_SVE_ZREG(0, 0);
	const u64 zreg_id_max = KVM_REG_ARM64_SVE_ZREG(SVE_NUM_ZREGS - 1,
						       SVE_NUM_SLICES - 1);

	/* reg ID ranges for P- registers and FFR (which are contiguous) */
	const u64 preg_id_min = KVM_REG_ARM64_SVE_PREG(0, 0);
	const u64 preg_id_max = KVM_REG_ARM64_SVE_FFR(SVE_NUM_SLICES - 1);

	unsigned int vq;
	unsigned int reg_num;

	unsigned int reqoffset, reqlen; /* User-requested offset and length */
	unsigned int maxlen; /* Maximum permitted length */

	size_t sve_state_size;

	const u64 last_preg_id = KVM_REG_ARM64_SVE_PREG(SVE_NUM_PREGS - 1,
							SVE_NUM_SLICES - 1);

	/* Verify that the P-regs and FFR really do have contiguous IDs: */
	BUILD_BUG_ON(KVM_REG_ARM64_SVE_FFR(0) != last_preg_id + 1);

	/* Verify that we match the UAPI header: */
	BUILD_BUG_ON(SVE_NUM_SLICES != KVM_ARM64_SVE_MAX_SLICES);

	reg_num = (reg->id & SVE_REG_ID_MASK) >> SVE_REG_ID_SHIFT;

	if (reg->id >= zreg_id_min && reg->id <= zreg_id_max) {
		if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
			return -ENOENT;

		vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);

		reqoffset = SVE_SIG_ZREG_OFFSET(vq, reg_num) -
				SVE_SIG_REGS_OFFSET;
		reqlen = KVM_SVE_ZREG_SIZE;
		maxlen = SVE_SIG_ZREG_SIZE(vq);
	} else if (reg->id >= preg_id_min && reg->id <= preg_id_max) {
		if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
			return -ENOENT;

		vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);

		reqoffset = SVE_SIG_PREG_OFFSET(vq, reg_num) -
				SVE_SIG_REGS_OFFSET;
		reqlen = KVM_SVE_PREG_SIZE;
		maxlen = SVE_SIG_PREG_SIZE(vq);
	} else {
		return -EINVAL;
	}

	sve_state_size = vcpu_sve_state_size(vcpu);
	if (WARN_ON(!sve_state_size))
		return -EINVAL;

	region->koffset = array_index_nospec(reqoffset, sve_state_size);
	region->klen = min(maxlen, reqlen);
	region->upad = reqlen - region->klen;

	return 0;
}

static int get_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	int ret;
	struct sve_state_reg_region region;
	char __user *uptr = (char __user *)reg->addr;

	/* Handle the KVM_REG_ARM64_SVE_VLS pseudo-reg as a special case: */
	if (reg->id == KVM_REG_ARM64_SVE_VLS)
		return get_sve_vls(vcpu, reg);

	/* Try to interpret reg ID as an architectural SVE register... */
	ret = sve_reg_to_region(&region, vcpu, reg);
	if (ret)
		return ret;

	if (!kvm_arm_vcpu_sve_finalized(vcpu))
		return -EPERM;

	if (copy_to_user(uptr, vcpu->arch.sve_state + region.koffset,
			 region.klen) ||
	    clear_user(uptr + region.klen, region.upad))
		return -EFAULT;

	return 0;
}

static int set_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	int ret;
	struct sve_state_reg_region region;
	const char __user *uptr = (const char __user *)reg->addr;

	/* Handle the KVM_REG_ARM64_SVE_VLS pseudo-reg as a special case: */
	if (reg->id == KVM_REG_ARM64_SVE_VLS)
		return set_sve_vls(vcpu, reg);

	/* Try to interpret reg ID as an architectural SVE register... */
	ret = sve_reg_to_region(&region, vcpu, reg);
	if (ret)
		return ret;

	if (!kvm_arm_vcpu_sve_finalized(vcpu))
		return -EPERM;

	if (copy_from_user(vcpu->arch.sve_state + region.koffset, uptr,
			   region.klen))
		return -EFAULT;

	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	return -EINVAL;
}

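/*
 * Walk the kvm_regs layout and emit one register index per core register;
 * with a NULL @uindices this only counts the registers.
 */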
static int copy_core_reg_indices(const struct kvm_vcpu *vcpu,
				 u64 __user *uindices)
{
	unsigned int i;
	int n = 0;

	for (i = 0; i < sizeof(struct kvm_regs) / sizeof(__u32); i++) {
		u64 reg = KVM_REG_ARM64 | KVM_REG_ARM_CORE | i;
		int size = core_reg_size_from_offset(vcpu, i);

		if (size < 0)
			continue;

		switch (size) {
		case sizeof(__u32):
			reg |= KVM_REG_SIZE_U32;
			break;

		case sizeof(__u64):
			reg |= KVM_REG_SIZE_U64;
			break;

		case sizeof(__uint128_t):
			reg |= KVM_REG_SIZE_U128;
			break;

		default:
			WARN_ON(1);
			continue;
		}

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}

		n++;
	}

	return n;
}

static unsigned long num_core_regs(const struct kvm_vcpu *vcpu)
{
	return copy_core_reg_indices(vcpu, NULL);
}

/**
 * ARM64 versions of the TIMER registers, always available on arm64
 */

#define NUM_TIMER_REGS 3

static bool is_timer_reg(u64 index)
{
	switch (index) {
	case KVM_REG_ARM_TIMER_CTL:
	case KVM_REG_ARM_TIMER_CNT:
	case KVM_REG_ARM_TIMER_CVAL:
		return true;
	}
	return false;
}

static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	if (put_user(KVM_REG_ARM_TIMER_CTL, uindices))
		return -EFAULT;
	uindices++;
	if (put_user(KVM_REG_ARM_TIMER_CNT, uindices))
		return -EFAULT;
	uindices++;
	if (put_user(KVM_REG_ARM_TIMER_CVAL, uindices))
		return -EFAULT;

	return 0;
}

static int set_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(long)reg->addr;
	u64 val;
	int ret;

	ret = copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id));
	if (ret != 0)
		return -EFAULT;

	return kvm_arm_timer_set_reg(vcpu, reg->id, val);
}

static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(long)reg->addr;
	u64 val;

	val = kvm_arm_timer_get_reg(vcpu, reg->id);
	return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? -EFAULT : 0;
}

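/*
 * SVE registers are only enumerated once the vcpu's SVE configuration has
 * been finalized; the count includes the KVM_REG_ARM64_SVE_VLS pseudo-register.
 */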
static unsigned long num_sve_regs(const struct kvm_vcpu *vcpu)
{
	const unsigned int slices = vcpu_sve_slices(vcpu);

	if (!vcpu_has_sve(vcpu))
		return 0;

	/* Policed by KVM_GET_REG_LIST: */
	WARN_ON(!kvm_arm_vcpu_sve_finalized(vcpu));

	return slices * (SVE_NUM_PREGS + SVE_NUM_ZREGS + 1 /* FFR */)
		+ 1; /* KVM_REG_ARM64_SVE_VLS */
}

static int copy_sve_reg_indices(const struct kvm_vcpu *vcpu,
				u64 __user *uindices)
{
	const unsigned int slices = vcpu_sve_slices(vcpu);
	u64 reg;
	unsigned int i, n;
	int num_regs = 0;

	if (!vcpu_has_sve(vcpu))
		return 0;

	/* Policed by KVM_GET_REG_LIST: */
	WARN_ON(!kvm_arm_vcpu_sve_finalized(vcpu));

	/*
	 * Enumerate this first, so that userspace can save/restore in
	 * the order reported by KVM_GET_REG_LIST:
	 */
	reg = KVM_REG_ARM64_SVE_VLS;
	if (put_user(reg, uindices++))
		return -EFAULT;
	++num_regs;

	for (i = 0; i < slices; i++) {
		for (n = 0; n < SVE_NUM_ZREGS; n++) {
			reg = KVM_REG_ARM64_SVE_ZREG(n, i);
			if (put_user(reg, uindices++))
				return -EFAULT;
			num_regs++;
		}

		for (n = 0; n < SVE_NUM_PREGS; n++) {
			reg = KVM_REG_ARM64_SVE_PREG(n, i);
			if (put_user(reg, uindices++))
				return -EFAULT;
			num_regs++;
		}

		reg = KVM_REG_ARM64_SVE_FFR(i);
		if (put_user(reg, uindices++))
			return -EFAULT;
		num_regs++;
	}

	return num_regs;
}

/**
 * kvm_arm_num_regs - how many registers do we present via KVM_GET_ONE_REG
 *
 * This is for all registers.
 */
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
{
	unsigned long res = 0;

	res += num_core_regs(vcpu);
	res += num_sve_regs(vcpu);
	res += kvm_arm_num_sys_reg_descs(vcpu);
	res += kvm_arm_get_fw_num_regs(vcpu);
	res += NUM_TIMER_REGS;

	return res;
}

/**
 * kvm_arm_copy_reg_indices - get indices of all registers.
 *
 * We do core registers right here, then we append system regs.
 */
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	int ret;

	ret = copy_core_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_sve_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = kvm_arm_copy_fw_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += kvm_arm_get_fw_num_regs(vcpu);

	ret = copy_timer_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += NUM_TIMER_REGS;

	return kvm_arm_copy_sys_reg_indices(vcpu, uindices);
}

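/*
 * KVM_GET_ONE_REG/KVM_SET_ONE_REG entry points: dispatch on the coproc
 * field of the register ID, then fall back to timer and system registers.
 */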
int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	/* We currently use nothing arch-specific in upper 32 bits */
	if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32)
		return -EINVAL;

	switch (reg->id & KVM_REG_ARM_COPROC_MASK) {
	case KVM_REG_ARM_CORE:	return get_core_reg(vcpu, reg);
	case KVM_REG_ARM_FW:	return kvm_arm_get_fw_reg(vcpu, reg);
	case KVM_REG_ARM64_SVE:	return get_sve_reg(vcpu, reg);
	}

	if (is_timer_reg(reg->id))
		return get_timer_reg(vcpu, reg);

	return kvm_arm_sys_reg_get_reg(vcpu, reg);
}

int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	/* We currently use nothing arch-specific in upper 32 bits */
	if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32)
		return -EINVAL;

	switch (reg->id & KVM_REG_ARM_COPROC_MASK) {
	case KVM_REG_ARM_CORE:	return set_core_reg(vcpu, reg);
	case KVM_REG_ARM_FW:	return kvm_arm_set_fw_reg(vcpu, reg);
	case KVM_REG_ARM64_SVE:	return set_sve_reg(vcpu, reg);
	}

	if (is_timer_reg(reg->id))
		return set_timer_reg(vcpu, reg);

	return kvm_arm_sys_reg_set_reg(vcpu, reg);
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -EINVAL;
}

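/*
 * vcpu event accessors, used by KVM_GET/SET_VCPU_EVENTS to migrate pending
 * SError state (and to inject an external data abort on the set side).
 */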
int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events)
{
	events->exception.serror_pending = !!(vcpu->arch.hcr_el2 & HCR_VSE);
	events->exception.serror_has_esr = cpus_have_const_cap(ARM64_HAS_RAS_EXTN);

	if (events->exception.serror_pending && events->exception.serror_has_esr)
		events->exception.serror_esr = vcpu_get_vsesr(vcpu);

	/*
	 * We never return a pending ext_dabt here because we deliver it to
	 * the virtual CPU directly when setting the event and it's no longer
	 * 'pending' at this point.
	 */

	return 0;
}

int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events)
{
	bool serror_pending = events->exception.serror_pending;
	bool has_esr = events->exception.serror_has_esr;
	bool ext_dabt_pending = events->exception.ext_dabt_pending;

	if (serror_pending && has_esr) {
		if (!cpus_have_const_cap(ARM64_HAS_RAS_EXTN))
			return -EINVAL;

		if (!((events->exception.serror_esr) & ~ESR_ELx_ISS_MASK))
			kvm_set_sei_esr(vcpu, events->exception.serror_esr);
		else
			return -EINVAL;
	} else if (serror_pending) {
		kvm_inject_vabt(vcpu);
	}

	if (ext_dabt_pending)
		kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));

	return 0;
}

int __attribute_const__ kvm_target_cpu(void)
{
	unsigned long implementor = read_cpuid_implementor();
	unsigned long part_number = read_cpuid_part_number();

	switch (implementor) {
	case ARM_CPU_IMP_ARM:
		switch (part_number) {
		case ARM_CPU_PART_AEM_V8:
			return KVM_ARM_TARGET_AEM_V8;
		case ARM_CPU_PART_FOUNDATION:
			return KVM_ARM_TARGET_FOUNDATION_V8;
		case ARM_CPU_PART_CORTEX_A53:
			return KVM_ARM_TARGET_CORTEX_A53;
		case ARM_CPU_PART_CORTEX_A57:
			return KVM_ARM_TARGET_CORTEX_A57;
		}
		break;
	case ARM_CPU_IMP_APM:
		switch (part_number) {
		case APM_CPU_PART_POTENZA:
			return KVM_ARM_TARGET_XGENE_POTENZA;
		}
		break;
	}

	/* Return a default generic target */
	return KVM_ARM_TARGET_GENERIC_V8;
}

int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init)
{
	int target = kvm_target_cpu();

	if (target < 0)
		return -ENODEV;

	memset(init, 0, sizeof(*init));

	/*
	 * For now, we don't return any features.
	 * In future, we might use features to return target
	 * specific features available for the preferred
	 * target type.
	 */
	init->target = (__u32)target;

	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL;
}

#define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE |    \
			    KVM_GUESTDBG_USE_SW_BP | \
			    KVM_GUESTDBG_USE_HW | \
			    KVM_GUESTDBG_SINGLESTEP)

/**
 * kvm_arch_vcpu_ioctl_set_guest_debug - set up guest debugging
 * @vcpu:	the vcpu pointer
 * @dbg:	the ioctl data buffer
 *
 * This sets up and enables the VM for guest debugging. Userspace
 * passes in a control flag to enable different debug types and
 * potentially other architecture specific information in the rest of
 * the structure.
 */
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int ret = 0;

	trace_kvm_set_guest_debug(vcpu, dbg->control);

	if (dbg->control & ~KVM_GUESTDBG_VALID_MASK) {
		ret = -EINVAL;
		goto out;
	}

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;

		/* Hardware assisted Break and Watch points */
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW) {
			vcpu->arch.external_debug_state = dbg->arch;
		}

	} else {
		/* If not enabled clear all flags */
		vcpu->guest_debug = 0;
	}

out:
	return ret;
}

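/*
 * Per-vcpu device attribute accessors, dispatched by attribute group
 * (PMUv3, timer, PV time).
 */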
int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_ARM_VCPU_PMU_V3_CTRL:
		ret = kvm_arm_pmu_v3_set_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_TIMER_CTRL:
		ret = kvm_arm_timer_set_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_PVTIME_CTRL:
		ret = kvm_arm_pvtime_set_attr(vcpu, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_ARM_VCPU_PMU_V3_CTRL:
		ret = kvm_arm_pmu_v3_get_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_TIMER_CTRL:
		ret = kvm_arm_timer_get_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_PVTIME_CTRL:
		ret = kvm_arm_pvtime_get_attr(vcpu, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_ARM_VCPU_PMU_V3_CTRL:
		ret = kvm_arm_pmu_v3_has_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_TIMER_CTRL:
		ret = kvm_arm_timer_has_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_PVTIME_CTRL:
		ret = kvm_arm_pvtime_has_attr(vcpu, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}
);