// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <kvm/arm_psci.h>
#include <asm/cputype.h>
#include <linux/uaccess.h>
#include <asm/kvm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>

#define VM_STAT(x) { #x, offsetof(struct kvm, stat.x), KVM_STAT_VM }
#define VCPU_STAT(x) { #x, offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU }

struct kvm_stats_debugfs_item debugfs_entries[] = {
	VCPU_STAT(halt_successful_poll),
	VCPU_STAT(halt_attempted_poll),
	VCPU_STAT(halt_poll_invalid),
	VCPU_STAT(halt_wakeup),
	VCPU_STAT(hvc_exit_stat),
	VCPU_STAT(wfe_exit_stat),
	VCPU_STAT(wfi_exit_stat),
	VCPU_STAT(mmio_exit_user),
	VCPU_STAT(mmio_exit_kernel),
	VCPU_STAT(exits),
	{ NULL }
};

static u64 core_reg_offset_from_id(u64 id)
{
	return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
}

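/*
 * Worked example (illustrative): the ID that userspace passes for core
 * register slot 0 (r0 in usr_regs) is
 *	KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_CORE | 0
 * and core_reg_offset_from_id() masks off everything but the trailing
 * index, leaving the u32-granular offset into struct kvm_regs.
 */
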
static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	u32 __user *uaddr = (u32 __user *)(long)reg->addr;
	struct kvm_regs *regs = &vcpu->arch.ctxt.gp_regs;
	u64 off;

	if (KVM_REG_SIZE(reg->id) != 4)
		return -ENOENT;

	/* Our ID is an index into the kvm_regs struct. */
	off = core_reg_offset_from_id(reg->id);
	if (off >= sizeof(*regs) / KVM_REG_SIZE(reg->id))
		return -ENOENT;

	return put_user(((u32 *)regs)[off], uaddr);
}

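/*
 * For context, a sketch of how userspace reaches get_core_reg() (the
 * vcpu fd and register index come from the caller's own setup, not from
 * this file):
 *
 *	__u32 val;
 *	struct kvm_one_reg one_reg = {
 *		.id   = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_CORE | idx,
 *		.addr = (__u64)(unsigned long)&val,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &one_reg);
 */
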
static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	u32 __user *uaddr = (u32 __user *)(long)reg->addr;
	struct kvm_regs *regs = &vcpu->arch.ctxt.gp_regs;
	u64 off, val;

	if (KVM_REG_SIZE(reg->id) != 4)
		return -ENOENT;

	/* Our ID is an index into the kvm_regs struct. */
	off = core_reg_offset_from_id(reg->id);
	if (off >= sizeof(*regs) / KVM_REG_SIZE(reg->id))
		return -ENOENT;

	if (get_user(val, uaddr) != 0)
		return -EFAULT;

	/* Only accept writes that leave the vcpu in a supported CPU mode. */
	if (off == KVM_REG_ARM_CORE_REG(usr_regs.ARM_cpsr)) {
		unsigned long mode = val & MODE_MASK;
		switch (mode) {
		case USR_MODE:
		case FIQ_MODE:
		case IRQ_MODE:
		case SVC_MODE:
		case ABT_MODE:
		case UND_MODE:
			break;
		default:
			return -EINVAL;
		}
	}

	((u32 *)regs)[off] = val;
	return 0;
}

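/*
 * Note that the CPSR filter above only validates the mode field: a write
 * that selects, say, HYP_MODE in the low bits fails with -EINVAL instead
 * of letting the guest resume in an unsupported mode.
 */
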
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	return -EINVAL;
}

#define NUM_TIMER_REGS 3

static bool is_timer_reg(u64 index)
{
	switch (index) {
	case KVM_REG_ARM_TIMER_CTL:
	case KVM_REG_ARM_TIMER_CNT:
	case KVM_REG_ARM_TIMER_CVAL:
		return true;
	}
	return false;
}

static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	if (put_user(KVM_REG_ARM_TIMER_CTL, uindices))
		return -EFAULT;
	uindices++;
	if (put_user(KVM_REG_ARM_TIMER_CNT, uindices))
		return -EFAULT;
	uindices++;
	if (put_user(KVM_REG_ARM_TIMER_CVAL, uindices))
		return -EFAULT;

	return 0;
}

static int set_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(long)reg->addr;
	u64 val;
	int ret;

	ret = copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id));
	if (ret != 0)
		return -EFAULT;

	return kvm_arm_timer_set_reg(vcpu, reg->id, val);
}

static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(long)reg->addr;
	u64 val;

	val = kvm_arm_timer_get_reg(vcpu, reg->id);
	return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? -EFAULT : 0;
}

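/*
 * struct kvm_regs is presented to userspace as an array of 32-bit slots,
 * so the core register count below is just its size in u32 words.
 */
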
static unsigned long num_core_regs(void)
{
	return sizeof(struct kvm_regs) / sizeof(u32);
}

/**
 * kvm_arm_num_regs - how many registers do we present via KVM_GET_ONE_REG
 *
 * This is for all registers.
 */
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
{
	return num_core_regs() + kvm_arm_num_coproc_regs(vcpu)
		+ kvm_arm_get_fw_num_regs(vcpu)
		+ NUM_TIMER_REGS;
}

/**
 * kvm_arm_copy_reg_indices - get indices of all registers.
 *
 * We do core registers right here, then we append coproc regs.
 */
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	unsigned int i;
	const u64 core_reg = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_CORE;
	int ret;

	for (i = 0; i < sizeof(struct kvm_regs)/sizeof(u32); i++) {
		if (put_user(core_reg | i, uindices))
			return -EFAULT;
		uindices++;
	}

	ret = kvm_arm_copy_fw_reg_indices(vcpu, uindices);
	if (ret)
		return ret;
	uindices += kvm_arm_get_fw_num_regs(vcpu);

	ret = copy_timer_indices(vcpu, uindices);
	if (ret)
		return ret;
	uindices += NUM_TIMER_REGS;

	return kvm_arm_copy_coproc_indices(vcpu, uindices);
}

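/*
 * The index buffer built above is laid out as: core registers first,
 * then firmware registers, then the three timer registers, and finally
 * the coproc registers, mirroring the sum in kvm_arm_num_regs().
 */
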
int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	/* We currently use nothing arch-specific in upper 32 bits */
	if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM >> 32)
		return -EINVAL;

	/* Register group 16 means we want a core register. */
	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
		return get_core_reg(vcpu, reg);

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW)
		return kvm_arm_get_fw_reg(vcpu, reg);

	if (is_timer_reg(reg->id))
		return get_timer_reg(vcpu, reg);

	return kvm_arm_coproc_get_reg(vcpu, reg);
}

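/*
 * Timer registers are matched by full ID before falling through to the
 * generic coproc tables, so they are serviced by the arch timer code
 * rather than by coproc emulation.
 */
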
int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	/* We currently use nothing arch-specific in upper 32 bits */
	if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM >> 32)
		return -EINVAL;

	/* Register group 16 means we set a core register. */
	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
		return set_core_reg(vcpu, reg);

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW)
		return kvm_arm_set_fw_reg(vcpu, reg);

	if (is_timer_reg(reg->id))
		return set_timer_reg(vcpu, reg);

	return kvm_arm_coproc_set_reg(vcpu, reg);
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -EINVAL;
}

int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events)
{
	events->exception.serror_pending = !!(*vcpu_hcr(vcpu) & HCR_VA);

	/*
	 * We never return a pending ext_dabt here because we deliver it to
	 * the virtual CPU directly when setting the event and it's no longer
	 * 'pending' at this point.
	 */

	return 0;
}

int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events)
{
	bool serror_pending = events->exception.serror_pending;
	bool has_esr = events->exception.serror_has_esr;
	bool ext_dabt_pending = events->exception.ext_dabt_pending;

	if (serror_pending && has_esr)
		return -EINVAL;
	else if (serror_pending)
		kvm_inject_vabt(vcpu);

	if (ext_dabt_pending)
		kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));

	return 0;
}

int __attribute_const__ kvm_target_cpu(void)
{
	switch (read_cpuid_part()) {
	case ARM_CPU_PART_CORTEX_A7:
		return KVM_ARM_TARGET_CORTEX_A7;
	case ARM_CPU_PART_CORTEX_A15:
		return KVM_ARM_TARGET_CORTEX_A15;
	default:
		return -EINVAL;
	}
}

int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init)
{
	int target = kvm_target_cpu();

	if (target < 0)
		return -ENODEV;

	memset(init, 0, sizeof(*init));

	/*
	 * For now, we don't return any features.
	 * In future, we might use features to return target
	 * specific features available for the preferred
	 * target type.
	 */
	init->target = (__u32)target;

	return 0;
}

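/*
 * Typical flow (sketch): userspace calls the KVM_ARM_PREFERRED_TARGET VM
 * ioctl to fill a struct kvm_vcpu_init, then hands the result unchanged
 * to KVM_ARM_VCPU_INIT on each vcpu fd.
 */
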
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL;
}

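/*
 * The three handlers below back the KVM_SET_DEVICE_ATTR,
 * KVM_GET_DEVICE_ATTR and KVM_HAS_DEVICE_ATTR vcpu ioctls; only the
 * timer attribute group is wired up on this architecture.
 */
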
int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_ARM_VCPU_TIMER_CTRL:
		ret = kvm_arm_timer_set_attr(vcpu, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_ARM_VCPU_TIMER_CTRL:
		ret = kvm_arm_timer_get_attr(vcpu, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_ARM_VCPU_TIMER_CTRL:
		ret = kvm_arm_timer_has_attr(vcpu, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}