4 * Copyright (C) 2015 ARM Ltd.
5 * Author: Marc Zyngier <marc.zyngier@arm.com>
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 #include <linux/kvm_host.h>
17 #include <kvm/arm_vgic.h>
18 #include <linux/uaccess.h>
19 #include <asm/kvm_mmu.h>
20 #include <asm/cputype.h>
25 int vgic_check_ioaddr(struct kvm
*kvm
, phys_addr_t
*ioaddr
,
26 phys_addr_t addr
, phys_addr_t alignment
)
28 if (addr
& ~KVM_PHYS_MASK
)
31 if (!IS_ALIGNED(addr
, alignment
))
34 if (!IS_VGIC_ADDR_UNDEF(*ioaddr
))
40 static int vgic_check_type(struct kvm
*kvm
, int type_needed
)
42 if (kvm
->arch
.vgic
.vgic_model
!= type_needed
)
49 * kvm_vgic_addr - set or get vgic VM base addresses
50 * @kvm: pointer to the vm struct
51 * @type: the VGIC addr type, one of KVM_VGIC_V[23]_ADDR_TYPE_XXX
52 * @addr: pointer to address value
53 * @write: if true set the address in the VM address space, if false read the
56 * Set or get the vgic base addresses for the distributor and the virtual CPU
57 * interface in the VM physical address space. These addresses are properties
58 * of the emulated core/SoC and therefore user space initially knows this
60 * Check them for sanity (alignment, double assignment). We can't check for
61 * overlapping regions in case of a virtual GICv3 here, since we don't know
62 * the number of VCPUs yet, so we defer this check to map_resources().
64 int kvm_vgic_addr(struct kvm
*kvm
, unsigned long type
, u64
*addr
, bool write
)
67 struct vgic_dist
*vgic
= &kvm
->arch
.vgic
;
68 phys_addr_t
*addr_ptr
, alignment
;
70 mutex_lock(&kvm
->lock
);
72 case KVM_VGIC_V2_ADDR_TYPE_DIST
:
73 r
= vgic_check_type(kvm
, KVM_DEV_TYPE_ARM_VGIC_V2
);
74 addr_ptr
= &vgic
->vgic_dist_base
;
77 case KVM_VGIC_V2_ADDR_TYPE_CPU
:
78 r
= vgic_check_type(kvm
, KVM_DEV_TYPE_ARM_VGIC_V2
);
79 addr_ptr
= &vgic
->vgic_cpu_base
;
82 case KVM_VGIC_V3_ADDR_TYPE_DIST
:
83 r
= vgic_check_type(kvm
, KVM_DEV_TYPE_ARM_VGIC_V3
);
84 addr_ptr
= &vgic
->vgic_dist_base
;
87 case KVM_VGIC_V3_ADDR_TYPE_REDIST
:
88 r
= vgic_check_type(kvm
, KVM_DEV_TYPE_ARM_VGIC_V3
);
92 r
= vgic_v3_set_redist_base(kvm
, *addr
);
95 addr_ptr
= &vgic
->vgic_redist_base
;
105 r
= vgic_check_ioaddr(kvm
, addr_ptr
, *addr
, alignment
);
113 mutex_unlock(&kvm
->lock
);
117 static int vgic_set_common_attr(struct kvm_device
*dev
,
118 struct kvm_device_attr
*attr
)
122 switch (attr
->group
) {
123 case KVM_DEV_ARM_VGIC_GRP_ADDR
: {
124 u64 __user
*uaddr
= (u64 __user
*)(long)attr
->addr
;
126 unsigned long type
= (unsigned long)attr
->attr
;
128 if (copy_from_user(&addr
, uaddr
, sizeof(addr
)))
131 r
= kvm_vgic_addr(dev
->kvm
, type
, &addr
, true);
132 return (r
== -ENODEV
) ? -ENXIO
: r
;
134 case KVM_DEV_ARM_VGIC_GRP_NR_IRQS
: {
135 u32 __user
*uaddr
= (u32 __user
*)(long)attr
->addr
;
139 if (get_user(val
, uaddr
))
144 * - at least 32 SPIs on top of the 16 SGIs and 16 PPIs
145 * - at most 1024 interrupts
146 * - a multiple of 32 interrupts
148 if (val
< (VGIC_NR_PRIVATE_IRQS
+ 32) ||
149 val
> VGIC_MAX_RESERVED
||
153 mutex_lock(&dev
->kvm
->lock
);
155 if (vgic_ready(dev
->kvm
) || dev
->kvm
->arch
.vgic
.nr_spis
)
158 dev
->kvm
->arch
.vgic
.nr_spis
=
159 val
- VGIC_NR_PRIVATE_IRQS
;
161 mutex_unlock(&dev
->kvm
->lock
);
165 case KVM_DEV_ARM_VGIC_GRP_CTRL
: {
166 switch (attr
->attr
) {
167 case KVM_DEV_ARM_VGIC_CTRL_INIT
:
168 mutex_lock(&dev
->kvm
->lock
);
169 r
= vgic_init(dev
->kvm
);
170 mutex_unlock(&dev
->kvm
->lock
);
180 static int vgic_get_common_attr(struct kvm_device
*dev
,
181 struct kvm_device_attr
*attr
)
185 switch (attr
->group
) {
186 case KVM_DEV_ARM_VGIC_GRP_ADDR
: {
187 u64 __user
*uaddr
= (u64 __user
*)(long)attr
->addr
;
189 unsigned long type
= (unsigned long)attr
->attr
;
191 r
= kvm_vgic_addr(dev
->kvm
, type
, &addr
, false);
193 return (r
== -ENODEV
) ? -ENXIO
: r
;
195 if (copy_to_user(uaddr
, &addr
, sizeof(addr
)))
199 case KVM_DEV_ARM_VGIC_GRP_NR_IRQS
: {
200 u32 __user
*uaddr
= (u32 __user
*)(long)attr
->addr
;
202 r
= put_user(dev
->kvm
->arch
.vgic
.nr_spis
+
203 VGIC_NR_PRIVATE_IRQS
, uaddr
);
211 static int vgic_create(struct kvm_device
*dev
, u32 type
)
213 return kvm_vgic_create(dev
->kvm
, type
);
/*
 * kvm_device_ops .destroy hook: only the device wrapper is freed here;
 * the VGIC state itself lives until the VM is torn down.
 */
static void vgic_destroy(struct kvm_device *dev)
{
	kfree(dev);
}
221 int kvm_register_vgic_device(unsigned long type
)
226 case KVM_DEV_TYPE_ARM_VGIC_V2
:
227 ret
= kvm_register_device_ops(&kvm_arm_vgic_v2_ops
,
228 KVM_DEV_TYPE_ARM_VGIC_V2
);
230 case KVM_DEV_TYPE_ARM_VGIC_V3
:
231 ret
= kvm_register_device_ops(&kvm_arm_vgic_v3_ops
,
232 KVM_DEV_TYPE_ARM_VGIC_V3
);
236 ret
= kvm_vgic_register_its_device();
243 int vgic_v2_parse_attr(struct kvm_device
*dev
, struct kvm_device_attr
*attr
,
244 struct vgic_reg_attr
*reg_attr
)
248 cpuid
= (attr
->attr
& KVM_DEV_ARM_VGIC_CPUID_MASK
) >>
249 KVM_DEV_ARM_VGIC_CPUID_SHIFT
;
251 if (cpuid
>= atomic_read(&dev
->kvm
->online_vcpus
))
254 reg_attr
->vcpu
= kvm_get_vcpu(dev
->kvm
, cpuid
);
255 reg_attr
->addr
= attr
->attr
& KVM_DEV_ARM_VGIC_OFFSET_MASK
;
260 /* unlocks vcpus from @vcpu_lock_idx and smaller */
261 static void unlock_vcpus(struct kvm
*kvm
, int vcpu_lock_idx
)
263 struct kvm_vcpu
*tmp_vcpu
;
265 for (; vcpu_lock_idx
>= 0; vcpu_lock_idx
--) {
266 tmp_vcpu
= kvm_get_vcpu(kvm
, vcpu_lock_idx
);
267 mutex_unlock(&tmp_vcpu
->mutex
);
271 void unlock_all_vcpus(struct kvm
*kvm
)
273 unlock_vcpus(kvm
, atomic_read(&kvm
->online_vcpus
) - 1);
276 /* Returns true if all vcpus were locked, false otherwise */
277 bool lock_all_vcpus(struct kvm
*kvm
)
279 struct kvm_vcpu
*tmp_vcpu
;
283 * Any time a vcpu is run, vcpu_load is called which tries to grab the
284 * vcpu->mutex. By grabbing the vcpu->mutex of all VCPUs we ensure
285 * that no other VCPUs are run and fiddle with the vgic state while we
288 kvm_for_each_vcpu(c
, tmp_vcpu
, kvm
) {
289 if (!mutex_trylock(&tmp_vcpu
->mutex
)) {
290 unlock_vcpus(kvm
, c
- 1);
299 * vgic_v2_attr_regs_access - allows user space to access VGIC v2 state
301 * @dev: kvm device handle
302 * @attr: kvm device attribute
303 * @reg: address the value is read or written
304 * @is_write: true if userspace is writing a register
306 static int vgic_v2_attr_regs_access(struct kvm_device
*dev
,
307 struct kvm_device_attr
*attr
,
308 u32
*reg
, bool is_write
)
310 struct vgic_reg_attr reg_attr
;
312 struct kvm_vcpu
*vcpu
;
315 ret
= vgic_v2_parse_attr(dev
, attr
, ®_attr
);
319 vcpu
= reg_attr
.vcpu
;
320 addr
= reg_attr
.addr
;
322 mutex_lock(&dev
->kvm
->lock
);
324 ret
= vgic_init(dev
->kvm
);
328 if (!lock_all_vcpus(dev
->kvm
)) {
333 switch (attr
->group
) {
334 case KVM_DEV_ARM_VGIC_GRP_CPU_REGS
:
335 ret
= vgic_v2_cpuif_uaccess(vcpu
, is_write
, addr
, reg
);
337 case KVM_DEV_ARM_VGIC_GRP_DIST_REGS
:
338 ret
= vgic_v2_dist_uaccess(vcpu
, is_write
, addr
, reg
);
345 unlock_all_vcpus(dev
->kvm
);
347 mutex_unlock(&dev
->kvm
->lock
);
351 static int vgic_v2_set_attr(struct kvm_device
*dev
,
352 struct kvm_device_attr
*attr
)
356 ret
= vgic_set_common_attr(dev
, attr
);
360 switch (attr
->group
) {
361 case KVM_DEV_ARM_VGIC_GRP_DIST_REGS
:
362 case KVM_DEV_ARM_VGIC_GRP_CPU_REGS
: {
363 u32 __user
*uaddr
= (u32 __user
*)(long)attr
->addr
;
366 if (get_user(reg
, uaddr
))
369 return vgic_v2_attr_regs_access(dev
, attr
, ®
, true);
376 static int vgic_v2_get_attr(struct kvm_device
*dev
,
377 struct kvm_device_attr
*attr
)
381 ret
= vgic_get_common_attr(dev
, attr
);
385 switch (attr
->group
) {
386 case KVM_DEV_ARM_VGIC_GRP_DIST_REGS
:
387 case KVM_DEV_ARM_VGIC_GRP_CPU_REGS
: {
388 u32 __user
*uaddr
= (u32 __user
*)(long)attr
->addr
;
391 ret
= vgic_v2_attr_regs_access(dev
, attr
, ®
, false);
394 return put_user(reg
, uaddr
);
401 static int vgic_v2_has_attr(struct kvm_device
*dev
,
402 struct kvm_device_attr
*attr
)
404 switch (attr
->group
) {
405 case KVM_DEV_ARM_VGIC_GRP_ADDR
:
406 switch (attr
->attr
) {
407 case KVM_VGIC_V2_ADDR_TYPE_DIST
:
408 case KVM_VGIC_V2_ADDR_TYPE_CPU
:
412 case KVM_DEV_ARM_VGIC_GRP_DIST_REGS
:
413 case KVM_DEV_ARM_VGIC_GRP_CPU_REGS
:
414 return vgic_v2_has_attr_regs(dev
, attr
);
415 case KVM_DEV_ARM_VGIC_GRP_NR_IRQS
:
417 case KVM_DEV_ARM_VGIC_GRP_CTRL
:
418 switch (attr
->attr
) {
419 case KVM_DEV_ARM_VGIC_CTRL_INIT
:
426 struct kvm_device_ops kvm_arm_vgic_v2_ops
= {
427 .name
= "kvm-arm-vgic-v2",
428 .create
= vgic_create
,
429 .destroy
= vgic_destroy
,
430 .set_attr
= vgic_v2_set_attr
,
431 .get_attr
= vgic_v2_get_attr
,
432 .has_attr
= vgic_v2_has_attr
,
435 int vgic_v3_parse_attr(struct kvm_device
*dev
, struct kvm_device_attr
*attr
,
436 struct vgic_reg_attr
*reg_attr
)
438 unsigned long vgic_mpidr
, mpidr_reg
;
441 * For KVM_DEV_ARM_VGIC_GRP_DIST_REGS group,
442 * attr might not hold MPIDR. Hence assume vcpu0.
444 if (attr
->group
!= KVM_DEV_ARM_VGIC_GRP_DIST_REGS
) {
445 vgic_mpidr
= (attr
->attr
& KVM_DEV_ARM_VGIC_V3_MPIDR_MASK
) >>
446 KVM_DEV_ARM_VGIC_V3_MPIDR_SHIFT
;
448 mpidr_reg
= VGIC_TO_MPIDR(vgic_mpidr
);
449 reg_attr
->vcpu
= kvm_mpidr_to_vcpu(dev
->kvm
, mpidr_reg
);
451 reg_attr
->vcpu
= kvm_get_vcpu(dev
->kvm
, 0);
457 reg_attr
->addr
= attr
->attr
& KVM_DEV_ARM_VGIC_OFFSET_MASK
;
463 * vgic_v3_attr_regs_access - allows user space to access VGIC v3 state
465 * @dev: kvm device handle
466 * @attr: kvm device attribute
467 * @reg: address the value is read or written
468 * @is_write: true if userspace is writing a register
470 static int vgic_v3_attr_regs_access(struct kvm_device
*dev
,
471 struct kvm_device_attr
*attr
,
472 u64
*reg
, bool is_write
)
474 struct vgic_reg_attr reg_attr
;
476 struct kvm_vcpu
*vcpu
;
480 ret
= vgic_v3_parse_attr(dev
, attr
, ®_attr
);
484 vcpu
= reg_attr
.vcpu
;
485 addr
= reg_attr
.addr
;
487 mutex_lock(&dev
->kvm
->lock
);
489 if (unlikely(!vgic_initialized(dev
->kvm
))) {
494 if (!lock_all_vcpus(dev
->kvm
)) {
499 switch (attr
->group
) {
500 case KVM_DEV_ARM_VGIC_GRP_DIST_REGS
:
504 ret
= vgic_v3_dist_uaccess(vcpu
, is_write
, addr
, &tmp32
);
508 case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS
:
512 ret
= vgic_v3_redist_uaccess(vcpu
, is_write
, addr
, &tmp32
);
516 case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS
: {
519 regid
= (attr
->attr
& KVM_DEV_ARM_VGIC_SYSREG_INSTR_MASK
);
520 ret
= vgic_v3_cpu_sysregs_uaccess(vcpu
, is_write
,
524 case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO
: {
525 unsigned int info
, intid
;
527 info
= (attr
->attr
& KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK
) >>
528 KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT
;
529 if (info
== VGIC_LEVEL_INFO_LINE_LEVEL
) {
531 KVM_DEV_ARM_VGIC_LINE_LEVEL_INTID_MASK
;
532 ret
= vgic_v3_line_level_info_uaccess(vcpu
, is_write
,
544 unlock_all_vcpus(dev
->kvm
);
546 mutex_unlock(&dev
->kvm
->lock
);
550 static int vgic_v3_set_attr(struct kvm_device
*dev
,
551 struct kvm_device_attr
*attr
)
555 ret
= vgic_set_common_attr(dev
, attr
);
559 switch (attr
->group
) {
560 case KVM_DEV_ARM_VGIC_GRP_DIST_REGS
:
561 case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS
: {
562 u32 __user
*uaddr
= (u32 __user
*)(long)attr
->addr
;
566 if (get_user(tmp32
, uaddr
))
570 return vgic_v3_attr_regs_access(dev
, attr
, ®
, true);
572 case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS
: {
573 u64 __user
*uaddr
= (u64 __user
*)(long)attr
->addr
;
576 if (get_user(reg
, uaddr
))
579 return vgic_v3_attr_regs_access(dev
, attr
, ®
, true);
581 case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO
: {
582 u32 __user
*uaddr
= (u32 __user
*)(long)attr
->addr
;
586 if (get_user(tmp32
, uaddr
))
590 return vgic_v3_attr_regs_access(dev
, attr
, ®
, true);
592 case KVM_DEV_ARM_VGIC_GRP_CTRL
: {
595 switch (attr
->attr
) {
596 case KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES
:
597 mutex_lock(&dev
->kvm
->lock
);
599 if (!lock_all_vcpus(dev
->kvm
)) {
600 mutex_unlock(&dev
->kvm
->lock
);
603 ret
= vgic_v3_save_pending_tables(dev
->kvm
);
604 unlock_all_vcpus(dev
->kvm
);
605 mutex_unlock(&dev
->kvm
->lock
);
614 static int vgic_v3_get_attr(struct kvm_device
*dev
,
615 struct kvm_device_attr
*attr
)
619 ret
= vgic_get_common_attr(dev
, attr
);
623 switch (attr
->group
) {
624 case KVM_DEV_ARM_VGIC_GRP_DIST_REGS
:
625 case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS
: {
626 u32 __user
*uaddr
= (u32 __user
*)(long)attr
->addr
;
630 ret
= vgic_v3_attr_regs_access(dev
, attr
, ®
, false);
634 return put_user(tmp32
, uaddr
);
636 case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS
: {
637 u64 __user
*uaddr
= (u64 __user
*)(long)attr
->addr
;
640 ret
= vgic_v3_attr_regs_access(dev
, attr
, ®
, false);
643 return put_user(reg
, uaddr
);
645 case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO
: {
646 u32 __user
*uaddr
= (u32 __user
*)(long)attr
->addr
;
650 ret
= vgic_v3_attr_regs_access(dev
, attr
, ®
, false);
654 return put_user(tmp32
, uaddr
);
660 static int vgic_v3_has_attr(struct kvm_device
*dev
,
661 struct kvm_device_attr
*attr
)
663 switch (attr
->group
) {
664 case KVM_DEV_ARM_VGIC_GRP_ADDR
:
665 switch (attr
->attr
) {
666 case KVM_VGIC_V3_ADDR_TYPE_DIST
:
667 case KVM_VGIC_V3_ADDR_TYPE_REDIST
:
671 case KVM_DEV_ARM_VGIC_GRP_DIST_REGS
:
672 case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS
:
673 case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS
:
674 return vgic_v3_has_attr_regs(dev
, attr
);
675 case KVM_DEV_ARM_VGIC_GRP_NR_IRQS
:
677 case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO
: {
678 if (((attr
->attr
& KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK
) >>
679 KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT
) ==
680 VGIC_LEVEL_INFO_LINE_LEVEL
)
684 case KVM_DEV_ARM_VGIC_GRP_CTRL
:
685 switch (attr
->attr
) {
686 case KVM_DEV_ARM_VGIC_CTRL_INIT
:
688 case KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES
:
695 struct kvm_device_ops kvm_arm_vgic_v3_ops
= {
696 .name
= "kvm-arm-vgic-v3",
697 .create
= vgic_create
,
698 .destroy
= vgic_destroy
,
699 .set_attr
= vgic_v3_set_attr
,
700 .get_attr
= vgic_v3_get_attr
,
701 .has_attr
= vgic_v3_has_attr
,