/*
 * Contains GICv2 specific emulation code, was in vgic.c before.
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/uaccess.h>

#include <linux/irqchip/arm-gic.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>

#include "vgic.h"
#define GICC_ARCH_VERSION_V2		0x2
static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg);
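/*
 * Each (vcpu, SGI) pair has one byte in dist->irq_sgi_sources.  Every bit
 * in that byte stands for one possible source CPU, so the emulation can
 * track which vcpus have an instance of the given SGI pending.
 */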
static u8 *vgic_get_sgi_sources(struct vgic_dist *dist, int vcpu_id, int sgi)
{
	return dist->irq_sgi_sources + vcpu_id * VGIC_NR_SGIS + sgi;
}
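/*
 * GICD_CTLR, GICD_TYPER and GICD_IIDR live in the first 12 bytes of the
 * distributor.  GICD_TYPER advertises CPUNumber (online vcpus - 1) in
 * bits [7:5] and ITLinesNumber (nr_irqs / 32 - 1) in bits [4:0], which is
 * exactly what handle_mmio_misc() computes below.
 */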
static bool handle_mmio_misc(struct kvm_vcpu *vcpu,
			     struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	u32 reg;
	u32 word_offset = offset & 3;

	switch (offset & ~3) {
	case 0:			/* GICD_CTLR */
		reg = vcpu->kvm->arch.vgic.enabled;
		vgic_reg_access(mmio, &reg, word_offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
		if (mmio->is_write) {
			vcpu->kvm->arch.vgic.enabled = reg & 1;
			vgic_update_state(vcpu->kvm);
			return true;
		}
		break;

	case 4:			/* GICD_TYPER */
		reg  = (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5;
		reg |= (vcpu->kvm->arch.vgic.nr_irqs >> 5) - 1;
		vgic_reg_access(mmio, &reg, word_offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
		break;

	case 8:			/* GICD_IIDR */
		reg = (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
		vgic_reg_access(mmio, &reg, word_offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
		break;
	}

	return false;
}
static bool handle_mmio_set_enable_reg(struct kvm_vcpu *vcpu,
				       struct kvm_exit_mmio *mmio,
				       phys_addr_t offset)
{
	return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
				      vcpu->vcpu_id, ACCESS_WRITE_SETBIT);
}
static bool handle_mmio_clear_enable_reg(struct kvm_vcpu *vcpu,
					 struct kvm_exit_mmio *mmio,
					 phys_addr_t offset)
{
	return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
				      vcpu->vcpu_id, ACCESS_WRITE_CLEARBIT);
}
static bool handle_mmio_set_pending_reg(struct kvm_vcpu *vcpu,
					struct kvm_exit_mmio *mmio,
					phys_addr_t offset)
{
	return vgic_handle_set_pending_reg(vcpu->kvm, mmio, offset,
					   vcpu->vcpu_id);
}
static bool handle_mmio_clear_pending_reg(struct kvm_vcpu *vcpu,
					  struct kvm_exit_mmio *mmio,
					  phys_addr_t offset)
{
	return vgic_handle_clear_pending_reg(vcpu->kvm, mmio, offset,
					     vcpu->vcpu_id);
}
static bool handle_mmio_set_active_reg(struct kvm_vcpu *vcpu,
				       struct kvm_exit_mmio *mmio,
				       phys_addr_t offset)
{
	return vgic_handle_set_active_reg(vcpu->kvm, mmio, offset,
					  vcpu->vcpu_id);
}
static bool handle_mmio_clear_active_reg(struct kvm_vcpu *vcpu,
					 struct kvm_exit_mmio *mmio,
					 phys_addr_t offset)
{
	return vgic_handle_clear_active_reg(vcpu->kvm, mmio, offset,
					    vcpu->vcpu_id);
}
static bool handle_mmio_priority_reg(struct kvm_vcpu *vcpu,
				     struct kvm_exit_mmio *mmio,
				     phys_addr_t offset)
{
	u32 *reg = vgic_bytemap_get_reg(&vcpu->kvm->arch.vgic.irq_priority,
					vcpu->vcpu_id, offset);
	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
	return false;
}
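/*
 * GICD_ITARGETSRn holds one byte per interrupt, and every bit in that byte
 * selects one possible target CPU, so one 32-bit register covers four
 * interrupts.
 */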
#define GICD_ITARGETSR_SIZE	32
#define GICD_CPUTARGETS_BITS	8
#define GICD_IRQS_PER_ITARGETSR	(GICD_ITARGETSR_SIZE / GICD_CPUTARGETS_BITS)
static u32 vgic_get_target_reg(struct kvm *kvm, int irq)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	int i;
	u32 val = 0;

	irq -= VGIC_NR_PRIVATE_IRQS;

	for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++)
		val |= 1 << (dist->irq_spi_cpu[irq + i] + i * 8);

	return val;
}
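/*
 * Example of the "pick the LSB" rule used below: a guest write of 0x06
 * (requesting CPU1 and CPU2) to an ITARGETSR byte gives ffs(0x06) = 2, so
 * target = 1 and the SPI is routed to CPU1 only; a zero byte falls back to
 * CPU0.
 */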
static void vgic_set_target_reg(struct kvm *kvm, u32 val, int irq)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int i, c;
	unsigned long *bmap;
	u32 target;

	irq -= VGIC_NR_PRIVATE_IRQS;

	/*
	 * Pick the LSB in each byte. This ensures we target exactly
	 * one vcpu per IRQ. If the byte is null, assume we target
	 * CPU0.
	 */
	for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++) {
		int shift = i * GICD_CPUTARGETS_BITS;

		target = ffs((val >> shift) & 0xffU);
		target = target ? (target - 1) : 0;
		dist->irq_spi_cpu[irq + i] = target;
		kvm_for_each_vcpu(c, vcpu, kvm) {
			bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[c]);
			if (c == target)
				set_bit(irq + i, bmap);
			else
				clear_bit(irq + i, bmap);
		}
	}
}
static bool handle_mmio_target_reg(struct kvm_vcpu *vcpu,
				   struct kvm_exit_mmio *mmio,
				   phys_addr_t offset)
{
	u32 reg;

	/* We treat the banked interrupts targets as read-only */
	if (offset < 32) {
		u32 roreg;

		roreg = 1 << vcpu->vcpu_id;
		roreg |= roreg << 8;
		roreg |= roreg << 16;

		vgic_reg_access(mmio, &roreg, offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
		return false;
	}

	reg = vgic_get_target_reg(vcpu->kvm, offset & ~3U);
	vgic_reg_access(mmio, &reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
	if (mmio->is_write) {
		vgic_set_target_reg(vcpu->kvm, reg, offset & ~3U);
		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}
static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
				struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	u32 *reg;

	reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
				  vcpu->vcpu_id, offset >> 1);

	return vgic_handle_cfg_reg(reg, mmio, offset);
}
static bool handle_mmio_sgi_reg(struct kvm_vcpu *vcpu,
				struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	u32 reg;

	vgic_reg_access(mmio, &reg, offset,
			ACCESS_READ_RAZ | ACCESS_WRITE_VALUE);
	if (mmio->is_write) {
		vgic_dispatch_sgi(vcpu, reg);
		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}
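/*
 * GICD_SPENDSGIRn and GICD_CPENDSGIRn cover four SGIs per 32-bit register,
 * one byte per SGI, and each bit in a byte corresponds to one source CPU,
 * so the handlers below operate directly on the per-vcpu SGI source bytes.
 */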
/* Handle reads of GICD_CPENDSGIRn and GICD_SPENDSGIRn */
static bool read_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
					struct kvm_exit_mmio *mmio,
					phys_addr_t offset)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int sgi;
	int min_sgi = (offset & ~0x3);
	int max_sgi = min_sgi + 3;
	int vcpu_id = vcpu->vcpu_id;
	u32 reg = 0;

	/* Copy source SGIs from distributor side */
	for (sgi = min_sgi; sgi <= max_sgi; sgi++) {
		u8 sources = *vgic_get_sgi_sources(dist, vcpu_id, sgi);

		reg |= ((u32)sources) << (8 * (sgi - min_sgi));
	}

	mmio_data_write(mmio, ~0, reg);
	return false;
}
static bool write_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
					 struct kvm_exit_mmio *mmio,
					 phys_addr_t offset, bool set)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int sgi;
	int min_sgi = (offset & ~0x3);
	int max_sgi = min_sgi + 3;
	int vcpu_id = vcpu->vcpu_id;
	u32 reg;
	bool updated = false;

	reg = mmio_data_read(mmio, ~0);

	/* Clear pending SGIs on the distributor */
	for (sgi = min_sgi; sgi <= max_sgi; sgi++) {
		u8 mask = reg >> (8 * (sgi - min_sgi));
		u8 *src = vgic_get_sgi_sources(dist, vcpu_id, sgi);

		if (set) {
			/* Setting bits that were not already set is an update */
			if ((*src & mask) != mask)
				updated = true;
			*src |= mask;
		} else {
			/* Clearing bits that were set is an update */
			if (*src & mask)
				updated = true;
			*src &= ~mask;
		}
	}

	if (updated)
		vgic_update_state(vcpu->kvm);

	return updated;
}
static bool handle_mmio_sgi_set(struct kvm_vcpu *vcpu,
				struct kvm_exit_mmio *mmio,
				phys_addr_t offset)
{
	if (!mmio->is_write)
		return read_set_clear_sgi_pend_reg(vcpu, mmio, offset);
	else
		return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, true);
}
static bool handle_mmio_sgi_clear(struct kvm_vcpu *vcpu,
				  struct kvm_exit_mmio *mmio,
				  phys_addr_t offset)
{
	if (!mmio->is_write)
		return read_set_clear_sgi_pend_reg(vcpu, mmio, offset);
	else
		return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, false);
}
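/*
 * Distributor register map: each entry pairs a register window with the
 * handler that emulates it; bits_per_irq tells the common vgic code how
 * many bits one interrupt occupies in that window.
 */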
static const struct vgic_io_range vgic_dist_ranges[] = {
	{
		.base		= GIC_DIST_CTRL,
		.len		= 12,
		.bits_per_irq	= 0,
		.handle_mmio	= handle_mmio_misc,
	},
	{
		.base		= GIC_DIST_IGROUP,
		.len		= VGIC_MAX_IRQS / 8,
		.bits_per_irq	= 1,
		.handle_mmio	= handle_mmio_raz_wi,
	},
	{
		.base		= GIC_DIST_ENABLE_SET,
		.len		= VGIC_MAX_IRQS / 8,
		.bits_per_irq	= 1,
		.handle_mmio	= handle_mmio_set_enable_reg,
	},
	{
		.base		= GIC_DIST_ENABLE_CLEAR,
		.len		= VGIC_MAX_IRQS / 8,
		.bits_per_irq	= 1,
		.handle_mmio	= handle_mmio_clear_enable_reg,
	},
	{
		.base		= GIC_DIST_PENDING_SET,
		.len		= VGIC_MAX_IRQS / 8,
		.bits_per_irq	= 1,
		.handle_mmio	= handle_mmio_set_pending_reg,
	},
	{
		.base		= GIC_DIST_PENDING_CLEAR,
		.len		= VGIC_MAX_IRQS / 8,
		.bits_per_irq	= 1,
		.handle_mmio	= handle_mmio_clear_pending_reg,
	},
	{
		.base		= GIC_DIST_ACTIVE_SET,
		.len		= VGIC_MAX_IRQS / 8,
		.bits_per_irq	= 1,
		.handle_mmio	= handle_mmio_set_active_reg,
	},
	{
		.base		= GIC_DIST_ACTIVE_CLEAR,
		.len		= VGIC_MAX_IRQS / 8,
		.bits_per_irq	= 1,
		.handle_mmio	= handle_mmio_clear_active_reg,
	},
	{
		.base		= GIC_DIST_PRI,
		.len		= VGIC_MAX_IRQS,
		.bits_per_irq	= 8,
		.handle_mmio	= handle_mmio_priority_reg,
	},
	{
		.base		= GIC_DIST_TARGET,
		.len		= VGIC_MAX_IRQS,
		.bits_per_irq	= 8,
		.handle_mmio	= handle_mmio_target_reg,
	},
	{
		.base		= GIC_DIST_CONFIG,
		.len		= VGIC_MAX_IRQS / 4,
		.bits_per_irq	= 2,
		.handle_mmio	= handle_mmio_cfg_reg,
	},
	{
		.base		= GIC_DIST_SOFTINT,
		.len		= 4,
		.handle_mmio	= handle_mmio_sgi_reg,
	},
	{
		.base		= GIC_DIST_SGI_PENDING_CLEAR,
		.len		= VGIC_NR_SGIS,
		.handle_mmio	= handle_mmio_sgi_clear,
	},
	{
		.base		= GIC_DIST_SGI_PENDING_SET,
		.len		= VGIC_NR_SGIS,
		.handle_mmio	= handle_mmio_sgi_set,
	},
	{}
};
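/*
 * GICD_SGIR layout as decoded below: SGIINTID in bits [3:0], CPUTargetList
 * in bits [23:16] and TargetListFilter in bits [25:24] (0: use the target
 * list, 1: all but self, 2: self only).
 */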
static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg)
{
	struct kvm *kvm = vcpu->kvm;
	struct vgic_dist *dist = &kvm->arch.vgic;
	int nrcpus = atomic_read(&kvm->online_vcpus);
	u8 target_cpus;
	int sgi, mode, c, vcpu_id;

	vcpu_id = vcpu->vcpu_id;

	sgi = reg & 0xf;
	target_cpus = (reg >> 16) & 0xff;
	mode = (reg >> 24) & 3;

	switch (mode) {
	case 0:
		if (!target_cpus)
			return;
		break;

	case 1:
		target_cpus = ((1 << nrcpus) - 1) & ~(1 << vcpu_id) & 0xff;
		break;

	case 2:
		target_cpus = 1 << vcpu_id;
		break;
	}

	kvm_for_each_vcpu(c, vcpu, kvm) {
		if (target_cpus & 1) {
			/* Flag the SGI as pending */
			vgic_dist_irq_set_pending(vcpu, sgi);
			*vgic_get_sgi_sources(dist, c, sgi) |= 1 << vcpu_id;
			kvm_debug("SGI%d from CPU%d to CPU%d\n",
				  sgi, vcpu_id, c);
		}

		target_cpus >>= 1;
	}
}
static bool vgic_v2_queue_sgi(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	unsigned long sources;
	int vcpu_id = vcpu->vcpu_id;
	int c;

	sources = *vgic_get_sgi_sources(dist, vcpu_id, irq);

	for_each_set_bit(c, &sources, dist->nr_cpus) {
		if (vgic_queue_irq(vcpu, c, irq))
			clear_bit(c, &sources);
	}

	*vgic_get_sgi_sources(dist, vcpu_id, irq) = sources;

	/*
	 * If the sources bitmap has been cleared it means that we
	 * could queue all the SGIs onto link registers (see the
	 * clear_bit above), and therefore we are done with them in
	 * our emulated gic and can get rid of them.
	 */
	if (!sources) {
		vgic_dist_irq_clear_pending(vcpu, irq);
		vgic_cpu_irq_clear(vcpu, irq);
		return true;
	}

	return false;
}
/**
 * kvm_vgic_map_resources - Configure global VGIC state before running any VCPUs
 * @kvm: pointer to the kvm struct
 *
 * Map the virtual CPU interface into the VM before running any VCPUs.  We
 * can't do this at creation time, because user space must first set the
 * virtual CPU interface address in the guest physical address space.
 */
static int vgic_v2_map_resources(struct kvm *kvm,
				 const struct vgic_params *params)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	int ret = 0;

	if (!irqchip_in_kernel(kvm))
		return 0;

	mutex_lock(&kvm->lock);

	if (vgic_ready(kvm))
		goto out;

	if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base) ||
	    IS_VGIC_ADDR_UNDEF(dist->vgic_cpu_base)) {
		kvm_err("Need to set vgic cpu and dist addresses first\n");
		ret = -ENXIO;
		goto out;
	}

	vgic_register_kvm_io_dev(kvm, dist->vgic_dist_base,
				 KVM_VGIC_V2_DIST_SIZE,
				 vgic_dist_ranges, -1, &dist->dist_iodev);

	/*
	 * Initialize the vgic if this hasn't already been done on demand by
	 * accessing the vgic state from userspace.
	 */
	ret = vgic_init(kvm);
	if (ret) {
		kvm_err("Unable to allocate maps\n");
		goto out_unregister;
	}

	ret = kvm_phys_addr_ioremap(kvm, dist->vgic_cpu_base,
				    params->vcpu_base, KVM_VGIC_V2_CPU_SIZE,
				    true);
	if (ret) {
		kvm_err("Unable to remap VGIC CPU to VCPU\n");
		goto out_unregister;
	}

	dist->ready = true;
	goto out;

out_unregister:
	kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &dist->dist_iodev.dev);

out:
	if (ret)
		kvm_vgic_destroy(kvm);
	mutex_unlock(&kvm->lock);
	return ret;
}
static void vgic_v2_add_sgi_source(struct kvm_vcpu *vcpu, int irq, int source)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	*vgic_get_sgi_sources(dist, vcpu->vcpu_id, irq) |= 1 << source;
}
static int vgic_v2_init_model(struct kvm *kvm)
{
	int i;

	for (i = VGIC_NR_PRIVATE_IRQS; i < kvm->arch.vgic.nr_irqs; i += 4)
		vgic_set_target_reg(kvm, 0, i);

	return 0;
}
void vgic_v2_init_emulation(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;

	dist->vm_ops.queue_sgi = vgic_v2_queue_sgi;
	dist->vm_ops.add_sgi_source = vgic_v2_add_sgi_source;
	dist->vm_ops.init_model = vgic_v2_init_model;
	dist->vm_ops.map_resources = vgic_v2_map_resources;

	kvm->arch.max_vcpus = VGIC_V2_MAX_CPUS;
}
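/*
 * The GICC_CTLR, GICC_PMR, GICC_BPR and GICC_ABPR accesses below are mapped
 * onto the fields of the virtual machine control register (VMCR) kept by
 * the hardware virtual interface, via vgic_get_vmcr()/vgic_set_vmcr().
 */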
static bool handle_cpu_mmio_misc(struct kvm_vcpu *vcpu,
				 struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	bool updated = false;
	struct vgic_vmcr vmcr;
	u32 *vmcr_field;
	u32 reg;

	vgic_get_vmcr(vcpu, &vmcr);

	switch (offset & ~0x3) {
	case GIC_CPU_CTRL:
		vmcr_field = &vmcr.ctlr;
		break;
	case GIC_CPU_PRIMASK:
		vmcr_field = &vmcr.pmr;
		break;
	case GIC_CPU_BINPOINT:
		vmcr_field = &vmcr.bpr;
		break;
	case GIC_CPU_ALIAS_BINPOINT:
		vmcr_field = &vmcr.abpr;
		break;
	default:
		BUG();
	}

	if (!mmio->is_write) {
		reg = *vmcr_field;
		mmio_data_write(mmio, ~0, reg);
	} else {
		reg = mmio_data_read(mmio, ~0);
		if (reg != *vmcr_field) {
			*vmcr_field = reg;
			vgic_set_vmcr(vcpu, &vmcr);
			updated = true;
		}
	}
	return updated;
}
static bool handle_mmio_abpr(struct kvm_vcpu *vcpu,
			     struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	return handle_cpu_mmio_misc(vcpu, mmio, GIC_CPU_ALIAS_BINPOINT);
}
static bool handle_cpu_mmio_ident(struct kvm_vcpu *vcpu,
				  struct kvm_exit_mmio *mmio,
				  phys_addr_t offset)
{
	u32 reg;

	if (mmio->is_write)
		return false;

	reg = (PRODUCT_ID_KVM << 20) |
	      (GICC_ARCH_VERSION_V2 << 16) |
	      (IMPLEMENTER_ARM << 0);
	mmio_data_write(mmio, ~0, reg);
	return false;
}
/*
 * CPU Interface Register accesses - these are not accessed by the VM, but by
 * user space for saving and restoring VGIC state.
 */
static const struct vgic_io_range vgic_cpu_ranges[] = {
	{
		.base		= GIC_CPU_CTRL,
		.len		= 12,
		.handle_mmio	= handle_cpu_mmio_misc,
	},
	{
		.base		= GIC_CPU_ALIAS_BINPOINT,
		.len		= 4,
		.handle_mmio	= handle_mmio_abpr,
	},
	{
		.base		= GIC_CPU_ACTIVEPRIO,
		.len		= 16,
		.handle_mmio	= handle_mmio_raz_wi,
	},
	{
		.base		= GIC_CPU_IDENT,
		.len		= 4,
		.handle_mmio	= handle_cpu_mmio_ident,
	},
};
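/*
 * Userspace access path for the register groups above: the attribute value
 * encodes a cpuid and a register offset, and the access is turned into a
 * synthetic MMIO access that is fed to the same handlers the guest uses.
 */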
static int vgic_attr_regs_access(struct kvm_device *dev,
				 struct kvm_device_attr *attr,
				 u32 *reg, bool is_write)
{
	const struct vgic_io_range *r = NULL, *ranges;
	phys_addr_t offset;
	int ret, cpuid, c;
	struct kvm_vcpu *vcpu, *tmp_vcpu;
	struct vgic_dist *vgic;
	struct kvm_exit_mmio mmio;
	u32 data;

	offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
	cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
		KVM_DEV_ARM_VGIC_CPUID_SHIFT;

	mutex_lock(&dev->kvm->lock);

	ret = vgic_init(dev->kvm);
	if (ret)
		goto out;

	if (cpuid >= atomic_read(&dev->kvm->online_vcpus)) {
		ret = -EINVAL;
		goto out;
	}

	vcpu = kvm_get_vcpu(dev->kvm, cpuid);
	vgic = &dev->kvm->arch.vgic;

	mmio.len = 4;
	mmio.is_write = is_write;
	mmio.data = &data;
	if (is_write)
		mmio_data_write(&mmio, ~0, *reg);
	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
		mmio.phys_addr = vgic->vgic_dist_base + offset;
		ranges = vgic_dist_ranges;
		break;
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
		mmio.phys_addr = vgic->vgic_cpu_base + offset;
		ranges = vgic_cpu_ranges;
		break;
	default:
		BUG();
	}
	r = vgic_find_range(ranges, 4, offset);

	if (unlikely(!r || !r->handle_mmio)) {
		ret = -ENXIO;
		goto out;
	}

	spin_lock(&vgic->lock);

	/*
	 * Ensure that no other VCPU is running by checking the vcpu->cpu
	 * field.  If no other VPCUs are running we can safely access the VGIC
	 * state, because even if another VPU is run after this point, that
	 * VCPU will not touch the vgic state, because it will block on
	 * getting the vgic->lock in kvm_vgic_sync_hwstate().
	 */
	kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm) {
		if (unlikely(tmp_vcpu->cpu != -1)) {
			ret = -EBUSY;
			goto out_vgic_unlock;
		}
	}

	/*
	 * Move all pending IRQs from the LRs on all VCPUs so the pending
	 * state can be properly represented in the register state accessible
	 * through this API.
	 */
	kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm)
		vgic_unqueue_irqs(tmp_vcpu);

	offset -= r->base;
	r->handle_mmio(vcpu, &mmio, offset);

	if (!is_write)
		*reg = mmio_data_read(&mmio, ~0);

	ret = 0;
out_vgic_unlock:
	spin_unlock(&vgic->lock);
out:
	mutex_unlock(&dev->kvm->lock);
	return ret;
}
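/*
 * Rough userspace sketch (not part of this file) of driving the path above
 * through the KVM device API; vgic_fd, cpuid, offset and val are assumed to
 * be set up by the caller:
 *
 *	struct kvm_device_attr attr = {
 *		.group	= KVM_DEV_ARM_VGIC_GRP_DIST_REGS,
 *		.attr	= ((u64)cpuid << KVM_DEV_ARM_VGIC_CPUID_SHIFT) | offset,
 *		.addr	= (u64)(unsigned long)&val,
 *	};
 *	ioctl(vgic_fd, KVM_GET_DEVICE_ATTR, &attr);
 */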
static int vgic_v2_create(struct kvm_device *dev, u32 type)
{
	return kvm_vgic_create(dev->kvm, type);
}
static void vgic_v2_destroy(struct kvm_device *dev)
{
	kfree(dev);
}
static int vgic_v2_set_attr(struct kvm_device *dev,
			    struct kvm_device_attr *attr)
{
	int ret;

	ret = vgic_set_common_attr(dev, attr);
	if (ret != -ENXIO)
		return ret;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
		u32 reg;

		if (get_user(reg, uaddr))
			return -EFAULT;

		return vgic_attr_regs_access(dev, attr, &reg, true);
	}
	}

	return -ENXIO;
}
static int vgic_v2_get_attr(struct kvm_device *dev,
			    struct kvm_device_attr *attr)
{
	int ret;

	ret = vgic_get_common_attr(dev, attr);
	if (ret != -ENXIO)
		return ret;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
		u32 reg = 0;

		ret = vgic_attr_regs_access(dev, attr, &reg, false);
		if (ret)
			return ret;
		return put_user(reg, uaddr);
	}
	}

	return -ENXIO;
}
*dev
,
820 struct kvm_device_attr
*attr
)
824 switch (attr
->group
) {
825 case KVM_DEV_ARM_VGIC_GRP_ADDR
:
826 switch (attr
->attr
) {
827 case KVM_VGIC_V2_ADDR_TYPE_DIST
:
828 case KVM_VGIC_V2_ADDR_TYPE_CPU
:
832 case KVM_DEV_ARM_VGIC_GRP_DIST_REGS
:
833 offset
= attr
->attr
& KVM_DEV_ARM_VGIC_OFFSET_MASK
;
834 return vgic_has_attr_regs(vgic_dist_ranges
, offset
);
835 case KVM_DEV_ARM_VGIC_GRP_CPU_REGS
:
836 offset
= attr
->attr
& KVM_DEV_ARM_VGIC_OFFSET_MASK
;
837 return vgic_has_attr_regs(vgic_cpu_ranges
, offset
);
838 case KVM_DEV_ARM_VGIC_GRP_NR_IRQS
:
840 case KVM_DEV_ARM_VGIC_GRP_CTRL
:
841 switch (attr
->attr
) {
842 case KVM_DEV_ARM_VGIC_CTRL_INIT
:
849 struct kvm_device_ops kvm_arm_vgic_v2_ops
= {
850 .name
= "kvm-arm-vgic-v2",
851 .create
= vgic_v2_create
,
852 .destroy
= vgic_v2_destroy
,
853 .set_attr
= vgic_v2_set_attr
,
854 .get_attr
= vgic_v2_get_attr
,
855 .has_attr
= vgic_v2_has_attr
,