// SPDX-License-Identifier: GPL-2.0-only
/*
 * VGICv2 MMIO handling functions
 */

#include <linux/irqchip/arm-gic.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/nospec.h>

#include <kvm/iodev.h>
#include <kvm/arm_vgic.h>

#include "vgic.h"
#include "vgic-mmio.h"

/*
 * The Revision field in the IIDR has the following meanings:
 *
 * Revision 1: Report GICv2 interrupts as group 0 instead of group 1
 * Revision 2: Interrupt groups are guest-configurable and signaled using
 *	       their configured groups.
 */
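/*
 * Emulates reads of the GICD_CTLR, GICD_TYPER and GICD_IIDR registers,
 * which all live in the first 16 bytes of the distributor (hence addr & 0x0c).
 */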
static unsigned long vgic_mmio_read_v2_misc(struct kvm_vcpu *vcpu,
					    gpa_t addr, unsigned int len)
{
	struct vgic_dist *vgic = &vcpu->kvm->arch.vgic;
	u32 value;

	switch (addr & 0x0c) {
	case GIC_DIST_CTRL:
		value = vgic->enabled ? GICD_ENABLE : 0;
		break;
	case GIC_DIST_CTR:
		value = vgic->nr_spis + VGIC_NR_PRIVATE_IRQS;
		value = (value >> 5) - 1;
		value |= (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5;
		break;
	case GIC_DIST_IIDR:
		value = (PRODUCT_ID_KVM << GICD_IIDR_PRODUCT_ID_SHIFT) |
			(vgic->implementation_rev << GICD_IIDR_REVISION_SHIFT) |
			(IMPLEMENTER_ARM << GICD_IIDR_IMPLEMENTER_SHIFT);
		break;
	default:
		return 0;
	}

	return value;
}
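/*
 * Worked example for GIC_DIST_CTR above: with 224 SPIs and 4 online VCPUs,
 * value = ((224 + 32) >> 5) - 1 = 7 (ITLinesNumber), with (4 - 1) << 5 or'ed
 * in as the CPU number field.  For GIC_DIST_IIDR, assuming PRODUCT_ID_KVM ==
 * 0x4b and IMPLEMENTER_ARM == 0x43b (vgic.h) and Revision 2, the register
 * reads back as 0x4b00243b.
 */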
static void vgic_mmio_write_v2_misc(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len,
				    unsigned long val)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	bool was_enabled = dist->enabled;

	switch (addr & 0x0c) {
	case GIC_DIST_CTRL:
		dist->enabled = val & GICD_ENABLE;
		if (!was_enabled && dist->enabled)
			vgic_kick_vcpus(vcpu->kvm);
		break;
	case GIC_DIST_CTR:
	case GIC_DIST_IIDR:
		/* Nothing to do */
		return;
	}
}
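/*
 * Userspace (uaccess) writes to the miscellaneous range: GICD_CTLR is handled
 * like a guest write, while a write of the expected GICD_IIDR value acts as a
 * versioning handshake (see the comment below).
 */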
static int vgic_mmio_uaccess_write_v2_misc(struct kvm_vcpu *vcpu,
					   gpa_t addr, unsigned int len,
					   unsigned long val)
{
	switch (addr & 0x0c) {
	case GIC_DIST_IIDR:
		if (val != vgic_mmio_read_v2_misc(vcpu, addr, len))
			return -EINVAL;

		/*
		 * If we observe a write to GICD_IIDR we know that userspace
		 * has been updated and has had a chance to cope with older
		 * kernels (VGICv2 IIDR.Revision == 0) incorrectly reporting
		 * interrupts as group 1, and therefore we now allow groups to
		 * be user writable.  Doing this by default would break
		 * migration from old kernels to new kernels with legacy
		 * userspace.
		 */
		vcpu->kvm->arch.vgic.v2_groups_user_writable = true;
		return 0;
	}

	vgic_mmio_write_v2_misc(vcpu, addr, len, val);
	return 0;
}
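/* Group configuration only becomes user-writable after the GICD_IIDR handshake above. */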
static int vgic_mmio_uaccess_write_v2_group(struct kvm_vcpu *vcpu,
					    gpa_t addr, unsigned int len,
					    unsigned long val)
{
	if (vcpu->kvm->arch.vgic.v2_groups_user_writable)
		vgic_mmio_write_group(vcpu, addr, len, val);

	return 0;
}
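/*
 * Emulates a guest write to GICD_SGIR: bits [3:0] select the SGI, bits [23:16]
 * hold the CPU target list and bits [25:24] the target list filter mode.
 */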
static void vgic_mmio_write_sgir(struct kvm_vcpu *source_vcpu,
				 gpa_t addr, unsigned int len,
				 unsigned long val)
{
	int nr_vcpus = atomic_read(&source_vcpu->kvm->online_vcpus);
	int intid = val & 0xf;
	int targets = (val >> 16) & 0xff;
	int mode = (val >> 24) & 0x03;
	int c;
	struct kvm_vcpu *vcpu;
	unsigned long flags;

	switch (mode) {
	case 0x0:		/* as specified by targets */
		break;
	case 0x1:
		targets = (1U << nr_vcpus) - 1;			/* all, ... */
		targets &= ~(1U << source_vcpu->vcpu_id);	/* but self */
		break;
	case 0x2:		/* this very vCPU only */
		targets = (1U << source_vcpu->vcpu_id);
		break;
	case 0x3:		/* reserved */
		return;
	}

	kvm_for_each_vcpu(c, vcpu, source_vcpu->kvm) {
		struct vgic_irq *irq;

		if (!(targets & (1U << c)))
			continue;

		irq = vgic_get_irq(source_vcpu->kvm, vcpu, intid);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		irq->pending_latch = true;
		irq->source |= 1U << source_vcpu->vcpu_id;

		vgic_queue_irq_unlock(source_vcpu->kvm, irq, flags);
		vgic_put_irq(source_vcpu->kvm, irq);
	}
}
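/*
 * GICD_ITARGETSR reads: gather the per-interrupt target byte for each of the
 * 'len' interrupts covered by the access.
 */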
static unsigned long vgic_mmio_read_target(struct kvm_vcpu *vcpu,
					   gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
	int i;
	u64 val = 0;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		val |= (u64)irq->targets << (i * 8);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return val;
}
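/*
 * GICD_ITARGETSR writes: update each SPI's target mask (clamped to the online
 * VCPUs) and cache the lowest set bit as the delivery target.
 */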
static void vgic_mmio_write_target(struct kvm_vcpu *vcpu,
				   gpa_t addr, unsigned int len,
				   unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
	u8 cpu_mask = GENMASK(atomic_read(&vcpu->kvm->online_vcpus) - 1, 0);
	int i;
	unsigned long flags;

	/* GICD_ITARGETSR[0-7] are read-only */
	if (intid < VGIC_NR_PRIVATE_IRQS)
		return;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid + i);
		int target;

		raw_spin_lock_irqsave(&irq->irq_lock, flags);

		irq->targets = (val >> (i * 8)) & cpu_mask;
		target = irq->targets ? __ffs(irq->targets) : 0;
		irq->target_vcpu = kvm_get_vcpu(vcpu->kvm, target);

		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}
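/*
 * GICD_SPENDSGIR/GICD_CPENDSGIR reads: return, per SGI, the bitmap of source
 * VCPUs that still have that SGI pending.
 */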
static unsigned long vgic_mmio_read_sgipend(struct kvm_vcpu *vcpu,
					    gpa_t addr, unsigned int len)
{
	u32 intid = addr & 0x0f;
	int i;
	u64 val = 0;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		val |= (u64)irq->source << (i * 8);

		vgic_put_irq(vcpu->kvm, irq);
	}
	return val;
}
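/*
 * GICD_CPENDSGIR writes clear the written source bits; the SGI only stops
 * being pending once its source bitmap is empty.
 */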
static void vgic_mmio_write_sgipendc(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	u32 intid = addr & 0x0f;
	int i;
	unsigned long flags;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);

		irq->source &= ~((val >> (i * 8)) & 0xff);
		if (!irq->source)
			irq->pending_latch = false;

		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}
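/*
 * GICD_SPENDSGIR writes set source bits and, if any source is pending,
 * latch the SGI pending and queue it to the target VCPU.
 */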
static void vgic_mmio_write_sgipends(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	u32 intid = addr & 0x0f;
	int i;
	unsigned long flags;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);

		irq->source |= (val >> (i * 8)) & 0xff;

		if (irq->source) {
			irq->pending_latch = true;
			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		} else {
			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		}
		vgic_put_irq(vcpu->kvm, irq);
	}
}
#define GICC_ARCH_VERSION_V2	0x2

/* These are for userland accesses only, there is no guest-facing emulation. */
static unsigned long vgic_mmio_read_vcpuif(struct kvm_vcpu *vcpu,
					   gpa_t addr, unsigned int len)
{
	struct vgic_vmcr vmcr;
	u32 val;

	vgic_get_vmcr(vcpu, &vmcr);

	switch (addr & 0xff) {
	case GIC_CPU_CTRL:
		val = vmcr.grpen0 << GIC_CPU_CTRL_EnableGrp0_SHIFT;
		val |= vmcr.grpen1 << GIC_CPU_CTRL_EnableGrp1_SHIFT;
		val |= vmcr.ackctl << GIC_CPU_CTRL_AckCtl_SHIFT;
		val |= vmcr.fiqen << GIC_CPU_CTRL_FIQEn_SHIFT;
		val |= vmcr.cbpr << GIC_CPU_CTRL_CBPR_SHIFT;
		val |= vmcr.eoim << GIC_CPU_CTRL_EOImodeNS_SHIFT;

		break;
	case GIC_CPU_PRIMASK:
		/*
		 * Our KVM_DEV_TYPE_ARM_VGIC_V2 device ABI exports the
		 * PMR field as GICH_VMCR.VMPriMask rather than
		 * GICC_PMR.Priority, so we expose the upper five bits of
		 * priority mask to userspace using the lower bits in the
		 * word.
		 */
		val = (vmcr.pmr & GICV_PMR_PRIORITY_MASK) >>
			GICV_PMR_PRIORITY_SHIFT;
		break;
	case GIC_CPU_BINPOINT:
		val = vmcr.bpr;
		break;
	case GIC_CPU_ALIAS_BINPOINT:
		val = vmcr.abpr;
		break;
	case GIC_CPU_IDENT:
		val = ((PRODUCT_ID_KVM << 20) |
		       (GICC_ARCH_VERSION_V2 << 16) |
		       IMPLEMENTER_ARM);
		break;
	default:
		return 0;
	}

	return val;
}
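/*
 * Illustration of the GIC_CPU_PRIMASK conversion above (assuming
 * GICV_PMR_PRIORITY_SHIFT == 3 and GICV_PMR_PRIORITY_MASK == 0xf8 from
 * <linux/irqchip/arm-gic.h>): a userspace value of 0x1f is stored as
 * vmcr.pmr == 0xf8 by the write path below and read back here as
 * (0xf8 & 0xf8) >> 3 == 0x1f.
 */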
static void vgic_mmio_write_vcpuif(struct kvm_vcpu *vcpu,
				   gpa_t addr, unsigned int len,
				   unsigned long val)
{
	struct vgic_vmcr vmcr;

	vgic_get_vmcr(vcpu, &vmcr);

	switch (addr & 0xff) {
	case GIC_CPU_CTRL:
		vmcr.grpen0 = !!(val & GIC_CPU_CTRL_EnableGrp0);
		vmcr.grpen1 = !!(val & GIC_CPU_CTRL_EnableGrp1);
		vmcr.ackctl = !!(val & GIC_CPU_CTRL_AckCtl);
		vmcr.fiqen = !!(val & GIC_CPU_CTRL_FIQEn);
		vmcr.cbpr = !!(val & GIC_CPU_CTRL_CBPR);
		vmcr.eoim = !!(val & GIC_CPU_CTRL_EOImodeNS);

		break;
	case GIC_CPU_PRIMASK:
		/*
		 * Our KVM_DEV_TYPE_ARM_VGIC_V2 device ABI exports the
		 * PMR field as GICH_VMCR.VMPriMask rather than
		 * GICC_PMR.Priority, so we expose the upper five bits of
		 * priority mask to userspace using the lower bits in the
		 * word.
		 */
		vmcr.pmr = (val << GICV_PMR_PRIORITY_SHIFT) &
			GICV_PMR_PRIORITY_MASK;
		break;
	case GIC_CPU_BINPOINT:
		vmcr.bpr = val;
		break;
	case GIC_CPU_ALIAS_BINPOINT:
		vmcr.abpr = val;
		break;
	}

	vgic_set_vmcr(vcpu, &vmcr);
}
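/*
 * GICC_APRn accessors: on a GICv2 host only APR0 is backed by hardware state,
 * while on a GICv3 host the value lives in ICH_AP1Rn, indexed through a
 * speculation-safe bounds check.
 */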
static unsigned long vgic_mmio_read_apr(struct kvm_vcpu *vcpu,
					gpa_t addr, unsigned int len)
{
	int n; /* which APRn is this */

	n = (addr >> 2) & 0x3;

	if (kvm_vgic_global_state.type == VGIC_V2) {
		/* GICv2 hardware systems support max. 32 groups */
		if (n != 0)
			return 0;
		return vcpu->arch.vgic_cpu.vgic_v2.vgic_apr;
	} else {
		struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;

		if (n > vgic_v3_max_apr_idx(vcpu))
			return 0;

		n = array_index_nospec(n, 4);

		/* GICv3 only uses ICH_AP1Rn for memory mapped (GICv2) guests */
		return vgicv3->vgic_ap1r[n];
	}
}
static void vgic_mmio_write_apr(struct kvm_vcpu *vcpu,
				gpa_t addr, unsigned int len,
				unsigned long val)
{
	int n; /* which APRn is this */

	n = (addr >> 2) & 0x3;

	if (kvm_vgic_global_state.type == VGIC_V2) {
		/* GICv2 hardware systems support max. 32 groups */
		if (n != 0)
			return;
		vcpu->arch.vgic_cpu.vgic_v2.vgic_apr = val;
	} else {
		struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;

		if (n > vgic_v3_max_apr_idx(vcpu))
			return;

		n = array_index_nospec(n, 4);

		/* GICv3 only uses ICH_AP1Rn for memory mapped (GICv2) guests */
		vgicv3->vgic_ap1r[n] = val;
	}
}
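/*
 * MMIO region descriptors for the GICv2 distributor: each entry ties a
 * register range to its guest read/write handlers and, where needed, to
 * separate userspace accessors.
 */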
static const struct vgic_register_region vgic_v2_dist_registers[] = {
	REGISTER_DESC_WITH_LENGTH_UACCESS(GIC_DIST_CTRL,
		vgic_mmio_read_v2_misc, vgic_mmio_write_v2_misc,
		NULL, vgic_mmio_uaccess_write_v2_misc,
		12, VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_IGROUP,
		vgic_mmio_read_group, vgic_mmio_write_group,
		NULL, vgic_mmio_uaccess_write_v2_group, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ENABLE_SET,
		vgic_mmio_read_enable, vgic_mmio_write_senable, NULL, NULL, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ENABLE_CLEAR,
		vgic_mmio_read_enable, vgic_mmio_write_cenable, NULL, NULL, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_SET,
		vgic_mmio_read_pending, vgic_mmio_write_spending, NULL, NULL, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_CLEAR,
		vgic_mmio_read_pending, vgic_mmio_write_cpending, NULL, NULL, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_SET,
		vgic_mmio_read_active, vgic_mmio_write_sactive,
		NULL, vgic_mmio_uaccess_write_sactive, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_CLEAR,
		vgic_mmio_read_active, vgic_mmio_write_cactive,
		NULL, vgic_mmio_uaccess_write_cactive, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PRI,
		vgic_mmio_read_priority, vgic_mmio_write_priority, NULL, NULL,
		8, VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_TARGET,
		vgic_mmio_read_target, vgic_mmio_write_target, NULL, NULL, 8,
		VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_CONFIG,
		vgic_mmio_read_config, vgic_mmio_write_config, NULL, NULL, 2,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_DIST_SOFTINT,
		vgic_mmio_read_raz, vgic_mmio_write_sgir, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_DIST_SGI_PENDING_CLEAR,
		vgic_mmio_read_sgipend, vgic_mmio_write_sgipendc, 16,
		VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
	REGISTER_DESC_WITH_LENGTH(GIC_DIST_SGI_PENDING_SET,
		vgic_mmio_read_sgipend, vgic_mmio_write_sgipends, 16,
		VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
};
static const struct vgic_register_region vgic_v2_cpu_registers[] = {
	REGISTER_DESC_WITH_LENGTH(GIC_CPU_CTRL,
		vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_CPU_PRIMASK,
		vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_CPU_BINPOINT,
		vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_CPU_ALIAS_BINPOINT,
		vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_CPU_ACTIVEPRIO,
		vgic_mmio_read_apr, vgic_mmio_write_apr, 16,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_CPU_IDENT,
		vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
		VGIC_ACCESS_32bit),
};
unsigned int vgic_v2_init_dist_iodev(struct vgic_io_device *dev)
{
	dev->regions = vgic_v2_dist_registers;
	dev->nr_regions = ARRAY_SIZE(vgic_v2_dist_registers);

	kvm_iodevice_init(&dev->dev, &kvm_io_gic_ops);

	return SZ_4K;
}
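/*
 * Checks whether a KVM device attribute maps onto a known distributor or CPU
 * interface register, so userspace can probe which accesses are supported.
 */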
int vgic_v2_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	const struct vgic_register_region *region;
	struct vgic_io_device iodev;
	struct vgic_reg_attr reg_attr;
	struct kvm_vcpu *vcpu;
	gpa_t addr;
	int ret;

	ret = vgic_v2_parse_attr(dev, attr, &reg_attr);
	if (ret)
		return ret;

	vcpu = reg_attr.vcpu;
	addr = reg_attr.addr;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
		iodev.regions = vgic_v2_dist_registers;
		iodev.nr_regions = ARRAY_SIZE(vgic_v2_dist_registers);
		iodev.base_addr = 0;
		break;
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
		iodev.regions = vgic_v2_cpu_registers;
		iodev.nr_regions = ARRAY_SIZE(vgic_v2_cpu_registers);
		iodev.base_addr = 0;
		break;
	default:
		return -ENXIO;
	}

	/* We only support aligned 32-bit accesses. */
	if (addr & 3)
		return -ENXIO;

	region = vgic_get_mmio_region(vcpu, &iodev, addr, sizeof(u32));
	if (!region)
		return -ENXIO;

	return 0;
}
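/*
 * Userspace access helpers: wrap the register tables in a temporary
 * vgic_io_device and dispatch through the common vgic_uaccess() path.
 */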
int vgic_v2_cpuif_uaccess(struct kvm_vcpu *vcpu, bool is_write,
			  int offset, u32 *val)
{
	struct vgic_io_device dev = {
		.regions = vgic_v2_cpu_registers,
		.nr_regions = ARRAY_SIZE(vgic_v2_cpu_registers),
		.iodev_type = IODEV_CPUIF,
	};

	return vgic_uaccess(vcpu, &dev, is_write, offset, val);
}
int vgic_v2_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
			 int offset, u32 *val)
{
	struct vgic_io_device dev = {
		.regions = vgic_v2_dist_registers,
		.nr_regions = ARRAY_SIZE(vgic_v2_dist_registers),
		.iodev_type = IODEV_DIST,
	};

	return vgic_uaccess(vcpu, &dev, is_write, offset, val);
}