/*
 * VGICv2 MMIO handling functions
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/irqchip/arm-gic.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/iodev.h>
#include <kvm/arm_vgic.h>

#include "vgic.h"
#include "vgic-mmio.h"
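
/*
 * Reads of the "miscellaneous" distributor registers: GICD_CTLR reflects
 * the distributor enable bit, GICD_TYPER encodes the number of supported
 * interrupt lines ((nr / 32) - 1, in bits [4:0]) and the number of vCPUs
 * minus one (bits [7:5]), and GICD_IIDR identifies KVM as the implementer.
 */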
static unsigned long vgic_mmio_read_v2_misc(struct kvm_vcpu *vcpu,
					    gpa_t addr, unsigned int len)
{
	u32 value;

	switch (addr & 0x0c) {
	case GIC_DIST_CTRL:
		value = vcpu->kvm->arch.vgic.enabled ? GICD_ENABLE : 0;
		break;
	case GIC_DIST_CTR:
		value = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
		value = (value >> 5) - 1;
		value |= (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5;
		break;
	case GIC_DIST_IIDR:
		value = (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
		break;
	default:
		return 0;
	}

	return value;
}

static void vgic_mmio_write_v2_misc(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len,
				    unsigned long val)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	bool was_enabled = dist->enabled;

	switch (addr & 0x0c) {
	case GIC_DIST_CTRL:
		dist->enabled = val & GICD_ENABLE;
		if (!was_enabled && dist->enabled)
			vgic_kick_vcpus(vcpu->kvm);
		break;
	case GIC_DIST_CTR:
	case GIC_DIST_IIDR:
		/* Nothing to do */
		return;
	}
}
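
/*
 * A write to GICD_SGIR injects an SGI: bits [3:0] select the interrupt ID,
 * bits [23:16] carry the CPU target list and bits [25:24] the target list
 * filter. We mark the SGI pending on each targeted vCPU and record the
 * sender in the per-IRQ source bitmap.
 */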
static void vgic_mmio_write_sgir(struct kvm_vcpu *source_vcpu,
				 gpa_t addr, unsigned int len,
				 unsigned long val)
{
	int nr_vcpus = atomic_read(&source_vcpu->kvm->online_vcpus);
	int intid = val & 0xf;
	int targets = (val >> 16) & 0xff;
	int mode = (val >> 24) & 0x03;
	int c;
	struct kvm_vcpu *vcpu;
	unsigned long flags;

	switch (mode) {
	case 0x0:		/* as specified by targets */
		break;
	case 0x1:
		targets = (1U << nr_vcpus) - 1;			/* all, ... */
		targets &= ~(1U << source_vcpu->vcpu_id);	/* but self */
		break;
	case 0x2:		/* this very vCPU only */
		targets = (1U << source_vcpu->vcpu_id);
		break;
	case 0x3:		/* reserved */
		return;
	}

	kvm_for_each_vcpu(c, vcpu, source_vcpu->kvm) {
		struct vgic_irq *irq;

		if (!(targets & (1U << c)))
			continue;

		irq = vgic_get_irq(source_vcpu->kvm, vcpu, intid);

		spin_lock_irqsave(&irq->irq_lock, flags);
		irq->pending_latch = true;
		irq->source |= 1U << source_vcpu->vcpu_id;

		vgic_queue_irq_unlock(source_vcpu->kvm, irq, flags);
		vgic_put_irq(source_vcpu->kvm, irq);
	}
}
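
/*
 * GICD_ITARGETSR holds one byte per interrupt; each bit in a byte selects
 * one target CPU. Reads assemble up to len bytes from the per-IRQ targets
 * fields.
 */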
static unsigned long vgic_mmio_read_target(struct kvm_vcpu *vcpu,
					   gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
	int i;
	u64 val = 0;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		val |= (u64)irq->targets << (i * 8);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return val;
}

static void vgic_mmio_write_target(struct kvm_vcpu *vcpu,
				   gpa_t addr, unsigned int len,
				   unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
	u8 cpu_mask = GENMASK(atomic_read(&vcpu->kvm->online_vcpus) - 1, 0);
	int i;
	unsigned long flags;

	/* GICD_ITARGETSR[0-7] are read-only */
	if (intid < VGIC_NR_PRIVATE_IRQS)
		return;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid + i);
		int target;

		spin_lock_irqsave(&irq->irq_lock, flags);

		irq->targets = (val >> (i * 8)) & cpu_mask;
		target = irq->targets ? __ffs(irq->targets) : 0;
		irq->target_vcpu = kvm_get_vcpu(vcpu->kvm, target);

		spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}
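
/*
 * GICD_CPENDSGIR/GICD_SPENDSGIR expose one byte per SGI, with one bit per
 * possible source CPU. Reads return the source bitmap; writes clear or set
 * source bits and adjust the pending latch accordingly.
 */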
static unsigned long vgic_mmio_read_sgipend(struct kvm_vcpu *vcpu,
					    gpa_t addr, unsigned int len)
{
	u32 intid = addr & 0x0f;
	int i;
	u64 val = 0;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		val |= (u64)irq->source << (i * 8);

		vgic_put_irq(vcpu->kvm, irq);
	}
	return val;
}

static void vgic_mmio_write_sgipendc(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	u32 intid = addr & 0x0f;
	int i;
	unsigned long flags;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		spin_lock_irqsave(&irq->irq_lock, flags);

		irq->source &= ~((val >> (i * 8)) & 0xff);
		if (!irq->source)
			irq->pending_latch = false;

		spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

static void vgic_mmio_write_sgipends(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	u32 intid = addr & 0x0f;
	int i;
	unsigned long flags;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		spin_lock_irqsave(&irq->irq_lock, flags);

		irq->source |= (val >> (i * 8)) & 0xff;

		if (irq->source) {
			irq->pending_latch = true;
			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		} else {
			spin_unlock_irqrestore(&irq->irq_lock, flags);
		}
		vgic_put_irq(vcpu->kvm, irq);
	}
}
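
/* Value of the ArchRev field in GICC_IIDR for a GICv2 CPU interface. */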
#define GICC_ARCH_VERSION_V2	0x2

/* These are for userland accesses only, there is no guest-facing emulation. */
static unsigned long vgic_mmio_read_vcpuif(struct kvm_vcpu *vcpu,
					   gpa_t addr, unsigned int len)
{
	struct vgic_vmcr vmcr;
	u32 val;

	vgic_get_vmcr(vcpu, &vmcr);

	switch (addr & 0xff) {
	case GIC_CPU_CTRL:
		val = vmcr.grpen0 << GIC_CPU_CTRL_EnableGrp0_SHIFT;
		val |= vmcr.grpen1 << GIC_CPU_CTRL_EnableGrp1_SHIFT;
		val |= vmcr.ackctl << GIC_CPU_CTRL_AckCtl_SHIFT;
		val |= vmcr.fiqen << GIC_CPU_CTRL_FIQEn_SHIFT;
		val |= vmcr.cbpr << GIC_CPU_CTRL_CBPR_SHIFT;
		val |= vmcr.eoim << GIC_CPU_CTRL_EOImodeNS_SHIFT;
		break;
	case GIC_CPU_PRIMASK:
		/*
		 * Our KVM_DEV_TYPE_ARM_VGIC_V2 device ABI exports the
		 * PMR field as GICH_VMCR.VMPriMask rather than
		 * GICC_PMR.Priority, so we expose the upper five bits of
		 * priority mask to userspace using the lower bits in the
		 * unsigned long.
		 */
		val = (vmcr.pmr & GICV_PMR_PRIORITY_MASK) >>
			GICV_PMR_PRIORITY_SHIFT;
		break;
	case GIC_CPU_BINPOINT:
		val = vmcr.bpr;
		break;
	case GIC_CPU_ALIAS_BINPOINT:
		val = vmcr.abpr;
		break;
	case GIC_CPU_IDENT:
		val = ((PRODUCT_ID_KVM << 20) |
		       (GICC_ARCH_VERSION_V2 << 16) |
		       IMPLEMENTER_ARM);
		break;
	default:
		return 0;
	}

	return val;
}
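
/*
 * The write side mirrors the read side: userspace values are folded back
 * into the virtual CPU interface state via vgic_set_vmcr().
 */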
static void vgic_mmio_write_vcpuif(struct kvm_vcpu *vcpu,
				   gpa_t addr, unsigned int len,
				   unsigned long val)
{
	struct vgic_vmcr vmcr;

	vgic_get_vmcr(vcpu, &vmcr);

	switch (addr & 0xff) {
	case GIC_CPU_CTRL:
		vmcr.grpen0 = !!(val & GIC_CPU_CTRL_EnableGrp0);
		vmcr.grpen1 = !!(val & GIC_CPU_CTRL_EnableGrp1);
		vmcr.ackctl = !!(val & GIC_CPU_CTRL_AckCtl);
		vmcr.fiqen = !!(val & GIC_CPU_CTRL_FIQEn);
		vmcr.cbpr = !!(val & GIC_CPU_CTRL_CBPR);
		vmcr.eoim = !!(val & GIC_CPU_CTRL_EOImodeNS);
		break;
	case GIC_CPU_PRIMASK:
		/*
		 * Our KVM_DEV_TYPE_ARM_VGIC_V2 device ABI exports the
		 * PMR field as GICH_VMCR.VMPriMask rather than
		 * GICC_PMR.Priority, so we expose the upper five bits of
		 * priority mask to userspace using the lower bits in the
		 * unsigned long.
		 */
		vmcr.pmr = (val << GICV_PMR_PRIORITY_SHIFT) &
			GICV_PMR_PRIORITY_MASK;
		break;
	case GIC_CPU_BINPOINT:
		vmcr.bpr = val;
		break;
	case GIC_CPU_ALIAS_BINPOINT:
		vmcr.abpr = val;
		break;
	}

	vgic_set_vmcr(vcpu, &vmcr);
}
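
/*
 * GICC_APRn accesses: GICv2 hardware has a single active priority
 * register, while on GICv3 hardware the memory-mapped GICv2 guest view
 * is backed by the ICH_AP1Rn registers.
 */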
static unsigned long vgic_mmio_read_apr(struct kvm_vcpu *vcpu,
					gpa_t addr, unsigned int len)
{
	int n; /* which APRn is this */

	n = (addr >> 2) & 0x3;

	if (kvm_vgic_global_state.type == VGIC_V2) {
		/* GICv2 hardware systems support max. 32 groups */
		if (n != 0)
			return 0;
		return vcpu->arch.vgic_cpu.vgic_v2.vgic_apr;
	} else {
		struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;

		if (n > vgic_v3_max_apr_idx(vcpu))
			return 0;
		/* GICv3 only uses ICH_AP1Rn for memory mapped (GICv2) guests */
		return vgicv3->vgic_ap1r[n];
	}
}

static void vgic_mmio_write_apr(struct kvm_vcpu *vcpu,
				gpa_t addr, unsigned int len,
				unsigned long val)
{
	int n; /* which APRn is this */

	n = (addr >> 2) & 0x3;

	if (kvm_vgic_global_state.type == VGIC_V2) {
		/* GICv2 hardware systems support max. 32 groups */
		if (n != 0)
			return;
		vcpu->arch.vgic_cpu.vgic_v2.vgic_apr = val;
	} else {
		struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;

		if (n > vgic_v3_max_apr_idx(vcpu))
			return;
		/* GICv3 only uses ICH_AP1Rn for memory mapped (GICv2) guests */
		vgicv3->vgic_ap1r[n] = val;
	}
}
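
/*
 * The distributor register map: each entry names the register offset, the
 * guest read/write handlers, the (optional) userspace-only handlers, the
 * region size in bytes (or bits per interrupt) and the permitted access
 * widths, as laid out by the REGISTER_DESC_* macros in vgic-mmio.h.
 */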
static const struct vgic_register_region vgic_v2_dist_registers[] = {
	REGISTER_DESC_WITH_LENGTH(GIC_DIST_CTRL,
		vgic_mmio_read_v2_misc, vgic_mmio_write_v2_misc, 12,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_IGROUP,
		vgic_mmio_read_rao, vgic_mmio_write_wi, NULL, NULL, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ENABLE_SET,
		vgic_mmio_read_enable, vgic_mmio_write_senable, NULL, NULL, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ENABLE_CLEAR,
		vgic_mmio_read_enable, vgic_mmio_write_cenable, NULL, NULL, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_SET,
		vgic_mmio_read_pending, vgic_mmio_write_spending, NULL, NULL, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_CLEAR,
		vgic_mmio_read_pending, vgic_mmio_write_cpending, NULL, NULL, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_SET,
		vgic_mmio_read_active, vgic_mmio_write_sactive,
		NULL, vgic_mmio_uaccess_write_sactive, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_CLEAR,
		vgic_mmio_read_active, vgic_mmio_write_cactive,
		NULL, vgic_mmio_uaccess_write_cactive, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PRI,
		vgic_mmio_read_priority, vgic_mmio_write_priority, NULL, NULL,
		8, VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_TARGET,
		vgic_mmio_read_target, vgic_mmio_write_target, NULL, NULL, 8,
		VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_CONFIG,
		vgic_mmio_read_config, vgic_mmio_write_config, NULL, NULL, 2,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_DIST_SOFTINT,
		vgic_mmio_read_raz, vgic_mmio_write_sgir, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_DIST_SGI_PENDING_CLEAR,
		vgic_mmio_read_sgipend, vgic_mmio_write_sgipendc, 16,
		VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
	REGISTER_DESC_WITH_LENGTH(GIC_DIST_SGI_PENDING_SET,
		vgic_mmio_read_sgipend, vgic_mmio_write_sgipends, 16,
		VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
};
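
/* The GICC register frame, exposed to userspace only (see above). */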
static const struct vgic_register_region vgic_v2_cpu_registers[] = {
	REGISTER_DESC_WITH_LENGTH(GIC_CPU_CTRL,
		vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_CPU_PRIMASK,
		vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_CPU_BINPOINT,
		vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_CPU_ALIAS_BINPOINT,
		vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_CPU_ACTIVEPRIO,
		vgic_mmio_read_apr, vgic_mmio_write_apr, 16,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_CPU_IDENT,
		vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
		VGIC_ACCESS_32bit),
};

unsigned int vgic_v2_init_dist_iodev(struct vgic_io_device *dev)
{
	dev->regions = vgic_v2_dist_registers;
	dev->nr_regions = ARRAY_SIZE(vgic_v2_dist_registers);

	kvm_iodevice_init(&dev->dev, &kvm_io_gic_ops);

	return SZ_4K;
}
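
/*
 * Check whether a userspace device-attribute access targets a register we
 * actually emulate: parse the attribute into a vCPU and an offset, pick
 * the matching register table, and require an aligned 32-bit region at
 * that offset.
 */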
int vgic_v2_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	const struct vgic_register_region *region;
	struct vgic_io_device iodev;
	struct vgic_reg_attr reg_attr;
	struct kvm_vcpu *vcpu;
	gpa_t addr;
	int ret;

	ret = vgic_v2_parse_attr(dev, attr, &reg_attr);
	if (ret)
		return ret;

	vcpu = reg_attr.vcpu;
	addr = reg_attr.addr;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
		iodev.regions = vgic_v2_dist_registers;
		iodev.nr_regions = ARRAY_SIZE(vgic_v2_dist_registers);
		iodev.base_addr = 0;
		break;
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
		iodev.regions = vgic_v2_cpu_registers;
		iodev.nr_regions = ARRAY_SIZE(vgic_v2_cpu_registers);
		iodev.base_addr = 0;
		break;
	default:
		return -ENXIO;
	}

	/* We only support aligned 32-bit accesses. */
	if (addr & 3)
		return -ENXIO;

	region = vgic_get_mmio_region(vcpu, &iodev, addr, sizeof(u32));
	if (!region)
		return -ENXIO;

	return 0;
}
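
/*
 * Userspace access helpers: build a transient vgic_io_device describing
 * the requested register frame and let the generic vgic_uaccess() code
 * dispatch to the tables above.
 */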
int vgic_v2_cpuif_uaccess(struct kvm_vcpu *vcpu, bool is_write,
			  int offset, u32 *val)
{
	struct vgic_io_device dev = {
		.regions = vgic_v2_cpu_registers,
		.nr_regions = ARRAY_SIZE(vgic_v2_cpu_registers),
		.iodev_type = IODEV_CPUIF,
	};

	return vgic_uaccess(vcpu, &dev, is_write, offset, val);
}

int vgic_v2_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
			 int offset, u32 *val)
{
	struct vgic_io_device dev = {
		.regions = vgic_v2_dist_registers,
		.nr_regions = ARRAY_SIZE(vgic_v2_dist_registers),
		.iodev_type = IODEV_DIST,
	};

	return vgic_uaccess(vcpu, &dev, is_write, offset, val);
}