/*
 * GICv3 distributor and redistributor emulation
 *
 * GICv3 emulation is currently only supported on a GICv3 host (because
 * we rely on the hardware's CPU interface virtualization support), but
 * supports both hardware with or without the optional GICv2 backwards
 * compatibility features.
 *
 * Limitations of the emulation:
 * (RAZ/WI: read as zero, write ignored; RAO/WI: read as one, write ignored)
 * - We do not support LPIs (yet). TYPER.LPIS is reported as 0 and is RAZ/WI.
 * - We do not support the message based interrupts (MBIs) triggered by
 *   writes to the GICD_{SET,CLR}SPI_* registers. TYPER.MBIS is reported as 0.
 * - We do not support the (optional) backwards compatibility feature.
 *   GICD_CTLR.ARE resets to 1 and is RAO/WI. If the _host_ GIC supports
 *   the compatibility feature, you can use a GICv2 in the guest, though.
 * - We only support a single security state. GICD_CTLR.DS is 1 and is RAO/WI.
 * - Priorities are not emulated (same as the GICv2 emulation). Linux
 *   as a guest is fine with this, because it does not use priorities.
 * - We only support Group1 interrupts. Again, Linux uses only those.
 *
 * Copyright (C) 2014 ARM Ltd.
 * Author: Andre Przywara <andre.przywara@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>

#include <linux/irqchip/arm-gic-v3.h>
#include <kvm/arm_vgic.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>

#include "vgic.h"

static bool handle_mmio_rao_wi(struct kvm_vcpu *vcpu,
			       struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	u32 reg = 0xffffffff;

	vgic_reg_access(mmio, &reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);

	return false;
}

static bool handle_mmio_ctlr(struct kvm_vcpu *vcpu,
			     struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	u32 reg = 0;

	/*
	 * Force ARE and DS to 1, the guest cannot change this.
	 * For the time being we only support Group1 interrupts.
	 */
	if (vcpu->kvm->arch.vgic.enabled)
		reg = GICD_CTLR_ENABLE_SS_G1;
	reg |= GICD_CTLR_ARE_NS | GICD_CTLR_DS;

	vgic_reg_access(mmio, &reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
	if (mmio->is_write) {
		vcpu->kvm->arch.vgic.enabled = !!(reg & GICD_CTLR_ENABLE_SS_G1);
		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}

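/*
 * Illustrative note (not from the original source): with the GICD_CTLR
 * bit assignments from include/linux/irqchip/arm-gic-v3.h, an enabled
 * distributor reads back as DS (bit 6) | ARE_NS (bit 4) | EnableGrp1
 * (bit 1) = 0x52, a disabled one as 0x50; a guest write can only toggle
 * the Group1 enable bit, everything else stays forced as above.
 */
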
/*
 * As this implementation does not provide compatibility
 * with GICv2 (ARE==1), we report zero CPUs in bits [5..7].
 * Also LPIs and MBIs are not supported, so we set the respective bits to 0.
 * Also we report at most 2**10=1024 interrupt IDs (to match 1024 SPIs).
 */
#define INTERRUPT_ID_BITS 10
static bool handle_mmio_typer(struct kvm_vcpu *vcpu,
			      struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	u32 reg;

	reg = (min(vcpu->kvm->arch.vgic.nr_irqs, 1024) >> 5) - 1;

	reg |= (INTERRUPT_ID_BITS - 1) << 19;

	vgic_reg_access(mmio, &reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);

	return false;
}

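/*
 * Worked example (illustrative, not from the original source): with
 * nr_irqs = 256 the ITLinesNumber field becomes (256 >> 5) - 1 = 7,
 * i.e. the guest sees 32 * (7 + 1) = 256 interrupt IDs, while the
 * IDbits field is set to INTERRUPT_ID_BITS - 1 = 9, advertising an
 * INTID space of 2^(9+1) = 1024.
 */
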
static bool handle_mmio_iidr(struct kvm_vcpu *vcpu,
			     struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	u32 reg;

	reg = (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
	vgic_reg_access(mmio, &reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);

	return false;
}

static bool handle_mmio_set_enable_reg_dist(struct kvm_vcpu *vcpu,
					    struct kvm_exit_mmio *mmio,
					    phys_addr_t offset)
{
	if (likely(offset >= VGIC_NR_PRIVATE_IRQS / 8))
		return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
					      vcpu->vcpu_id,
					      ACCESS_WRITE_SETBIT);

	vgic_reg_access(mmio, NULL, offset,
			ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
	return false;
}

static bool handle_mmio_clear_enable_reg_dist(struct kvm_vcpu *vcpu,
					      struct kvm_exit_mmio *mmio,
					      phys_addr_t offset)
{
	if (likely(offset >= VGIC_NR_PRIVATE_IRQS / 8))
		return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
					      vcpu->vcpu_id,
					      ACCESS_WRITE_CLEARBIT);

	vgic_reg_access(mmio, NULL, offset,
			ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
	return false;
}

static bool handle_mmio_set_pending_reg_dist(struct kvm_vcpu *vcpu,
					     struct kvm_exit_mmio *mmio,
					     phys_addr_t offset)
{
	if (likely(offset >= VGIC_NR_PRIVATE_IRQS / 8))
		return vgic_handle_set_pending_reg(vcpu->kvm, mmio, offset,
						   vcpu->vcpu_id);

	vgic_reg_access(mmio, NULL, offset,
			ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
	return false;
}

static bool handle_mmio_clear_pending_reg_dist(struct kvm_vcpu *vcpu,
					       struct kvm_exit_mmio *mmio,
					       phys_addr_t offset)
{
	if (likely(offset >= VGIC_NR_PRIVATE_IRQS / 8))
		return vgic_handle_clear_pending_reg(vcpu->kvm, mmio, offset,
						     vcpu->vcpu_id);

	vgic_reg_access(mmio, NULL, offset,
			ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
	return false;
}

static bool handle_mmio_set_active_reg_dist(struct kvm_vcpu *vcpu,
					    struct kvm_exit_mmio *mmio,
					    phys_addr_t offset)
{
	if (likely(offset >= VGIC_NR_PRIVATE_IRQS / 8))
		return vgic_handle_set_active_reg(vcpu->kvm, mmio, offset,
						  vcpu->vcpu_id);

	vgic_reg_access(mmio, NULL, offset,
			ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
	return false;
}

static bool handle_mmio_clear_active_reg_dist(struct kvm_vcpu *vcpu,
					      struct kvm_exit_mmio *mmio,
					      phys_addr_t offset)
{
	if (likely(offset >= VGIC_NR_PRIVATE_IRQS / 8))
		return vgic_handle_clear_active_reg(vcpu->kvm, mmio, offset,
						    vcpu->vcpu_id);

	vgic_reg_access(mmio, NULL, offset,
			ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
	return false;
}

static bool handle_mmio_priority_reg_dist(struct kvm_vcpu *vcpu,
					  struct kvm_exit_mmio *mmio,
					  phys_addr_t offset)
{
	u32 *reg;

	if (unlikely(offset < VGIC_NR_PRIVATE_IRQS)) {
		vgic_reg_access(mmio, NULL, offset,
				ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
		return false;
	}

	reg = vgic_bytemap_get_reg(&vcpu->kvm->arch.vgic.irq_priority,
				   vcpu->vcpu_id, offset);
	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
	return false;
}

static bool handle_mmio_cfg_reg_dist(struct kvm_vcpu *vcpu,
				     struct kvm_exit_mmio *mmio,
				     phys_addr_t offset)
{
	u32 *reg;

	if (unlikely(offset < VGIC_NR_PRIVATE_IRQS / 4)) {
		vgic_reg_access(mmio, NULL, offset,
				ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
		return false;
	}

	reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
				  vcpu->vcpu_id, offset >> 1);

	return vgic_handle_cfg_reg(reg, mmio, offset);
}

/*
 * We use a compressed version of the MPIDR (all 32 bits in one 32-bit word)
 * when we store the target MPIDR written by the guest.
 */
static u32 compress_mpidr(unsigned long mpidr)
{
	u32 ret;

	ret = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	ret |= MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8;
	ret |= MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16;
	ret |= MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24;

	return ret;
}

static unsigned long uncompress_mpidr(u32 value)
{
	unsigned long mpidr;

	mpidr  = ((value >>  0) & 0xFF) << MPIDR_LEVEL_SHIFT(0);
	mpidr |= ((value >>  8) & 0xFF) << MPIDR_LEVEL_SHIFT(1);
	mpidr |= ((value >> 16) & 0xFF) << MPIDR_LEVEL_SHIFT(2);
	mpidr |= (u64)((value >> 24) & 0xFF) << MPIDR_LEVEL_SHIFT(3);

	return mpidr;
}

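/*
 * Worked example (illustrative): an MPIDR with Aff3=0, Aff2=1, Aff1=2,
 * Aff0=3 (hardware bits [39:32], [23:16], [15:8], [7:0]) compresses to
 * 0x00010203, i.e. one byte per affinity level with Aff3 folded down
 * into bits [31:24] of the 32-bit word.
 */
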
/*
 * Lookup the given MPIDR value to get the vcpu_id (if there is one)
 * and store that in the irq_spi_cpu[] array.
 * This limits the number of VCPUs to 255 for now, extending the data
 * type (or storing kvm_vcpu pointers) should lift the limit.
 * Store the original MPIDR value in an extra array to support read-as-written.
 * Unallocated MPIDRs are translated to a special value and caught
 * before any array accesses.
 */
static bool handle_mmio_route_reg(struct kvm_vcpu *vcpu,
				  struct kvm_exit_mmio *mmio,
				  phys_addr_t offset)
{
	struct kvm *kvm = vcpu->kvm;
	struct vgic_dist *dist = &kvm->arch.vgic;
	int spi;
	u32 reg;
	int vcpu_id;
	unsigned long *bmap, mpidr;

	/*
	 * The upper 32 bits of each 64 bit register are zero,
	 * as we don't support Aff3.
	 */
	if ((offset & 4)) {
		vgic_reg_access(mmio, NULL, offset,
				ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
		return false;
	}

	/* This region only covers SPIs, so no handling of private IRQs here. */
	spi = offset / 8;

	/* get the stored MPIDR for this IRQ */
	mpidr = uncompress_mpidr(dist->irq_spi_mpidr[spi]);
	reg = mpidr;

	vgic_reg_access(mmio, &reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);

	if (!mmio->is_write)
		return false;

	/*
	 * Now clear the currently assigned vCPU from the map, making room
	 * for the new one to be written below
	 */
	vcpu = kvm_mpidr_to_vcpu(kvm, mpidr);
	if (likely(vcpu)) {
		vcpu_id = vcpu->vcpu_id;
		bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]);
		__clear_bit(spi, bmap);
	}

	dist->irq_spi_mpidr[spi] = compress_mpidr(reg);
	vcpu = kvm_mpidr_to_vcpu(kvm, reg & MPIDR_HWID_BITMASK);

	/*
	 * The spec says that non-existent MPIDR values should not be
	 * forwarded to any existent (v)CPU, but should be able to become
	 * pending anyway. We simply keep the irq_spi_target[] array empty, so
	 * the interrupt will never be injected.
	 * irq_spi_cpu[irq] gets a magic value in this case.
	 */
	if (likely(vcpu)) {
		vcpu_id = vcpu->vcpu_id;
		dist->irq_spi_cpu[spi] = vcpu_id;
		bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]);
		__set_bit(spi, bmap);
	} else {
		dist->irq_spi_cpu[spi] = VCPU_NOT_ALLOCATED;
	}

	vgic_update_state(kvm);

	return false;
}

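/*
 * Recap of the flow above (illustrative): a guest write of an MPIDR to
 * GICD_IROUTER<n> (this range starts at INTID 32, so spi = n - 32) first
 * clears the SPI's bit for the previously targeted VCPU, then stores the
 * compressed MPIDR, looks up the new VCPU and, if it exists, records its
 * vcpu_id in irq_spi_cpu[spi] and sets the SPI's bit in that VCPU's
 * irq_spi_target bitmap; unknown MPIDRs leave the bitmaps empty, so the
 * SPI is never injected.
 */
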
/*
 * We should be careful about promising too much when a guest reads
 * this register. Don't claim to be like any hardware implementation,
 * but just report the GIC as version 3 - which is what a Linux guest
 * would check.
 */
static bool handle_mmio_idregs(struct kvm_vcpu *vcpu,
			       struct kvm_exit_mmio *mmio,
			       phys_addr_t offset)
{
	u32 reg = 0;

	switch (offset + GICD_IDREGS) {
	case GICD_PIDR2:
		reg = 0x3b;
		break;
	}

	vgic_reg_access(mmio, &reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);

	return false;
}

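/*
 * Illustrative note (assumption, not from the original source): the ID
 * register a Linux guest actually checks is GICD_PIDR2, whose bits [7:4]
 * hold the architecture revision; reporting 0x3 there identifies the GIC
 * as a GICv3.
 */
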
static const struct vgic_io_range vgic_v3_dist_ranges[] = {
	{
		.base		= GICD_CTLR,
		.handle_mmio	= handle_mmio_ctlr,
	},
	{
		.base		= GICD_TYPER,
		.handle_mmio	= handle_mmio_typer,
	},
	{
		.base		= GICD_IIDR,
		.handle_mmio	= handle_mmio_iidr,
	},
	{
		/* this register is optional, it is RAZ/WI if not implemented */
		.base		= GICD_STATUSR,
		.handle_mmio	= handle_mmio_raz_wi,
	},
	{
		/* this write only register is WI when TYPER.MBIS=0 */
		.base		= GICD_SETSPI_NSR,
		.handle_mmio	= handle_mmio_raz_wi,
	},
	{
		/* this write only register is WI when TYPER.MBIS=0 */
		.base		= GICD_CLRSPI_NSR,
		.handle_mmio	= handle_mmio_raz_wi,
	},
	{
		/* this is RAZ/WI when DS=1 */
		.base		= GICD_SETSPI_SR,
		.handle_mmio	= handle_mmio_raz_wi,
	},
	{
		/* this is RAZ/WI when DS=1 */
		.base		= GICD_CLRSPI_SR,
		.handle_mmio	= handle_mmio_raz_wi,
	},
	{
		.base		= GICD_IGROUPR,
		.handle_mmio	= handle_mmio_rao_wi,
	},
	{
		.base		= GICD_ISENABLER,
		.handle_mmio	= handle_mmio_set_enable_reg_dist,
	},
	{
		.base		= GICD_ICENABLER,
		.handle_mmio	= handle_mmio_clear_enable_reg_dist,
	},
	{
		.base		= GICD_ISPENDR,
		.handle_mmio	= handle_mmio_set_pending_reg_dist,
	},
	{
		.base		= GICD_ICPENDR,
		.handle_mmio	= handle_mmio_clear_pending_reg_dist,
	},
	{
		.base		= GICD_ISACTIVER,
		.handle_mmio	= handle_mmio_set_active_reg_dist,
	},
	{
		.base		= GICD_ICACTIVER,
		.handle_mmio	= handle_mmio_clear_active_reg_dist,
	},
	{
		.base		= GICD_IPRIORITYR,
		.handle_mmio	= handle_mmio_priority_reg_dist,
	},
	{
		/* TARGETSRn is RES0 when ARE=1 */
		.base		= GICD_ITARGETSR,
		.handle_mmio	= handle_mmio_raz_wi,
	},
	{
		.base		= GICD_ICFGR,
		.handle_mmio	= handle_mmio_cfg_reg_dist,
	},
	{
		/* this is RAZ/WI when DS=1 */
		.base		= GICD_IGRPMODR,
		.handle_mmio	= handle_mmio_raz_wi,
	},
	{
		/* this is RAZ/WI when DS=1 */
		.base		= GICD_NSACR,
		.handle_mmio	= handle_mmio_raz_wi,
	},
	{
		/* this is RAZ/WI when ARE=1 */
		.base		= GICD_SGIR,
		.handle_mmio	= handle_mmio_raz_wi,
	},
	{
		/* this is RAZ/WI when ARE=1 */
		.base		= GICD_CPENDSGIR,
		.handle_mmio	= handle_mmio_raz_wi,
	},
	{
		/* this is RAZ/WI when ARE=1 */
		.base		= GICD_SPENDSGIR,
		.handle_mmio	= handle_mmio_raz_wi,
	},
	{
		.base		= GICD_IROUTER + 0x100,
		.handle_mmio	= handle_mmio_route_reg,
	},
	{
		.base		= GICD_IDREGS,
		.handle_mmio	= handle_mmio_idregs,
	},
	{},
};

static bool handle_mmio_ctlr_redist(struct kvm_vcpu *vcpu,
				    struct kvm_exit_mmio *mmio,
				    phys_addr_t offset)
{
	/* since we don't support LPIs, this register is zero for now */
	vgic_reg_access(mmio, NULL, offset,
			ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
	return false;
}

static bool handle_mmio_typer_redist(struct kvm_vcpu *vcpu,
				     struct kvm_exit_mmio *mmio,
				     phys_addr_t offset)
{
	u32 reg;
	u64 mpidr;
	struct kvm_vcpu *redist_vcpu = mmio->private;
	int target_vcpu_id = redist_vcpu->vcpu_id;

	/* the upper 32 bits contain the affinity value */
	if ((offset & ~3) == 4) {
		mpidr = kvm_vcpu_get_mpidr_aff(redist_vcpu);
		reg = compress_mpidr(mpidr);

		vgic_reg_access(mmio, &reg, offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
		return false;
	}

	reg = redist_vcpu->vcpu_id << 8;
	if (target_vcpu_id == atomic_read(&vcpu->kvm->online_vcpus) - 1)
		reg |= GICR_TYPER_LAST;
	vgic_reg_access(mmio, &reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
	return false;
}

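/*
 * Illustrative sketch of the resulting GICR_TYPER value: the lower word
 * carries the processor number (the vcpu_id) in bits [23:8] plus the
 * Last bit for the final redistributor, while the upper word returns
 * the (compressed) affinity value of this VCPU's MPIDR, one byte per
 * affinity level.
 */
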
static bool handle_mmio_set_enable_reg_redist(struct kvm_vcpu *vcpu,
					      struct kvm_exit_mmio *mmio,
					      phys_addr_t offset)
{
	struct kvm_vcpu *redist_vcpu = mmio->private;

	return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
				      redist_vcpu->vcpu_id,
				      ACCESS_WRITE_SETBIT);
}

static bool handle_mmio_clear_enable_reg_redist(struct kvm_vcpu *vcpu,
						struct kvm_exit_mmio *mmio,
						phys_addr_t offset)
{
	struct kvm_vcpu *redist_vcpu = mmio->private;

	return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
				      redist_vcpu->vcpu_id,
				      ACCESS_WRITE_CLEARBIT);
}

static bool handle_mmio_set_active_reg_redist(struct kvm_vcpu *vcpu,
					      struct kvm_exit_mmio *mmio,
					      phys_addr_t offset)
{
	struct kvm_vcpu *redist_vcpu = mmio->private;

	return vgic_handle_set_active_reg(vcpu->kvm, mmio, offset,
					  redist_vcpu->vcpu_id);
}

static bool handle_mmio_clear_active_reg_redist(struct kvm_vcpu *vcpu,
						struct kvm_exit_mmio *mmio,
						phys_addr_t offset)
{
	struct kvm_vcpu *redist_vcpu = mmio->private;

	return vgic_handle_clear_active_reg(vcpu->kvm, mmio, offset,
					    redist_vcpu->vcpu_id);
}

static bool handle_mmio_set_pending_reg_redist(struct kvm_vcpu *vcpu,
					       struct kvm_exit_mmio *mmio,
					       phys_addr_t offset)
{
	struct kvm_vcpu *redist_vcpu = mmio->private;

	return vgic_handle_set_pending_reg(vcpu->kvm, mmio, offset,
					   redist_vcpu->vcpu_id);
}

static bool handle_mmio_clear_pending_reg_redist(struct kvm_vcpu *vcpu,
						 struct kvm_exit_mmio *mmio,
						 phys_addr_t offset)
{
	struct kvm_vcpu *redist_vcpu = mmio->private;

	return vgic_handle_clear_pending_reg(vcpu->kvm, mmio, offset,
					     redist_vcpu->vcpu_id);
}

static bool handle_mmio_priority_reg_redist(struct kvm_vcpu *vcpu,
					    struct kvm_exit_mmio *mmio,
					    phys_addr_t offset)
{
	struct kvm_vcpu *redist_vcpu = mmio->private;
	u32 *reg;

	reg = vgic_bytemap_get_reg(&vcpu->kvm->arch.vgic.irq_priority,
				   redist_vcpu->vcpu_id, offset);
	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
	return false;
}

static bool handle_mmio_cfg_reg_redist(struct kvm_vcpu *vcpu,
				       struct kvm_exit_mmio *mmio,
				       phys_addr_t offset)
{
	struct kvm_vcpu *redist_vcpu = mmio->private;

	u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
				       redist_vcpu->vcpu_id, offset >> 1);

	return vgic_handle_cfg_reg(reg, mmio, offset);
}

#define SGI_base(x) ((x) + SZ_64K)

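/*
 * Each redistributor occupies two contiguous 64K frames per VCPU: the
 * first (RD_base) holds the control and (here unimplemented) LPI
 * registers, the second (SGI_base) the SGI/PPI registers, hence the
 * 64K offset applied by the macro above.
 */
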
static const struct vgic_io_range vgic_redist_ranges[] = {
	{
		.base		= GICR_CTLR,
		.handle_mmio	= handle_mmio_ctlr_redist,
	},
	{
		.base		= GICR_TYPER,
		.handle_mmio	= handle_mmio_typer_redist,
	},
	{
		.base		= GICR_IIDR,
		.handle_mmio	= handle_mmio_iidr,
	},
	{
		.base		= GICR_WAKER,
		.handle_mmio	= handle_mmio_raz_wi,
	},
	{
		.base		= GICR_IDREGS,
		.handle_mmio	= handle_mmio_idregs,
	},
	{
		.base		= SGI_base(GICR_IGROUPR0),
		.handle_mmio	= handle_mmio_rao_wi,
	},
	{
		.base		= SGI_base(GICR_ISENABLER0),
		.handle_mmio	= handle_mmio_set_enable_reg_redist,
	},
	{
		.base		= SGI_base(GICR_ICENABLER0),
		.handle_mmio	= handle_mmio_clear_enable_reg_redist,
	},
	{
		.base		= SGI_base(GICR_ISPENDR0),
		.handle_mmio	= handle_mmio_set_pending_reg_redist,
	},
	{
		.base		= SGI_base(GICR_ICPENDR0),
		.handle_mmio	= handle_mmio_clear_pending_reg_redist,
	},
	{
		.base		= SGI_base(GICR_ISACTIVER0),
		.handle_mmio	= handle_mmio_set_active_reg_redist,
	},
	{
		.base		= SGI_base(GICR_ICACTIVER0),
		.handle_mmio	= handle_mmio_clear_active_reg_redist,
	},
	{
		.base		= SGI_base(GICR_IPRIORITYR0),
		.handle_mmio	= handle_mmio_priority_reg_redist,
	},
	{
		.base		= SGI_base(GICR_ICFGR0),
		.handle_mmio	= handle_mmio_cfg_reg_redist,
	},
	{
		.base		= SGI_base(GICR_IGRPMODR0),
		.handle_mmio	= handle_mmio_raz_wi,
	},
	{
		.base		= SGI_base(GICR_NSACR),
		.handle_mmio	= handle_mmio_raz_wi,
	},
	{},
};

static bool vgic_v3_queue_sgi(struct kvm_vcpu *vcpu, int irq)
{
	if (vgic_queue_irq(vcpu, 0, irq)) {
		vgic_dist_irq_clear_pending(vcpu, irq);
		vgic_cpu_irq_clear(vcpu, irq);
		return true;
	}

	return false;
}

static int vgic_v3_map_resources(struct kvm *kvm,
				 const struct vgic_params *params)
{
	int ret = 0;
	int i;
	struct vgic_dist *dist = &kvm->arch.vgic;
	gpa_t rdbase = dist->vgic_redist_base;
	struct vgic_io_device *iodevs = NULL;

	if (!irqchip_in_kernel(kvm))
		return 0;

	mutex_lock(&kvm->lock);

	if (vgic_ready(kvm))
		goto out;

	if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base) ||
	    IS_VGIC_ADDR_UNDEF(dist->vgic_redist_base)) {
		kvm_err("Need to set vgic distributor addresses first\n");
		ret = -ENXIO;
		goto out;
	}

	/*
	 * For a VGICv3 we require the userland to explicitly initialize
	 * the VGIC before we need to use it.
	 */
	if (!vgic_initialized(kvm)) {
		ret = -EBUSY;
		goto out;
	}

	ret = vgic_register_kvm_io_dev(kvm, dist->vgic_dist_base,
				       GIC_V3_DIST_SIZE, vgic_v3_dist_ranges,
				       -1, &dist->dist_iodev);
	if (ret)
		goto out;

	iodevs = kcalloc(dist->nr_cpus, sizeof(iodevs[0]), GFP_KERNEL);
	if (!iodevs) {
		ret = -ENOMEM;
		goto out_unregister;
	}

	for (i = 0; i < dist->nr_cpus; i++) {
		ret = vgic_register_kvm_io_dev(kvm, rdbase,
					       SZ_128K, vgic_redist_ranges,
					       i, &iodevs[i]);
		if (ret)
			goto out_unregister;
		rdbase += GIC_V3_REDIST_SIZE;
	}

	dist->redist_iodevs = iodevs;
	goto out;

out_unregister:
	kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &dist->dist_iodev.dev);

	if (iodevs) {
		for (i = 0; i < dist->nr_cpus; i++) {
			if (iodevs[i].dev.ops)
				kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS,
							  &iodevs[i].dev);
		}
	}

out:
	if (ret)
		kvm_vgic_destroy(kvm);
	mutex_unlock(&kvm->lock);

	return ret;
}

static int vgic_v3_init_model(struct kvm *kvm)
{
	int i;
	u32 mpidr;
	struct vgic_dist *dist = &kvm->arch.vgic;
	int nr_spis = dist->nr_irqs - VGIC_NR_PRIVATE_IRQS;

	dist->irq_spi_mpidr = kcalloc(nr_spis, sizeof(dist->irq_spi_mpidr[0]),
				      GFP_KERNEL);

	if (!dist->irq_spi_mpidr)
		return -ENOMEM;

	/* Initialize the target VCPUs for each IRQ to VCPU 0 */
	mpidr = compress_mpidr(kvm_vcpu_get_mpidr_aff(kvm_get_vcpu(kvm, 0)));
	for (i = VGIC_NR_PRIVATE_IRQS; i < dist->nr_irqs; i++) {
		dist->irq_spi_cpu[i - VGIC_NR_PRIVATE_IRQS] = 0;
		dist->irq_spi_mpidr[i - VGIC_NR_PRIVATE_IRQS] = mpidr;
		vgic_bitmap_set_irq_val(dist->irq_spi_target, 0, i, 1);
	}

	return 0;
}

/* GICv3 does not keep track of SGI sources anymore. */
static void vgic_v3_add_sgi_source(struct kvm_vcpu *vcpu, int irq, int source)
{
}

void vgic_v3_init_emulation(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;

	dist->vm_ops.queue_sgi = vgic_v3_queue_sgi;
	dist->vm_ops.add_sgi_source = vgic_v3_add_sgi_source;
	dist->vm_ops.init_model = vgic_v3_init_model;
	dist->vm_ops.map_resources = vgic_v3_map_resources;

	kvm->arch.max_vcpus = KVM_MAX_VCPUS;
}

/*
 * Compare a given affinity (level 1-3 and a level 0 mask, from the SGI
 * generation register ICC_SGI1R_EL1) with a given VCPU.
 * If the VCPU's MPIDR matches, return the level0 affinity, otherwise
 * return -1.
 */
static int match_mpidr(u64 sgi_aff, u16 sgi_cpu_mask, struct kvm_vcpu *vcpu)
{
	unsigned long affinity;
	int level0;

	/*
	 * Split the current VCPU's MPIDR into affinity level 0 and the
	 * rest as this is what we have to compare against.
	 */
	affinity = kvm_vcpu_get_mpidr_aff(vcpu);
	level0 = MPIDR_AFFINITY_LEVEL(affinity, 0);
	affinity &= ~MPIDR_LEVEL_MASK;

	/* bail out if the upper three levels don't match */
	if (sgi_aff != affinity)
		return -1;

	/* Is this VCPU's bit set in the mask ? */
	if (!(sgi_cpu_mask & BIT(level0)))
		return -1;

	return level0;
}

#define SGI_AFFINITY_LEVEL(reg, level) \
	((((reg) & ICC_SGI1R_AFFINITY_## level ##_MASK) \
	>> ICC_SGI1R_AFFINITY_## level ##_SHIFT) << MPIDR_LEVEL_SHIFT(level))

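/*
 * Worked example (illustrative): ICC_SGI1R_EL1 carries the target list
 * in bits [15:0], Aff1 in [23:16], the SGI INTID in [27:24], Aff2 in
 * [39:32], the routing mode (broadcast) bit at 40 and Aff3 in [55:48].
 * A write of 0x05010004 therefore requests SGI 5 for the CPU with
 * affinity 0.0.1.2 (bit 2 of the target list selects Aff0 = 2).
 */
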
/**
 * vgic_v3_dispatch_sgi - handle SGI requests from VCPUs
 * @vcpu: The VCPU requesting a SGI
 * @reg: The value written into the ICC_SGI1R_EL1 register by that VCPU
 *
 * With GICv3 (and ARE=1) CPUs trigger SGIs by writing to a system register.
 * This will trap in sys_regs.c and call this function.
 * This ICC_SGI1R_EL1 register contains the upper three affinity levels of the
 * target processors as well as a bitmask of 16 Aff0 CPUs.
 * If the interrupt routing mode bit is not set, we iterate over all VCPUs to
 * check for matching ones. If this bit is set, we signal all, but not the
 * calling VCPU.
 */
void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_vcpu *c_vcpu;
	struct vgic_dist *dist = &kvm->arch.vgic;
	u16 target_cpus;
	u64 mpidr;
	int sgi, c;
	int vcpu_id = vcpu->vcpu_id;
	bool broadcast;
	int updated = 0;

	sgi = (reg & ICC_SGI1R_SGI_ID_MASK) >> ICC_SGI1R_SGI_ID_SHIFT;
	broadcast = reg & BIT(ICC_SGI1R_IRQ_ROUTING_MODE_BIT);
	target_cpus = (reg & ICC_SGI1R_TARGET_LIST_MASK) >> ICC_SGI1R_TARGET_LIST_SHIFT;
	mpidr = SGI_AFFINITY_LEVEL(reg, 3);
	mpidr |= SGI_AFFINITY_LEVEL(reg, 2);
	mpidr |= SGI_AFFINITY_LEVEL(reg, 1);

	/*
	 * We take the dist lock here, because we come from the sysregs
	 * code path and not from the MMIO one (which already takes the lock).
	 */
	spin_lock(&dist->lock);

	/*
	 * We iterate over all VCPUs to find the MPIDRs matching the request.
	 * If we have handled one CPU, we clear its bit to detect early
	 * if we are already finished. This avoids iterating through all
	 * VCPUs when most of the times we just signal a single VCPU.
	 */
	kvm_for_each_vcpu(c, c_vcpu, kvm) {
		int level0;

		/* Exit early if we have dealt with all requested CPUs */
		if (!broadcast && target_cpus == 0)
			break;

		/* Don't signal the calling VCPU */
		if (broadcast && c == vcpu_id)
			continue;

		if (!broadcast) {
			level0 = match_mpidr(mpidr, target_cpus, c_vcpu);
			if (level0 == -1)
				continue;

			/* remove this matching VCPU from the mask */
			target_cpus &= ~BIT(level0);
		}

		/* Flag the SGI as pending */
		vgic_dist_irq_set_pending(c_vcpu, sgi);
		updated = 1;
		kvm_debug("SGI%d from CPU%d to CPU%d\n", sgi, vcpu_id, c);
	}
	if (updated)
		vgic_update_state(vcpu->kvm);
	spin_unlock(&dist->lock);
	if (updated)
		vgic_kick_vcpus(vcpu->kvm);
}

static int vgic_v3_create(struct kvm_device *dev, u32 type)
{
	return kvm_vgic_create(dev->kvm, type);
}

static void vgic_v3_destroy(struct kvm_device *dev)
{
	kfree(dev);
}

*dev
,
1004 struct kvm_device_attr
*attr
)
1008 ret
= vgic_set_common_attr(dev
, attr
);
1012 switch (attr
->group
) {
1013 case KVM_DEV_ARM_VGIC_GRP_DIST_REGS
:
1014 case KVM_DEV_ARM_VGIC_GRP_CPU_REGS
:
1021 static int vgic_v3_get_attr(struct kvm_device
*dev
,
1022 struct kvm_device_attr
*attr
)
1026 ret
= vgic_get_common_attr(dev
, attr
);
1030 switch (attr
->group
) {
1031 case KVM_DEV_ARM_VGIC_GRP_DIST_REGS
:
1032 case KVM_DEV_ARM_VGIC_GRP_CPU_REGS
:
static int vgic_v3_has_attr(struct kvm_device *dev,
			    struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR:
		switch (attr->attr) {
		case KVM_VGIC_V2_ADDR_TYPE_DIST:
		case KVM_VGIC_V2_ADDR_TYPE_CPU:
			return -ENXIO;
		case KVM_VGIC_V3_ADDR_TYPE_DIST:
		case KVM_VGIC_V3_ADDR_TYPE_REDIST:
			return 0;
		}
		break;
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
		return -ENXIO;
	case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
		return 0;
	case KVM_DEV_ARM_VGIC_GRP_CTRL:
		switch (attr->attr) {
		case KVM_DEV_ARM_VGIC_CTRL_INIT:
			return 0;
		}
	}
	return -ENXIO;
}

struct kvm_device_ops kvm_arm_vgic_v3_ops = {
	.name		= "kvm-arm-vgic-v3",
	.create		= vgic_v3_create,
	.destroy	= vgic_v3_destroy,
	.set_attr	= vgic_v3_set_attr,
	.get_attr	= vgic_v3_get_attr,
	.has_attr	= vgic_v3_has_attr,
};