2 * GICv3 distributor and redistributor emulation
4 * GICv3 emulation is currently only supported on a GICv3 host (because
5 * we rely on the hardware's CPU interface virtualization support), but
6 * supports both hardware with or without the optional GICv2 backwards
7 * compatibility features.
9 * Limitations of the emulation:
10 * (RAZ/WI: read as zero, write ignore, RAO/WI: read as one, write ignore)
11 * - We do not support LPIs (yet). TYPER.LPIS is reported as 0 and is RAZ/WI.
12 * - We do not support the message based interrupts (MBIs) triggered by
13 * writes to the GICD_{SET,CLR}SPI_* registers. TYPER.MBIS is reported as 0.
14 * - We do not support the (optional) backwards compatibility feature.
15 * GICD_CTLR.ARE resets to 1 and is RAO/WI. If the _host_ GIC supports
16 * the compatiblity feature, you can use a GICv2 in the guest, though.
17 * - We only support a single security state. GICD_CTLR.DS is 1 and is RAO/WI.
18 * - Priorities are not emulated (same as the GICv2 emulation). Linux
19 * as a guest is fine with this, because it does not use priorities.
20 * - We only support Group1 interrupts. Again Linux uses only those.
22 * Copyright (C) 2014 ARM Ltd.
23 * Author: Andre Przywara <andre.przywara@arm.com>
25 * This program is free software; you can redistribute it and/or modify
26 * it under the terms of the GNU General Public License version 2 as
27 * published by the Free Software Foundation.
29 * This program is distributed in the hope that it will be useful,
30 * but WITHOUT ANY WARRANTY; without even the implied warranty of
31 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
32 * GNU General Public License for more details.
34 * You should have received a copy of the GNU General Public License
35 * along with this program. If not, see <http://www.gnu.org/licenses/>.
38 #include <linux/cpu.h>
39 #include <linux/kvm.h>
40 #include <linux/kvm_host.h>
41 #include <linux/interrupt.h>
43 #include <linux/irqchip/arm-gic-v3.h>
44 #include <kvm/arm_vgic.h>
46 #include <asm/kvm_emulate.h>
47 #include <asm/kvm_arm.h>
48 #include <asm/kvm_mmu.h>
52 static bool handle_mmio_rao_wi(struct kvm_vcpu
*vcpu
,
53 struct kvm_exit_mmio
*mmio
, phys_addr_t offset
)
57 vgic_reg_access(mmio
, ®
, offset
,
58 ACCESS_READ_VALUE
| ACCESS_WRITE_IGNORED
);
63 static bool handle_mmio_ctlr(struct kvm_vcpu
*vcpu
,
64 struct kvm_exit_mmio
*mmio
, phys_addr_t offset
)
69 * Force ARE and DS to 1, the guest cannot change this.
70 * For the time being we only support Group1 interrupts.
72 if (vcpu
->kvm
->arch
.vgic
.enabled
)
73 reg
= GICD_CTLR_ENABLE_SS_G1
;
74 reg
|= GICD_CTLR_ARE_NS
| GICD_CTLR_DS
;
76 vgic_reg_access(mmio
, ®
, offset
,
77 ACCESS_READ_VALUE
| ACCESS_WRITE_VALUE
);
79 if (reg
& GICD_CTLR_ENABLE_SS_G0
)
80 kvm_info("guest tried to enable unsupported Group0 interrupts\n");
81 vcpu
->kvm
->arch
.vgic
.enabled
= !!(reg
& GICD_CTLR_ENABLE_SS_G1
);
82 vgic_update_state(vcpu
->kvm
);
89 * As this implementation does not provide compatibility
90 * with GICv2 (ARE==1), we report zero CPUs in bits [5..7].
91 * Also LPIs and MBIs are not supported, so we set the respective bits to 0.
92 * Also we report at most 2**10=1024 interrupt IDs (to match 1024 SPIs).
94 #define INTERRUPT_ID_BITS 10
95 static bool handle_mmio_typer(struct kvm_vcpu
*vcpu
,
96 struct kvm_exit_mmio
*mmio
, phys_addr_t offset
)
100 reg
= (min(vcpu
->kvm
->arch
.vgic
.nr_irqs
, 1024) >> 5) - 1;
102 reg
|= (INTERRUPT_ID_BITS
- 1) << 19;
104 vgic_reg_access(mmio
, ®
, offset
,
105 ACCESS_READ_VALUE
| ACCESS_WRITE_IGNORED
);
110 static bool handle_mmio_iidr(struct kvm_vcpu
*vcpu
,
111 struct kvm_exit_mmio
*mmio
, phys_addr_t offset
)
115 reg
= (PRODUCT_ID_KVM
<< 24) | (IMPLEMENTER_ARM
<< 0);
116 vgic_reg_access(mmio
, ®
, offset
,
117 ACCESS_READ_VALUE
| ACCESS_WRITE_IGNORED
);
122 static bool handle_mmio_set_enable_reg_dist(struct kvm_vcpu
*vcpu
,
123 struct kvm_exit_mmio
*mmio
,
126 if (likely(offset
>= VGIC_NR_PRIVATE_IRQS
/ 8))
127 return vgic_handle_enable_reg(vcpu
->kvm
, mmio
, offset
,
129 ACCESS_WRITE_SETBIT
);
131 vgic_reg_access(mmio
, NULL
, offset
,
132 ACCESS_READ_RAZ
| ACCESS_WRITE_IGNORED
);
136 static bool handle_mmio_clear_enable_reg_dist(struct kvm_vcpu
*vcpu
,
137 struct kvm_exit_mmio
*mmio
,
140 if (likely(offset
>= VGIC_NR_PRIVATE_IRQS
/ 8))
141 return vgic_handle_enable_reg(vcpu
->kvm
, mmio
, offset
,
143 ACCESS_WRITE_CLEARBIT
);
145 vgic_reg_access(mmio
, NULL
, offset
,
146 ACCESS_READ_RAZ
| ACCESS_WRITE_IGNORED
);
150 static bool handle_mmio_set_pending_reg_dist(struct kvm_vcpu
*vcpu
,
151 struct kvm_exit_mmio
*mmio
,
154 if (likely(offset
>= VGIC_NR_PRIVATE_IRQS
/ 8))
155 return vgic_handle_set_pending_reg(vcpu
->kvm
, mmio
, offset
,
158 vgic_reg_access(mmio
, NULL
, offset
,
159 ACCESS_READ_RAZ
| ACCESS_WRITE_IGNORED
);
163 static bool handle_mmio_clear_pending_reg_dist(struct kvm_vcpu
*vcpu
,
164 struct kvm_exit_mmio
*mmio
,
167 if (likely(offset
>= VGIC_NR_PRIVATE_IRQS
/ 8))
168 return vgic_handle_clear_pending_reg(vcpu
->kvm
, mmio
, offset
,
171 vgic_reg_access(mmio
, NULL
, offset
,
172 ACCESS_READ_RAZ
| ACCESS_WRITE_IGNORED
);
176 static bool handle_mmio_priority_reg_dist(struct kvm_vcpu
*vcpu
,
177 struct kvm_exit_mmio
*mmio
,
182 if (unlikely(offset
< VGIC_NR_PRIVATE_IRQS
)) {
183 vgic_reg_access(mmio
, NULL
, offset
,
184 ACCESS_READ_RAZ
| ACCESS_WRITE_IGNORED
);
188 reg
= vgic_bytemap_get_reg(&vcpu
->kvm
->arch
.vgic
.irq_priority
,
189 vcpu
->vcpu_id
, offset
);
190 vgic_reg_access(mmio
, reg
, offset
,
191 ACCESS_READ_VALUE
| ACCESS_WRITE_VALUE
);
195 static bool handle_mmio_cfg_reg_dist(struct kvm_vcpu
*vcpu
,
196 struct kvm_exit_mmio
*mmio
,
201 if (unlikely(offset
< VGIC_NR_PRIVATE_IRQS
/ 4)) {
202 vgic_reg_access(mmio
, NULL
, offset
,
203 ACCESS_READ_RAZ
| ACCESS_WRITE_IGNORED
);
207 reg
= vgic_bitmap_get_reg(&vcpu
->kvm
->arch
.vgic
.irq_cfg
,
208 vcpu
->vcpu_id
, offset
>> 1);
210 return vgic_handle_cfg_reg(reg
, mmio
, offset
);
214 * We use a compressed version of the MPIDR (all 32 bits in one 32-bit word)
215 * when we store the target MPIDR written by the guest.
217 static u32
compress_mpidr(unsigned long mpidr
)
221 ret
= MPIDR_AFFINITY_LEVEL(mpidr
, 0);
222 ret
|= MPIDR_AFFINITY_LEVEL(mpidr
, 1) << 8;
223 ret
|= MPIDR_AFFINITY_LEVEL(mpidr
, 2) << 16;
224 ret
|= MPIDR_AFFINITY_LEVEL(mpidr
, 3) << 24;
229 static unsigned long uncompress_mpidr(u32 value
)
233 mpidr
= ((value
>> 0) & 0xFF) << MPIDR_LEVEL_SHIFT(0);
234 mpidr
|= ((value
>> 8) & 0xFF) << MPIDR_LEVEL_SHIFT(1);
235 mpidr
|= ((value
>> 16) & 0xFF) << MPIDR_LEVEL_SHIFT(2);
236 mpidr
|= (u64
)((value
>> 24) & 0xFF) << MPIDR_LEVEL_SHIFT(3);
242 * Lookup the given MPIDR value to get the vcpu_id (if there is one)
243 * and store that in the irq_spi_cpu[] array.
244 * This limits the number of VCPUs to 255 for now, extending the data
245 * type (or storing kvm_vcpu pointers) should lift the limit.
246 * Store the original MPIDR value in an extra array to support read-as-written.
247 * Unallocated MPIDRs are translated to a special value and caught
248 * before any array accesses.
250 static bool handle_mmio_route_reg(struct kvm_vcpu
*vcpu
,
251 struct kvm_exit_mmio
*mmio
,
254 struct kvm
*kvm
= vcpu
->kvm
;
255 struct vgic_dist
*dist
= &kvm
->arch
.vgic
;
259 unsigned long *bmap
, mpidr
;
262 * The upper 32 bits of each 64 bit register are zero,
263 * as we don't support Aff3.
266 vgic_reg_access(mmio
, NULL
, offset
,
267 ACCESS_READ_RAZ
| ACCESS_WRITE_IGNORED
);
271 /* This region only covers SPIs, so no handling of private IRQs here. */
274 /* get the stored MPIDR for this IRQ */
275 mpidr
= uncompress_mpidr(dist
->irq_spi_mpidr
[spi
]);
278 vgic_reg_access(mmio
, ®
, offset
,
279 ACCESS_READ_VALUE
| ACCESS_WRITE_VALUE
);
285 * Now clear the currently assigned vCPU from the map, making room
286 * for the new one to be written below
288 vcpu
= kvm_mpidr_to_vcpu(kvm
, mpidr
);
290 vcpu_id
= vcpu
->vcpu_id
;
291 bmap
= vgic_bitmap_get_shared_map(&dist
->irq_spi_target
[vcpu_id
]);
292 __clear_bit(spi
, bmap
);
295 dist
->irq_spi_mpidr
[spi
] = compress_mpidr(reg
);
296 vcpu
= kvm_mpidr_to_vcpu(kvm
, reg
& MPIDR_HWID_BITMASK
);
299 * The spec says that non-existent MPIDR values should not be
300 * forwarded to any existent (v)CPU, but should be able to become
301 * pending anyway. We simply keep the irq_spi_target[] array empty, so
302 * the interrupt will never be injected.
303 * irq_spi_cpu[irq] gets a magic value in this case.
306 vcpu_id
= vcpu
->vcpu_id
;
307 dist
->irq_spi_cpu
[spi
] = vcpu_id
;
308 bmap
= vgic_bitmap_get_shared_map(&dist
->irq_spi_target
[vcpu_id
]);
309 __set_bit(spi
, bmap
);
311 dist
->irq_spi_cpu
[spi
] = VCPU_NOT_ALLOCATED
;
314 vgic_update_state(kvm
);
320 * We should be careful about promising too much when a guest reads
321 * this register. Don't claim to be like any hardware implementation,
322 * but just report the GIC as version 3 - which is what a Linux guest
325 static bool handle_mmio_idregs(struct kvm_vcpu
*vcpu
,
326 struct kvm_exit_mmio
*mmio
,
331 switch (offset
+ GICD_IDREGS
) {
337 vgic_reg_access(mmio
, ®
, offset
,
338 ACCESS_READ_VALUE
| ACCESS_WRITE_IGNORED
);
343 static const struct vgic_io_range vgic_v3_dist_ranges
[] = {
348 .handle_mmio
= handle_mmio_ctlr
,
354 .handle_mmio
= handle_mmio_typer
,
360 .handle_mmio
= handle_mmio_iidr
,
363 /* this register is optional, it is RAZ/WI if not implemented */
364 .base
= GICD_STATUSR
,
367 .handle_mmio
= handle_mmio_raz_wi
,
370 /* this write only register is WI when TYPER.MBIS=0 */
371 .base
= GICD_SETSPI_NSR
,
374 .handle_mmio
= handle_mmio_raz_wi
,
377 /* this write only register is WI when TYPER.MBIS=0 */
378 .base
= GICD_CLRSPI_NSR
,
381 .handle_mmio
= handle_mmio_raz_wi
,
384 /* this is RAZ/WI when DS=1 */
385 .base
= GICD_SETSPI_SR
,
388 .handle_mmio
= handle_mmio_raz_wi
,
391 /* this is RAZ/WI when DS=1 */
392 .base
= GICD_CLRSPI_SR
,
395 .handle_mmio
= handle_mmio_raz_wi
,
398 .base
= GICD_IGROUPR
,
401 .handle_mmio
= handle_mmio_rao_wi
,
404 .base
= GICD_ISENABLER
,
407 .handle_mmio
= handle_mmio_set_enable_reg_dist
,
410 .base
= GICD_ICENABLER
,
413 .handle_mmio
= handle_mmio_clear_enable_reg_dist
,
416 .base
= GICD_ISPENDR
,
419 .handle_mmio
= handle_mmio_set_pending_reg_dist
,
422 .base
= GICD_ICPENDR
,
425 .handle_mmio
= handle_mmio_clear_pending_reg_dist
,
428 .base
= GICD_ISACTIVER
,
431 .handle_mmio
= handle_mmio_raz_wi
,
434 .base
= GICD_ICACTIVER
,
437 .handle_mmio
= handle_mmio_raz_wi
,
440 .base
= GICD_IPRIORITYR
,
443 .handle_mmio
= handle_mmio_priority_reg_dist
,
446 /* TARGETSRn is RES0 when ARE=1 */
447 .base
= GICD_ITARGETSR
,
450 .handle_mmio
= handle_mmio_raz_wi
,
456 .handle_mmio
= handle_mmio_cfg_reg_dist
,
459 /* this is RAZ/WI when DS=1 */
460 .base
= GICD_IGRPMODR
,
463 .handle_mmio
= handle_mmio_raz_wi
,
466 /* this is RAZ/WI when DS=1 */
470 .handle_mmio
= handle_mmio_raz_wi
,
473 /* this is RAZ/WI when ARE=1 */
476 .handle_mmio
= handle_mmio_raz_wi
,
479 /* this is RAZ/WI when ARE=1 */
480 .base
= GICD_CPENDSGIR
,
482 .handle_mmio
= handle_mmio_raz_wi
,
485 /* this is RAZ/WI when ARE=1 */
486 .base
= GICD_SPENDSGIR
,
488 .handle_mmio
= handle_mmio_raz_wi
,
491 .base
= GICD_IROUTER
+ 0x100,
494 .handle_mmio
= handle_mmio_route_reg
,
500 .handle_mmio
= handle_mmio_idregs
,
505 static bool handle_mmio_ctlr_redist(struct kvm_vcpu
*vcpu
,
506 struct kvm_exit_mmio
*mmio
,
509 /* since we don't support LPIs, this register is zero for now */
510 vgic_reg_access(mmio
, NULL
, offset
,
511 ACCESS_READ_RAZ
| ACCESS_WRITE_IGNORED
);
515 static bool handle_mmio_typer_redist(struct kvm_vcpu
*vcpu
,
516 struct kvm_exit_mmio
*mmio
,
521 struct kvm_vcpu
*redist_vcpu
= mmio
->private;
522 int target_vcpu_id
= redist_vcpu
->vcpu_id
;
524 /* the upper 32 bits contain the affinity value */
525 if ((offset
& ~3) == 4) {
526 mpidr
= kvm_vcpu_get_mpidr_aff(redist_vcpu
);
527 reg
= compress_mpidr(mpidr
);
529 vgic_reg_access(mmio
, ®
, offset
,
530 ACCESS_READ_VALUE
| ACCESS_WRITE_IGNORED
);
534 reg
= redist_vcpu
->vcpu_id
<< 8;
535 if (target_vcpu_id
== atomic_read(&vcpu
->kvm
->online_vcpus
) - 1)
536 reg
|= GICR_TYPER_LAST
;
537 vgic_reg_access(mmio
, ®
, offset
,
538 ACCESS_READ_VALUE
| ACCESS_WRITE_IGNORED
);
542 static bool handle_mmio_set_enable_reg_redist(struct kvm_vcpu
*vcpu
,
543 struct kvm_exit_mmio
*mmio
,
546 struct kvm_vcpu
*redist_vcpu
= mmio
->private;
548 return vgic_handle_enable_reg(vcpu
->kvm
, mmio
, offset
,
549 redist_vcpu
->vcpu_id
,
550 ACCESS_WRITE_SETBIT
);
553 static bool handle_mmio_clear_enable_reg_redist(struct kvm_vcpu
*vcpu
,
554 struct kvm_exit_mmio
*mmio
,
557 struct kvm_vcpu
*redist_vcpu
= mmio
->private;
559 return vgic_handle_enable_reg(vcpu
->kvm
, mmio
, offset
,
560 redist_vcpu
->vcpu_id
,
561 ACCESS_WRITE_CLEARBIT
);
564 static bool handle_mmio_set_pending_reg_redist(struct kvm_vcpu
*vcpu
,
565 struct kvm_exit_mmio
*mmio
,
568 struct kvm_vcpu
*redist_vcpu
= mmio
->private;
570 return vgic_handle_set_pending_reg(vcpu
->kvm
, mmio
, offset
,
571 redist_vcpu
->vcpu_id
);
574 static bool handle_mmio_clear_pending_reg_redist(struct kvm_vcpu
*vcpu
,
575 struct kvm_exit_mmio
*mmio
,
578 struct kvm_vcpu
*redist_vcpu
= mmio
->private;
580 return vgic_handle_clear_pending_reg(vcpu
->kvm
, mmio
, offset
,
581 redist_vcpu
->vcpu_id
);
584 static bool handle_mmio_priority_reg_redist(struct kvm_vcpu
*vcpu
,
585 struct kvm_exit_mmio
*mmio
,
588 struct kvm_vcpu
*redist_vcpu
= mmio
->private;
591 reg
= vgic_bytemap_get_reg(&vcpu
->kvm
->arch
.vgic
.irq_priority
,
592 redist_vcpu
->vcpu_id
, offset
);
593 vgic_reg_access(mmio
, reg
, offset
,
594 ACCESS_READ_VALUE
| ACCESS_WRITE_VALUE
);
598 static bool handle_mmio_cfg_reg_redist(struct kvm_vcpu
*vcpu
,
599 struct kvm_exit_mmio
*mmio
,
602 struct kvm_vcpu
*redist_vcpu
= mmio
->private;
604 u32
*reg
= vgic_bitmap_get_reg(&vcpu
->kvm
->arch
.vgic
.irq_cfg
,
605 redist_vcpu
->vcpu_id
, offset
>> 1);
607 return vgic_handle_cfg_reg(reg
, mmio
, offset
);
610 #define SGI_base(x) ((x) + SZ_64K)
612 static const struct vgic_io_range vgic_redist_ranges
[] = {
617 .handle_mmio
= handle_mmio_ctlr_redist
,
623 .handle_mmio
= handle_mmio_typer_redist
,
629 .handle_mmio
= handle_mmio_iidr
,
635 .handle_mmio
= handle_mmio_raz_wi
,
641 .handle_mmio
= handle_mmio_idregs
,
644 .base
= SGI_base(GICR_IGROUPR0
),
647 .handle_mmio
= handle_mmio_rao_wi
,
650 .base
= SGI_base(GICR_ISENABLER0
),
653 .handle_mmio
= handle_mmio_set_enable_reg_redist
,
656 .base
= SGI_base(GICR_ICENABLER0
),
659 .handle_mmio
= handle_mmio_clear_enable_reg_redist
,
662 .base
= SGI_base(GICR_ISPENDR0
),
665 .handle_mmio
= handle_mmio_set_pending_reg_redist
,
668 .base
= SGI_base(GICR_ICPENDR0
),
671 .handle_mmio
= handle_mmio_clear_pending_reg_redist
,
674 .base
= SGI_base(GICR_ISACTIVER0
),
677 .handle_mmio
= handle_mmio_raz_wi
,
680 .base
= SGI_base(GICR_ICACTIVER0
),
683 .handle_mmio
= handle_mmio_raz_wi
,
686 .base
= SGI_base(GICR_IPRIORITYR0
),
689 .handle_mmio
= handle_mmio_priority_reg_redist
,
692 .base
= SGI_base(GICR_ICFGR0
),
695 .handle_mmio
= handle_mmio_cfg_reg_redist
,
698 .base
= SGI_base(GICR_IGRPMODR0
),
701 .handle_mmio
= handle_mmio_raz_wi
,
704 .base
= SGI_base(GICR_NSACR
),
706 .handle_mmio
= handle_mmio_raz_wi
,
711 static bool vgic_v3_queue_sgi(struct kvm_vcpu
*vcpu
, int irq
)
713 if (vgic_queue_irq(vcpu
, 0, irq
)) {
714 vgic_dist_irq_clear_pending(vcpu
, irq
);
715 vgic_cpu_irq_clear(vcpu
, irq
);
722 static int vgic_v3_map_resources(struct kvm
*kvm
,
723 const struct vgic_params
*params
)
726 struct vgic_dist
*dist
= &kvm
->arch
.vgic
;
727 gpa_t rdbase
= dist
->vgic_redist_base
;
728 struct vgic_io_device
*iodevs
= NULL
;
731 if (!irqchip_in_kernel(kvm
))
734 mutex_lock(&kvm
->lock
);
739 if (IS_VGIC_ADDR_UNDEF(dist
->vgic_dist_base
) ||
740 IS_VGIC_ADDR_UNDEF(dist
->vgic_redist_base
)) {
741 kvm_err("Need to set vgic distributor addresses first\n");
747 * For a VGICv3 we require the userland to explicitly initialize
748 * the VGIC before we need to use it.
750 if (!vgic_initialized(kvm
)) {
755 ret
= vgic_register_kvm_io_dev(kvm
, dist
->vgic_dist_base
,
756 GIC_V3_DIST_SIZE
, vgic_v3_dist_ranges
,
757 -1, &dist
->dist_iodev
);
761 iodevs
= kcalloc(dist
->nr_cpus
, sizeof(iodevs
[0]), GFP_KERNEL
);
767 for (i
= 0; i
< dist
->nr_cpus
; i
++) {
768 ret
= vgic_register_kvm_io_dev(kvm
, rdbase
,
769 SZ_128K
, vgic_redist_ranges
,
773 rdbase
+= GIC_V3_REDIST_SIZE
;
776 dist
->redist_iodevs
= iodevs
;
781 kvm_io_bus_unregister_dev(kvm
, KVM_MMIO_BUS
, &dist
->dist_iodev
.dev
);
783 for (i
= 0; i
< dist
->nr_cpus
; i
++) {
784 if (iodevs
[i
].dev
.ops
)
785 kvm_io_bus_unregister_dev(kvm
, KVM_MMIO_BUS
,
792 kvm_vgic_destroy(kvm
);
793 mutex_unlock(&kvm
->lock
);
797 static int vgic_v3_init_model(struct kvm
*kvm
)
801 struct vgic_dist
*dist
= &kvm
->arch
.vgic
;
802 int nr_spis
= dist
->nr_irqs
- VGIC_NR_PRIVATE_IRQS
;
804 dist
->irq_spi_mpidr
= kcalloc(nr_spis
, sizeof(dist
->irq_spi_mpidr
[0]),
807 if (!dist
->irq_spi_mpidr
)
810 /* Initialize the target VCPUs for each IRQ to VCPU 0 */
811 mpidr
= compress_mpidr(kvm_vcpu_get_mpidr_aff(kvm_get_vcpu(kvm
, 0)));
812 for (i
= VGIC_NR_PRIVATE_IRQS
; i
< dist
->nr_irqs
; i
++) {
813 dist
->irq_spi_cpu
[i
- VGIC_NR_PRIVATE_IRQS
] = 0;
814 dist
->irq_spi_mpidr
[i
- VGIC_NR_PRIVATE_IRQS
] = mpidr
;
815 vgic_bitmap_set_irq_val(dist
->irq_spi_target
, 0, i
, 1);
821 /* GICv3 does not keep track of SGI sources anymore. */
822 static void vgic_v3_add_sgi_source(struct kvm_vcpu
*vcpu
, int irq
, int source
)
826 void vgic_v3_init_emulation(struct kvm
*kvm
)
828 struct vgic_dist
*dist
= &kvm
->arch
.vgic
;
830 dist
->vm_ops
.queue_sgi
= vgic_v3_queue_sgi
;
831 dist
->vm_ops
.add_sgi_source
= vgic_v3_add_sgi_source
;
832 dist
->vm_ops
.init_model
= vgic_v3_init_model
;
833 dist
->vm_ops
.map_resources
= vgic_v3_map_resources
;
835 kvm
->arch
.max_vcpus
= KVM_MAX_VCPUS
;
839 * Compare a given affinity (level 1-3 and a level 0 mask, from the SGI
840 * generation register ICC_SGI1R_EL1) with a given VCPU.
841 * If the VCPU's MPIDR matches, return the level0 affinity, otherwise
844 static int match_mpidr(u64 sgi_aff
, u16 sgi_cpu_mask
, struct kvm_vcpu
*vcpu
)
846 unsigned long affinity
;
850 * Split the current VCPU's MPIDR into affinity level 0 and the
851 * rest as this is what we have to compare against.
853 affinity
= kvm_vcpu_get_mpidr_aff(vcpu
);
854 level0
= MPIDR_AFFINITY_LEVEL(affinity
, 0);
855 affinity
&= ~MPIDR_LEVEL_MASK
;
857 /* bail out if the upper three levels don't match */
858 if (sgi_aff
!= affinity
)
861 /* Is this VCPU's bit set in the mask ? */
862 if (!(sgi_cpu_mask
& BIT(level0
)))
868 #define SGI_AFFINITY_LEVEL(reg, level) \
869 ((((reg) & ICC_SGI1R_AFFINITY_## level ##_MASK) \
870 >> ICC_SGI1R_AFFINITY_## level ##_SHIFT) << MPIDR_LEVEL_SHIFT(level))
873 * vgic_v3_dispatch_sgi - handle SGI requests from VCPUs
874 * @vcpu: The VCPU requesting a SGI
875 * @reg: The value written into the ICC_SGI1R_EL1 register by that VCPU
877 * With GICv3 (and ARE=1) CPUs trigger SGIs by writing to a system register.
878 * This will trap in sys_regs.c and call this function.
879 * This ICC_SGI1R_EL1 register contains the upper three affinity levels of the
880 * target processors as well as a bitmask of 16 Aff0 CPUs.
881 * If the interrupt routing mode bit is not set, we iterate over all VCPUs to
882 * check for matching ones. If this bit is set, we signal all, but not the
885 void vgic_v3_dispatch_sgi(struct kvm_vcpu
*vcpu
, u64 reg
)
887 struct kvm
*kvm
= vcpu
->kvm
;
888 struct kvm_vcpu
*c_vcpu
;
889 struct vgic_dist
*dist
= &kvm
->arch
.vgic
;
893 int vcpu_id
= vcpu
->vcpu_id
;
897 sgi
= (reg
& ICC_SGI1R_SGI_ID_MASK
) >> ICC_SGI1R_SGI_ID_SHIFT
;
898 broadcast
= reg
& BIT(ICC_SGI1R_IRQ_ROUTING_MODE_BIT
);
899 target_cpus
= (reg
& ICC_SGI1R_TARGET_LIST_MASK
) >> ICC_SGI1R_TARGET_LIST_SHIFT
;
900 mpidr
= SGI_AFFINITY_LEVEL(reg
, 3);
901 mpidr
|= SGI_AFFINITY_LEVEL(reg
, 2);
902 mpidr
|= SGI_AFFINITY_LEVEL(reg
, 1);
905 * We take the dist lock here, because we come from the sysregs
906 * code path and not from the MMIO one (which already takes the lock).
908 spin_lock(&dist
->lock
);
911 * We iterate over all VCPUs to find the MPIDRs matching the request.
912 * If we have handled one CPU, we clear it's bit to detect early
913 * if we are already finished. This avoids iterating through all
914 * VCPUs when most of the times we just signal a single VCPU.
916 kvm_for_each_vcpu(c
, c_vcpu
, kvm
) {
918 /* Exit early if we have dealt with all requested CPUs */
919 if (!broadcast
&& target_cpus
== 0)
922 /* Don't signal the calling VCPU */
923 if (broadcast
&& c
== vcpu_id
)
929 level0
= match_mpidr(mpidr
, target_cpus
, c_vcpu
);
933 /* remove this matching VCPU from the mask */
934 target_cpus
&= ~BIT(level0
);
937 /* Flag the SGI as pending */
938 vgic_dist_irq_set_pending(c_vcpu
, sgi
);
940 kvm_debug("SGI%d from CPU%d to CPU%d\n", sgi
, vcpu_id
, c
);
943 vgic_update_state(vcpu
->kvm
);
944 spin_unlock(&dist
->lock
);
946 vgic_kick_vcpus(vcpu
->kvm
);
949 static int vgic_v3_create(struct kvm_device
*dev
, u32 type
)
951 return kvm_vgic_create(dev
->kvm
, type
);
954 static void vgic_v3_destroy(struct kvm_device
*dev
)
959 static int vgic_v3_set_attr(struct kvm_device
*dev
,
960 struct kvm_device_attr
*attr
)
964 ret
= vgic_set_common_attr(dev
, attr
);
968 switch (attr
->group
) {
969 case KVM_DEV_ARM_VGIC_GRP_DIST_REGS
:
970 case KVM_DEV_ARM_VGIC_GRP_CPU_REGS
:
977 static int vgic_v3_get_attr(struct kvm_device
*dev
,
978 struct kvm_device_attr
*attr
)
982 ret
= vgic_get_common_attr(dev
, attr
);
986 switch (attr
->group
) {
987 case KVM_DEV_ARM_VGIC_GRP_DIST_REGS
:
988 case KVM_DEV_ARM_VGIC_GRP_CPU_REGS
:
995 static int vgic_v3_has_attr(struct kvm_device
*dev
,
996 struct kvm_device_attr
*attr
)
998 switch (attr
->group
) {
999 case KVM_DEV_ARM_VGIC_GRP_ADDR
:
1000 switch (attr
->attr
) {
1001 case KVM_VGIC_V2_ADDR_TYPE_DIST
:
1002 case KVM_VGIC_V2_ADDR_TYPE_CPU
:
1004 case KVM_VGIC_V3_ADDR_TYPE_DIST
:
1005 case KVM_VGIC_V3_ADDR_TYPE_REDIST
:
1009 case KVM_DEV_ARM_VGIC_GRP_DIST_REGS
:
1010 case KVM_DEV_ARM_VGIC_GRP_CPU_REGS
:
1012 case KVM_DEV_ARM_VGIC_GRP_NR_IRQS
:
1014 case KVM_DEV_ARM_VGIC_GRP_CTRL
:
1015 switch (attr
->attr
) {
1016 case KVM_DEV_ARM_VGIC_CTRL_INIT
:
1023 struct kvm_device_ops kvm_arm_vgic_v3_ops
= {
1024 .name
= "kvm-arm-vgic-v3",
1025 .create
= vgic_v3_create
,
1026 .destroy
= vgic_v3_destroy
,
1027 .set_attr
= vgic_v3_set_attr
,
1028 .get_attr
= vgic_v3_get_attr
,
1029 .has_attr
= vgic_v3_has_attr
,