Linux 4.1.18
virt/kvm/arm/vgic-v3-emul.c
/*
 * GICv3 distributor and redistributor emulation
 *
 * GICv3 emulation is currently only supported on a GICv3 host (because
 * we rely on the hardware's CPU interface virtualization support), but
 * supports both hardware with or without the optional GICv2 backwards
 * compatibility features.
 *
 * Limitations of the emulation:
 * (RAZ/WI: read as zero, write ignore; RAO/WI: read as one, write ignore)
 * - We do not support LPIs (yet). TYPER.LPIS is reported as 0 and is RAZ/WI.
 * - We do not support the message based interrupts (MBIs) triggered by
 *   writes to the GICD_{SET,CLR}SPI_* registers. TYPER.MBIS is reported as 0.
 * - We do not support the (optional) backwards compatibility feature.
 *   GICD_CTLR.ARE resets to 1 and is RAO/WI. If the _host_ GIC supports
 *   the compatibility feature, you can still use a GICv2 in the guest.
 * - We only support a single security state. GICD_CTLR.DS is 1 and is RAO/WI.
 * - Priorities are not emulated (same as the GICv2 emulation). Linux
 *   as a guest is fine with this, because it does not use priorities.
 * - We only support Group1 interrupts. Again, Linux uses only those.
 *
 * Copyright (C) 2014 ARM Ltd.
 * Author: Andre Przywara <andre.przywara@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>

#include <linux/irqchip/arm-gic-v3.h>
#include <kvm/arm_vgic.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>

#include "vgic.h"

static bool handle_mmio_rao_wi(struct kvm_vcpu *vcpu,
			       struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	u32 reg = 0xffffffff;

	vgic_reg_access(mmio, &reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);

	return false;
}

static bool handle_mmio_ctlr(struct kvm_vcpu *vcpu,
			     struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	u32 reg = 0;

	/*
	 * Force ARE and DS to 1, the guest cannot change this.
	 * For the time being we only support Group1 interrupts.
	 */
	if (vcpu->kvm->arch.vgic.enabled)
		reg = GICD_CTLR_ENABLE_SS_G1;
	reg |= GICD_CTLR_ARE_NS | GICD_CTLR_DS;

	vgic_reg_access(mmio, &reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
	if (mmio->is_write) {
		if (reg & GICD_CTLR_ENABLE_SS_G0)
			kvm_info("guest tried to enable unsupported Group0 interrupts\n");
		vcpu->kvm->arch.vgic.enabled = !!(reg & GICD_CTLR_ENABLE_SS_G1);
		vgic_update_state(vcpu->kvm);
		return true;
	}
	return false;
}

/*
 * As this implementation does not provide compatibility
 * with GICv2 (ARE==1), we report zero CPUs in bits [5..7].
 * Also LPIs and MBIs are not supported, so we set the respective bits to 0.
 * Also we report at most 2**10=1024 interrupt IDs (to match 1024 SPIs).
 */
#define INTERRUPT_ID_BITS 10
static bool handle_mmio_typer(struct kvm_vcpu *vcpu,
			      struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	u32 reg;

	reg = (min(vcpu->kvm->arch.vgic.nr_irqs, 1024) >> 5) - 1;

	reg |= (INTERRUPT_ID_BITS - 1) << 19;

	vgic_reg_access(mmio, &reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);

	return false;
}
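
/*
 * A worked example for the GICD_TYPER value computed above: with
 * nr_irqs == 256, ITLinesNumber (bits [4:0]) becomes (256 >> 5) - 1 = 7,
 * i.e. 32 * (7 + 1) = 256 interrupt lines, and IDbits (bits [23:19]) is
 * INTERRUPT_ID_BITS - 1 = 9, advertising 2^10 = 1024 interrupt IDs.
 */
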
static bool handle_mmio_iidr(struct kvm_vcpu *vcpu,
			     struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	u32 reg;

	reg = (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
	vgic_reg_access(mmio, &reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);

	return false;
}
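
/*
 * The distributor handlers below share one quirk: the first 32 interrupts
 * (SGIs and PPIs) are private and accessed through the per-VCPU
 * redistributor frames, so the corresponding low offsets of the distributor
 * ranges (below VGIC_NR_PRIVATE_IRQS / 8 bytes for the 1-bit-per-IRQ
 * registers) are treated as RAZ/WI here.
 */
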
static bool handle_mmio_set_enable_reg_dist(struct kvm_vcpu *vcpu,
					    struct kvm_exit_mmio *mmio,
					    phys_addr_t offset)
{
	if (likely(offset >= VGIC_NR_PRIVATE_IRQS / 8))
		return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
					      vcpu->vcpu_id,
					      ACCESS_WRITE_SETBIT);

	vgic_reg_access(mmio, NULL, offset,
			ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
	return false;
}

static bool handle_mmio_clear_enable_reg_dist(struct kvm_vcpu *vcpu,
					      struct kvm_exit_mmio *mmio,
					      phys_addr_t offset)
{
	if (likely(offset >= VGIC_NR_PRIVATE_IRQS / 8))
		return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
					      vcpu->vcpu_id,
					      ACCESS_WRITE_CLEARBIT);

	vgic_reg_access(mmio, NULL, offset,
			ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
	return false;
}

static bool handle_mmio_set_pending_reg_dist(struct kvm_vcpu *vcpu,
					     struct kvm_exit_mmio *mmio,
					     phys_addr_t offset)
{
	if (likely(offset >= VGIC_NR_PRIVATE_IRQS / 8))
		return vgic_handle_set_pending_reg(vcpu->kvm, mmio, offset,
						   vcpu->vcpu_id);

	vgic_reg_access(mmio, NULL, offset,
			ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
	return false;
}

static bool handle_mmio_clear_pending_reg_dist(struct kvm_vcpu *vcpu,
					       struct kvm_exit_mmio *mmio,
					       phys_addr_t offset)
{
	if (likely(offset >= VGIC_NR_PRIVATE_IRQS / 8))
		return vgic_handle_clear_pending_reg(vcpu->kvm, mmio, offset,
						     vcpu->vcpu_id);

	vgic_reg_access(mmio, NULL, offset,
			ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
	return false;
}

static bool handle_mmio_priority_reg_dist(struct kvm_vcpu *vcpu,
					  struct kvm_exit_mmio *mmio,
					  phys_addr_t offset)
{
	u32 *reg;

	if (unlikely(offset < VGIC_NR_PRIVATE_IRQS)) {
		vgic_reg_access(mmio, NULL, offset,
				ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
		return false;
	}

	reg = vgic_bytemap_get_reg(&vcpu->kvm->arch.vgic.irq_priority,
				   vcpu->vcpu_id, offset);
	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
	return false;
}

static bool handle_mmio_cfg_reg_dist(struct kvm_vcpu *vcpu,
				     struct kvm_exit_mmio *mmio,
				     phys_addr_t offset)
{
	u32 *reg;

	if (unlikely(offset < VGIC_NR_PRIVATE_IRQS / 4)) {
		vgic_reg_access(mmio, NULL, offset,
				ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
		return false;
	}

	reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
				  vcpu->vcpu_id, offset >> 1);

	return vgic_handle_cfg_reg(reg, mmio, offset);
}
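
/*
 * Note on the "offset >> 1" above: GICD_ICFGR allocates two configuration
 * bits per interrupt, while the irq_cfg bitmap stores only one bit per
 * interrupt (edge vs. level), so the register offset is halved before
 * looking up the backing word; vgic_handle_cfg_reg() handles the
 * expansion between the two layouts.
 */
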
/*
 * We use a compressed version of the MPIDR (all 32 bits in one 32-bit word)
 * when we store the target MPIDR written by the guest.
 */
static u32 compress_mpidr(unsigned long mpidr)
{
	u32 ret;

	ret = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	ret |= MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8;
	ret |= MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16;
	ret |= MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24;

	return ret;
}

static unsigned long uncompress_mpidr(u32 value)
{
	unsigned long mpidr;

	mpidr  = ((value >>  0) & 0xFF) << MPIDR_LEVEL_SHIFT(0);
	mpidr |= ((value >>  8) & 0xFF) << MPIDR_LEVEL_SHIFT(1);
	mpidr |= ((value >> 16) & 0xFF) << MPIDR_LEVEL_SHIFT(2);
	mpidr |= (u64)((value >> 24) & 0xFF) << MPIDR_LEVEL_SHIFT(3);

	return mpidr;
}
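
/*
 * For example, an MPIDR with Aff3 = 1, Aff2 = 0, Aff1 = 2, Aff0 = 3
 * (0x0000000100000203 on arm64, where Aff3 lives in bits [39:32])
 * compresses to 0x01000203; uncompress_mpidr() restores the original
 * 64-bit layout, including the gap between Aff2 and Aff3.
 */
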
/*
 * Lookup the given MPIDR value to get the vcpu_id (if there is one)
 * and store that in the irq_spi_cpu[] array.
 * This limits the number of VCPUs to 255 for now, extending the data
 * type (or storing kvm_vcpu pointers) should lift the limit.
 * Store the original MPIDR value in an extra array to support read-as-written.
 * Unallocated MPIDRs are translated to a special value and caught
 * before any array accesses.
 */
static bool handle_mmio_route_reg(struct kvm_vcpu *vcpu,
				  struct kvm_exit_mmio *mmio,
				  phys_addr_t offset)
{
	struct kvm *kvm = vcpu->kvm;
	struct vgic_dist *dist = &kvm->arch.vgic;
	int spi;
	u32 reg;
	int vcpu_id;
	unsigned long *bmap, mpidr;

	/*
	 * The upper 32 bits of each 64 bit register are zero,
	 * as we don't support Aff3.
	 */
	if ((offset & 4)) {
		vgic_reg_access(mmio, NULL, offset,
				ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
		return false;
	}

	/* This region only covers SPIs, so no handling of private IRQs here. */
	spi = offset / 8;

	/* get the stored MPIDR for this IRQ */
	mpidr = uncompress_mpidr(dist->irq_spi_mpidr[spi]);
	reg = mpidr;

	vgic_reg_access(mmio, &reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);

	if (!mmio->is_write)
		return false;

	/*
	 * Now clear the currently assigned vCPU from the map, making room
	 * for the new one to be written below.
	 */
	vcpu = kvm_mpidr_to_vcpu(kvm, mpidr);
	if (likely(vcpu)) {
		vcpu_id = vcpu->vcpu_id;
		bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]);
		__clear_bit(spi, bmap);
	}

	dist->irq_spi_mpidr[spi] = compress_mpidr(reg);
	vcpu = kvm_mpidr_to_vcpu(kvm, reg & MPIDR_HWID_BITMASK);

	/*
	 * The spec says that non-existent MPIDR values should not be
	 * forwarded to any existent (v)CPU, but should be able to become
	 * pending anyway. We simply keep the irq_spi_target[] array empty, so
	 * the interrupt will never be injected.
	 * irq_spi_cpu[irq] gets a magic value in this case.
	 */
	if (likely(vcpu)) {
		vcpu_id = vcpu->vcpu_id;
		dist->irq_spi_cpu[spi] = vcpu_id;
		bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]);
		__set_bit(spi, bmap);
	} else {
		dist->irq_spi_cpu[spi] = VCPU_NOT_ALLOCATED;
	}

	vgic_update_state(kvm);

	return true;
}

/*
 * We should be careful about promising too much when a guest reads
 * this register. Don't claim to be like any hardware implementation,
 * but just report the GIC as version 3 - which is what a Linux guest
 * would check.
 */
static bool handle_mmio_idregs(struct kvm_vcpu *vcpu,
			       struct kvm_exit_mmio *mmio,
			       phys_addr_t offset)
{
	u32 reg = 0;

	switch (offset + GICD_IDREGS) {
	case GICD_PIDR2:
		reg = 0x3b;
		break;
	}

	vgic_reg_access(mmio, &reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);

	return false;
}
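
/*
 * The 0x3b returned for GICD_PIDR2 above encodes ArchRev = 0x3 ("GICv3")
 * in bits [7:4]; the low nibble carries the JEDEC-style designer code bits
 * that guests sanity-checking this ID register expect to see.
 */
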
static const struct vgic_io_range vgic_v3_dist_ranges[] = {
	{
		.base		= GICD_CTLR,
		.len		= 0x04,
		.bits_per_irq	= 0,
		.handle_mmio	= handle_mmio_ctlr,
	},
	{
		.base		= GICD_TYPER,
		.len		= 0x04,
		.bits_per_irq	= 0,
		.handle_mmio	= handle_mmio_typer,
	},
	{
		.base		= GICD_IIDR,
		.len		= 0x04,
		.bits_per_irq	= 0,
		.handle_mmio	= handle_mmio_iidr,
	},
	{
		/* this register is optional, it is RAZ/WI if not implemented */
		.base		= GICD_STATUSR,
		.len		= 0x04,
		.bits_per_irq	= 0,
		.handle_mmio	= handle_mmio_raz_wi,
	},
	{
		/* this write only register is WI when TYPER.MBIS=0 */
		.base		= GICD_SETSPI_NSR,
		.len		= 0x04,
		.bits_per_irq	= 0,
		.handle_mmio	= handle_mmio_raz_wi,
	},
	{
		/* this write only register is WI when TYPER.MBIS=0 */
		.base		= GICD_CLRSPI_NSR,
		.len		= 0x04,
		.bits_per_irq	= 0,
		.handle_mmio	= handle_mmio_raz_wi,
	},
	{
		/* this is RAZ/WI when DS=1 */
		.base		= GICD_SETSPI_SR,
		.len		= 0x04,
		.bits_per_irq	= 0,
		.handle_mmio	= handle_mmio_raz_wi,
	},
	{
		/* this is RAZ/WI when DS=1 */
		.base		= GICD_CLRSPI_SR,
		.len		= 0x04,
		.bits_per_irq	= 0,
		.handle_mmio	= handle_mmio_raz_wi,
	},
	{
		.base		= GICD_IGROUPR,
		.len		= 0x80,
		.bits_per_irq	= 1,
		.handle_mmio	= handle_mmio_rao_wi,
	},
	{
		.base		= GICD_ISENABLER,
		.len		= 0x80,
		.bits_per_irq	= 1,
		.handle_mmio	= handle_mmio_set_enable_reg_dist,
	},
	{
		.base		= GICD_ICENABLER,
		.len		= 0x80,
		.bits_per_irq	= 1,
		.handle_mmio	= handle_mmio_clear_enable_reg_dist,
	},
	{
		.base		= GICD_ISPENDR,
		.len		= 0x80,
		.bits_per_irq	= 1,
		.handle_mmio	= handle_mmio_set_pending_reg_dist,
	},
	{
		.base		= GICD_ICPENDR,
		.len		= 0x80,
		.bits_per_irq	= 1,
		.handle_mmio	= handle_mmio_clear_pending_reg_dist,
	},
	{
		.base		= GICD_ISACTIVER,
		.len		= 0x80,
		.bits_per_irq	= 1,
		.handle_mmio	= handle_mmio_raz_wi,
	},
	{
		.base		= GICD_ICACTIVER,
		.len		= 0x80,
		.bits_per_irq	= 1,
		.handle_mmio	= handle_mmio_raz_wi,
	},
	{
		.base		= GICD_IPRIORITYR,
		.len		= 0x400,
		.bits_per_irq	= 8,
		.handle_mmio	= handle_mmio_priority_reg_dist,
	},
	{
		/* TARGETSRn is RES0 when ARE=1 */
		.base		= GICD_ITARGETSR,
		.len		= 0x400,
		.bits_per_irq	= 8,
		.handle_mmio	= handle_mmio_raz_wi,
	},
	{
		.base		= GICD_ICFGR,
		.len		= 0x100,
		.bits_per_irq	= 2,
		.handle_mmio	= handle_mmio_cfg_reg_dist,
	},
	{
		/* this is RAZ/WI when DS=1 */
		.base		= GICD_IGRPMODR,
		.len		= 0x80,
		.bits_per_irq	= 1,
		.handle_mmio	= handle_mmio_raz_wi,
	},
	{
		/* this is RAZ/WI when DS=1 */
		.base		= GICD_NSACR,
		.len		= 0x100,
		.bits_per_irq	= 2,
		.handle_mmio	= handle_mmio_raz_wi,
	},
	{
		/* this is RAZ/WI when ARE=1 */
		.base		= GICD_SGIR,
		.len		= 0x04,
		.handle_mmio	= handle_mmio_raz_wi,
	},
	{
		/* this is RAZ/WI when ARE=1 */
		.base		= GICD_CPENDSGIR,
		.len		= 0x10,
		.handle_mmio	= handle_mmio_raz_wi,
	},
	{
		/* this is RAZ/WI when ARE=1 */
		.base		= GICD_SPENDSGIR,
		.len		= 0x10,
		.handle_mmio	= handle_mmio_raz_wi,
	},
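	/*
	 * The routing registers for the first 32 interrupts (IROUTER<0..31>)
	 * would cover SGIs and PPIs and are reserved, hence the range below
	 * starts at GICD_IROUTER + 0x100 (32 registers of 8 bytes each).
	 */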
	{
		.base		= GICD_IROUTER + 0x100,
		.len		= 0x1ee0,
		.bits_per_irq	= 64,
		.handle_mmio	= handle_mmio_route_reg,
	},
	{
		.base		= GICD_IDREGS,
		.len		= 0x30,
		.bits_per_irq	= 0,
		.handle_mmio	= handle_mmio_idregs,
	},
	{},
};

static bool handle_mmio_ctlr_redist(struct kvm_vcpu *vcpu,
				    struct kvm_exit_mmio *mmio,
				    phys_addr_t offset)
{
	/* since we don't support LPIs, this register is zero for now */
	vgic_reg_access(mmio, NULL, offset,
			ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
	return false;
}

static bool handle_mmio_typer_redist(struct kvm_vcpu *vcpu,
				     struct kvm_exit_mmio *mmio,
				     phys_addr_t offset)
{
	u32 reg;
	u64 mpidr;
	struct kvm_vcpu *redist_vcpu = mmio->private;
	int target_vcpu_id = redist_vcpu->vcpu_id;

	/* the upper 32 bits contain the affinity value */
	if ((offset & ~3) == 4) {
		mpidr = kvm_vcpu_get_mpidr_aff(redist_vcpu);
		reg = compress_mpidr(mpidr);

		vgic_reg_access(mmio, &reg, offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
		return false;
	}

	reg = redist_vcpu->vcpu_id << 8;
	if (target_vcpu_id == atomic_read(&vcpu->kvm->online_vcpus) - 1)
		reg |= GICR_TYPER_LAST;
	vgic_reg_access(mmio, &reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
	return false;
}
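
/*
 * To summarize the GICR_TYPER layout emulated above: bits [63:32] hold the
 * affinity value of the redistributor's CPU (in the same packed format as
 * compress_mpidr() produces), bits [23:8] hold the Processor_Number, and
 * GICR_TYPER_LAST flags the final redistributor in the region.
 */
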
static bool handle_mmio_set_enable_reg_redist(struct kvm_vcpu *vcpu,
					      struct kvm_exit_mmio *mmio,
					      phys_addr_t offset)
{
	struct kvm_vcpu *redist_vcpu = mmio->private;

	return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
				      redist_vcpu->vcpu_id,
				      ACCESS_WRITE_SETBIT);
}

static bool handle_mmio_clear_enable_reg_redist(struct kvm_vcpu *vcpu,
						struct kvm_exit_mmio *mmio,
						phys_addr_t offset)
{
	struct kvm_vcpu *redist_vcpu = mmio->private;

	return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
				      redist_vcpu->vcpu_id,
				      ACCESS_WRITE_CLEARBIT);
}

static bool handle_mmio_set_pending_reg_redist(struct kvm_vcpu *vcpu,
					       struct kvm_exit_mmio *mmio,
					       phys_addr_t offset)
{
	struct kvm_vcpu *redist_vcpu = mmio->private;

	return vgic_handle_set_pending_reg(vcpu->kvm, mmio, offset,
					   redist_vcpu->vcpu_id);
}

static bool handle_mmio_clear_pending_reg_redist(struct kvm_vcpu *vcpu,
						 struct kvm_exit_mmio *mmio,
						 phys_addr_t offset)
{
	struct kvm_vcpu *redist_vcpu = mmio->private;

	return vgic_handle_clear_pending_reg(vcpu->kvm, mmio, offset,
					     redist_vcpu->vcpu_id);
}

static bool handle_mmio_priority_reg_redist(struct kvm_vcpu *vcpu,
					    struct kvm_exit_mmio *mmio,
					    phys_addr_t offset)
{
	struct kvm_vcpu *redist_vcpu = mmio->private;
	u32 *reg;

	reg = vgic_bytemap_get_reg(&vcpu->kvm->arch.vgic.irq_priority,
				   redist_vcpu->vcpu_id, offset);
	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
	return false;
}

static bool handle_mmio_cfg_reg_redist(struct kvm_vcpu *vcpu,
				       struct kvm_exit_mmio *mmio,
				       phys_addr_t offset)
{
	struct kvm_vcpu *redist_vcpu = mmio->private;

	u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
				       redist_vcpu->vcpu_id, offset >> 1);

	return vgic_handle_cfg_reg(reg, mmio, offset);
}

#define SGI_base(x) ((x) + SZ_64K)
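
/*
 * Each redistributor consists of two contiguous 64K frames: the first
 * (RD_base) holds the control registers, the second (SGI_base, hence the
 * SZ_64K offset above) holds the SGI and PPI related registers, mirroring
 * what the distributor provides for SPIs.
 */
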
static const struct vgic_io_range vgic_redist_ranges[] = {
	{
		.base		= GICR_CTLR,
		.len		= 0x04,
		.bits_per_irq	= 0,
		.handle_mmio	= handle_mmio_ctlr_redist,
	},
	{
		.base		= GICR_TYPER,
		.len		= 0x08,
		.bits_per_irq	= 0,
		.handle_mmio	= handle_mmio_typer_redist,
	},
	{
		.base		= GICR_IIDR,
		.len		= 0x04,
		.bits_per_irq	= 0,
		.handle_mmio	= handle_mmio_iidr,
	},
	{
		.base		= GICR_WAKER,
		.len		= 0x04,
		.bits_per_irq	= 0,
		.handle_mmio	= handle_mmio_raz_wi,
	},
	{
		.base		= GICR_IDREGS,
		.len		= 0x30,
		.bits_per_irq	= 0,
		.handle_mmio	= handle_mmio_idregs,
	},
	{
		.base		= SGI_base(GICR_IGROUPR0),
		.len		= 0x04,
		.bits_per_irq	= 1,
		.handle_mmio	= handle_mmio_rao_wi,
	},
	{
		.base		= SGI_base(GICR_ISENABLER0),
		.len		= 0x04,
		.bits_per_irq	= 1,
		.handle_mmio	= handle_mmio_set_enable_reg_redist,
	},
	{
		.base		= SGI_base(GICR_ICENABLER0),
		.len		= 0x04,
		.bits_per_irq	= 1,
		.handle_mmio	= handle_mmio_clear_enable_reg_redist,
	},
	{
		.base		= SGI_base(GICR_ISPENDR0),
		.len		= 0x04,
		.bits_per_irq	= 1,
		.handle_mmio	= handle_mmio_set_pending_reg_redist,
	},
	{
		.base		= SGI_base(GICR_ICPENDR0),
		.len		= 0x04,
		.bits_per_irq	= 1,
		.handle_mmio	= handle_mmio_clear_pending_reg_redist,
	},
	{
		.base		= SGI_base(GICR_ISACTIVER0),
		.len		= 0x04,
		.bits_per_irq	= 1,
		.handle_mmio	= handle_mmio_raz_wi,
	},
	{
		.base		= SGI_base(GICR_ICACTIVER0),
		.len		= 0x04,
		.bits_per_irq	= 1,
		.handle_mmio	= handle_mmio_raz_wi,
	},
	{
		.base		= SGI_base(GICR_IPRIORITYR0),
		.len		= 0x20,
		.bits_per_irq	= 8,
		.handle_mmio	= handle_mmio_priority_reg_redist,
	},
	{
		.base		= SGI_base(GICR_ICFGR0),
		.len		= 0x08,
		.bits_per_irq	= 2,
		.handle_mmio	= handle_mmio_cfg_reg_redist,
	},
	{
		.base		= SGI_base(GICR_IGRPMODR0),
		.len		= 0x04,
		.bits_per_irq	= 1,
		.handle_mmio	= handle_mmio_raz_wi,
	},
	{
		.base		= SGI_base(GICR_NSACR),
		.len		= 0x04,
		.handle_mmio	= handle_mmio_raz_wi,
	},
	{},
};

static bool vgic_v3_queue_sgi(struct kvm_vcpu *vcpu, int irq)
{
	if (vgic_queue_irq(vcpu, 0, irq)) {
		vgic_dist_irq_clear_pending(vcpu, irq);
		vgic_cpu_irq_clear(vcpu, irq);
		return true;
	}

	return false;
}

static int vgic_v3_map_resources(struct kvm *kvm,
				 const struct vgic_params *params)
{
	int ret = 0;
	struct vgic_dist *dist = &kvm->arch.vgic;
	gpa_t rdbase = dist->vgic_redist_base;
	struct vgic_io_device *iodevs = NULL;
	int i;

	if (!irqchip_in_kernel(kvm))
		return 0;

	mutex_lock(&kvm->lock);

	if (vgic_ready(kvm))
		goto out;

	if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base) ||
	    IS_VGIC_ADDR_UNDEF(dist->vgic_redist_base)) {
		kvm_err("Need to set vgic distributor addresses first\n");
		ret = -ENXIO;
		goto out;
	}

	/*
	 * For a VGICv3 we require the userland to explicitly initialize
	 * the VGIC before we need to use it.
	 */
	if (!vgic_initialized(kvm)) {
		ret = -EBUSY;
		goto out;
	}

	ret = vgic_register_kvm_io_dev(kvm, dist->vgic_dist_base,
				       GIC_V3_DIST_SIZE, vgic_v3_dist_ranges,
				       -1, &dist->dist_iodev);
	if (ret)
		goto out;

	iodevs = kcalloc(dist->nr_cpus, sizeof(iodevs[0]), GFP_KERNEL);
	if (!iodevs) {
		ret = -ENOMEM;
		goto out_unregister;
	}

	for (i = 0; i < dist->nr_cpus; i++) {
		ret = vgic_register_kvm_io_dev(kvm, rdbase,
					       SZ_128K, vgic_redist_ranges,
					       i, &iodevs[i]);
		if (ret)
			goto out_unregister;
		rdbase += GIC_V3_REDIST_SIZE;
	}

	dist->redist_iodevs = iodevs;
	dist->ready = true;
	goto out;

out_unregister:
	kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &dist->dist_iodev.dev);
	if (iodevs) {
		for (i = 0; i < dist->nr_cpus; i++) {
			if (iodevs[i].dev.ops)
				kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS,
							  &iodevs[i].dev);
		}
	}

out:
	if (ret)
		kvm_vgic_destroy(kvm);
	mutex_unlock(&kvm->lock);
	return ret;
}
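
/*
 * Note on the sizes used above: each redistributor is registered as an
 * SZ_128K MMIO device while rdbase is advanced by GIC_V3_REDIST_SIZE per
 * VCPU; the two are expected to match, i.e. the two 64K frames (RD_base
 * and SGI_base) of one redistributor laid out back to back.
 */
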
static int vgic_v3_init_model(struct kvm *kvm)
{
	int i;
	u32 mpidr;
	struct vgic_dist *dist = &kvm->arch.vgic;
	int nr_spis = dist->nr_irqs - VGIC_NR_PRIVATE_IRQS;

	dist->irq_spi_mpidr = kcalloc(nr_spis, sizeof(dist->irq_spi_mpidr[0]),
				      GFP_KERNEL);

	if (!dist->irq_spi_mpidr)
		return -ENOMEM;

	/* Initialize the target VCPUs for each IRQ to VCPU 0 */
	mpidr = compress_mpidr(kvm_vcpu_get_mpidr_aff(kvm_get_vcpu(kvm, 0)));
	for (i = VGIC_NR_PRIVATE_IRQS; i < dist->nr_irqs; i++) {
		dist->irq_spi_cpu[i - VGIC_NR_PRIVATE_IRQS] = 0;
		dist->irq_spi_mpidr[i - VGIC_NR_PRIVATE_IRQS] = mpidr;
		vgic_bitmap_set_irq_val(dist->irq_spi_target, 0, i, 1);
	}

	return 0;
}

/* GICv3 does not keep track of SGI sources anymore. */
static void vgic_v3_add_sgi_source(struct kvm_vcpu *vcpu, int irq, int source)
{
}

void vgic_v3_init_emulation(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;

	dist->vm_ops.queue_sgi = vgic_v3_queue_sgi;
	dist->vm_ops.add_sgi_source = vgic_v3_add_sgi_source;
	dist->vm_ops.init_model = vgic_v3_init_model;
	dist->vm_ops.map_resources = vgic_v3_map_resources;

	kvm->arch.max_vcpus = KVM_MAX_VCPUS;
}

/*
 * Compare a given affinity (level 1-3 and a level 0 mask, from the SGI
 * generation register ICC_SGI1R_EL1) with a given VCPU.
 * If the VCPU's MPIDR matches, return the level0 affinity, otherwise
 * return -1.
 */
static int match_mpidr(u64 sgi_aff, u16 sgi_cpu_mask, struct kvm_vcpu *vcpu)
{
	unsigned long affinity;
	int level0;

	/*
	 * Split the current VCPU's MPIDR into affinity level 0 and the
	 * rest as this is what we have to compare against.
	 */
	affinity = kvm_vcpu_get_mpidr_aff(vcpu);
	level0 = MPIDR_AFFINITY_LEVEL(affinity, 0);
	affinity &= ~MPIDR_LEVEL_MASK;

	/* bail out if the upper three levels don't match */
	if (sgi_aff != affinity)
		return -1;

	/* Is this VCPU's bit set in the mask ? */
	if (!(sgi_cpu_mask & BIT(level0)))
		return -1;

	return level0;
}

#define SGI_AFFINITY_LEVEL(reg, level) \
	((((reg) & ICC_SGI1R_AFFINITY_## level ##_MASK) \
	>> ICC_SGI1R_AFFINITY_## level ##_SHIFT) << MPIDR_LEVEL_SHIFT(level))
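
/*
 * For example, SGI_AFFINITY_LEVEL(reg, 1) extracts the Aff1 field written
 * to ICC_SGI1R_EL1 and shifts it to bit 8, its position within an MPIDR,
 * so that the three extracted levels can be ORed together and compared
 * directly against a VCPU's (level-0-masked) MPIDR in match_mpidr().
 */
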
/**
 * vgic_v3_dispatch_sgi - handle SGI requests from VCPUs
 * @vcpu: The VCPU requesting a SGI
 * @reg:  The value written into the ICC_SGI1R_EL1 register by that VCPU
 *
 * With GICv3 (and ARE=1) CPUs trigger SGIs by writing to a system register.
 * This will trap in sys_regs.c and call this function.
 * This ICC_SGI1R_EL1 register contains the upper three affinity levels of the
 * target processors as well as a bitmask of 16 Aff0 CPUs.
 * If the interrupt routing mode bit is not set, we iterate over all VCPUs to
 * check for matching ones. If this bit is set, we signal all, but not the
 * calling VCPU.
 */
void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_vcpu *c_vcpu;
	struct vgic_dist *dist = &kvm->arch.vgic;
	u16 target_cpus;
	u64 mpidr;
	int sgi, c;
	int vcpu_id = vcpu->vcpu_id;
	bool broadcast;
	int updated = 0;

	sgi = (reg & ICC_SGI1R_SGI_ID_MASK) >> ICC_SGI1R_SGI_ID_SHIFT;
	broadcast = reg & BIT(ICC_SGI1R_IRQ_ROUTING_MODE_BIT);
	target_cpus = (reg & ICC_SGI1R_TARGET_LIST_MASK) >> ICC_SGI1R_TARGET_LIST_SHIFT;
	mpidr = SGI_AFFINITY_LEVEL(reg, 3);
	mpidr |= SGI_AFFINITY_LEVEL(reg, 2);
	mpidr |= SGI_AFFINITY_LEVEL(reg, 1);

	/*
	 * We take the dist lock here, because we come from the sysregs
	 * code path and not from the MMIO one (which already takes the lock).
	 */
	spin_lock(&dist->lock);

	/*
	 * We iterate over all VCPUs to find the MPIDRs matching the request.
	 * Once we have handled one CPU, we clear its bit so that we can detect
	 * early whether we are already finished. This avoids iterating through
	 * all VCPUs when most of the time we just signal a single VCPU.
	 */
	kvm_for_each_vcpu(c, c_vcpu, kvm) {

		/* Exit early if we have dealt with all requested CPUs */
		if (!broadcast && target_cpus == 0)
			break;

		/* Don't signal the calling VCPU */
		if (broadcast && c == vcpu_id)
			continue;

		if (!broadcast) {
			int level0;

			level0 = match_mpidr(mpidr, target_cpus, c_vcpu);
			if (level0 == -1)
				continue;

			/* remove this matching VCPU from the mask */
			target_cpus &= ~BIT(level0);
		}

		/* Flag the SGI as pending */
		vgic_dist_irq_set_pending(c_vcpu, sgi);
		updated = 1;
		kvm_debug("SGI%d from CPU%d to CPU%d\n", sgi, vcpu_id, c);
	}
	if (updated)
		vgic_update_state(vcpu->kvm);
	spin_unlock(&dist->lock);
	if (updated)
		vgic_kick_vcpus(vcpu->kvm);
}
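
/*
 * As an illustration of the decoding above: a guest write of
 * 0x0000000005010005 to ICC_SGI1R_EL1 (IRM = 0, Aff3 = Aff2 = 0, Aff1 = 1,
 * SGI ID = 5, target list = 0b0101) requests SGI 5 for the VCPUs whose
 * MPIDR has Aff3.Aff2.Aff1 = 0.0.1 and Aff0 equal to 0 or 2.
 */
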
static int vgic_v3_create(struct kvm_device *dev, u32 type)
{
	return kvm_vgic_create(dev->kvm, type);
}

static void vgic_v3_destroy(struct kvm_device *dev)
{
	kfree(dev);
}

static int vgic_v3_set_attr(struct kvm_device *dev,
			    struct kvm_device_attr *attr)
{
	int ret;

	ret = vgic_set_common_attr(dev, attr);
	if (ret != -ENXIO)
		return ret;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
		return -ENXIO;
	}

	return -ENXIO;
}

static int vgic_v3_get_attr(struct kvm_device *dev,
			    struct kvm_device_attr *attr)
{
	int ret;

	ret = vgic_get_common_attr(dev, attr);
	if (ret != -ENXIO)
		return ret;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
		return -ENXIO;
	}

	return -ENXIO;
}

static int vgic_v3_has_attr(struct kvm_device *dev,
			    struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR:
		switch (attr->attr) {
		case KVM_VGIC_V2_ADDR_TYPE_DIST:
		case KVM_VGIC_V2_ADDR_TYPE_CPU:
			return -ENXIO;
		case KVM_VGIC_V3_ADDR_TYPE_DIST:
		case KVM_VGIC_V3_ADDR_TYPE_REDIST:
			return 0;
		}
		break;
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
		return -ENXIO;
	case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
		return 0;
	case KVM_DEV_ARM_VGIC_GRP_CTRL:
		switch (attr->attr) {
		case KVM_DEV_ARM_VGIC_CTRL_INIT:
			return 0;
		}
	}
	return -ENXIO;
}

struct kvm_device_ops kvm_arm_vgic_v3_ops = {
	.name = "kvm-arm-vgic-v3",
	.create = vgic_v3_create,
	.destroy = vgic_v3_destroy,
	.set_attr = vgic_v3_set_attr,
	.get_attr = vgic_v3_get_attr,
	.has_attr = vgic_v3_has_attr,
};