/*
 * VGICv3 MMIO handling functions
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/irqchip/arm-gic-v3.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/iodev.h>
#include <kvm/arm_vgic.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>

#include "vgic.h"
#include "vgic-mmio.h"
/* extract @num bytes at @offset bytes offset in data */
unsigned long extract_bytes(u64 data, unsigned int offset,
			    unsigned int num)
{
	return (data >> (offset * 8)) & GENMASK_ULL(num * 8 - 1, 0);
}
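/*
 * Illustrative example (not part of the original source): a 4-byte read at
 * byte offset 2 of data = 0x1122334455667788 shifts right by 16 bits and
 * masks with GENMASK_ULL(31, 0), so extract_bytes(data, 2, 4) returns
 * 0x33445566.
 */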
/* allows updates of any half of a 64-bit register (or the whole thing) */
u64 update_64bit_reg(u64 reg, unsigned int offset, unsigned int len,
		     unsigned long val)
{
	int lower = (offset & 4) * 8;
	int upper = lower + 8 * len - 1;

	reg &= ~GENMASK_ULL(upper, lower);
	val &= GENMASK_ULL(len * 8 - 1, 0);

	return reg | ((u64)val << lower);
}
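/*
 * Illustrative example (not part of the original source): a 32-bit write to
 * the upper half of a 64-bit register arrives with offset = 4 and len = 4,
 * giving lower = 32 and upper = 63, so bits [63:32] of @reg are cleared and
 * replaced by @val; a full 64-bit access (offset = 0, len = 8) rewrites the
 * whole register.
 */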
bool vgic_has_its(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;

	if (dist->vgic_model != KVM_DEV_TYPE_ARM_VGIC_V3)
		return false;

	return dist->has_its;
}
bool vgic_supports_direct_msis(struct kvm *kvm)
{
	return kvm_vgic_global_state.has_gicv4 && vgic_has_its(kvm);
}
static unsigned long vgic_mmio_read_v3_misc(struct kvm_vcpu *vcpu,
					    gpa_t addr, unsigned int len)
{
	u32 value = 0;

	switch (addr & 0x0c) {
	case GICD_CTLR:
		if (vcpu->kvm->arch.vgic.enabled)
			value |= GICD_CTLR_ENABLE_SS_G1;
		value |= GICD_CTLR_ARE_NS | GICD_CTLR_DS;
		break;
	case GICD_TYPER:
		value = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
		value = (value >> 5) - 1;
		if (vgic_has_its(vcpu->kvm)) {
			value |= (INTERRUPT_ID_BITS_ITS - 1) << 19;
			value |= GICD_TYPER_LPIS;
		} else {
			value |= (INTERRUPT_ID_BITS_SPIS - 1) << 19;
		}
		break;
	case GICD_IIDR:
		value = (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
		break;
	default:
		return 0;
	}

	return value;
}
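/*
 * Worked example for the GICD_TYPER encoding above (illustrative value, not
 * from the original source): with nr_spis = 224 the total is 224 + 32 = 256
 * interrupt IDs, so ITLinesNumber becomes (256 >> 5) - 1 = 7, which a guest
 * decodes back as 32 * (7 + 1) = 256 supported interrupt IDs.
 */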
static void vgic_mmio_write_v3_misc(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len,
				    unsigned long val)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	bool was_enabled = dist->enabled;

	switch (addr & 0x0c) {
	case GICD_CTLR:
		dist->enabled = val & GICD_CTLR_ENABLE_SS_G1;

		if (!was_enabled && dist->enabled)
			vgic_kick_vcpus(vcpu->kvm);
		break;
	case GICD_TYPER:
	case GICD_IIDR:
		return;
	}
}
static unsigned long vgic_mmio_read_irouter(struct kvm_vcpu *vcpu,
					    gpa_t addr, unsigned int len)
{
	int intid = VGIC_ADDR_TO_INTID(addr, 64);
	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid);
	unsigned long ret = 0;

	if (!irq)
		return 0;

	/* The upper word is RAZ for us. */
	if (!(addr & 4))
		ret = extract_bytes(READ_ONCE(irq->mpidr), addr & 7, len);

	vgic_put_irq(vcpu->kvm, irq);
	return ret;
}
static void vgic_mmio_write_irouter(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len,
				    unsigned long val)
{
	int intid = VGIC_ADDR_TO_INTID(addr, 64);
	struct vgic_irq *irq;
	unsigned long flags;

	/* The upper word is WI for us since we don't implement Aff3. */
	if (addr & 4)
		return;

	irq = vgic_get_irq(vcpu->kvm, NULL, intid);

	if (!irq)
		return;

	spin_lock_irqsave(&irq->irq_lock, flags);

	/* We only care about and preserve Aff0, Aff1 and Aff2. */
	irq->mpidr = val & GENMASK(23, 0);
	irq->target_vcpu = kvm_mpidr_to_vcpu(vcpu->kvm, irq->mpidr);

	spin_unlock_irqrestore(&irq->irq_lock, flags);
	vgic_put_irq(vcpu->kvm, irq);
}
static unsigned long vgic_mmio_read_v3r_ctlr(struct kvm_vcpu *vcpu,
					     gpa_t addr, unsigned int len)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

	return vgic_cpu->lpis_enabled ? GICR_CTLR_ENABLE_LPIS : 0;
}
static void vgic_mmio_write_v3r_ctlr(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	bool was_enabled = vgic_cpu->lpis_enabled;

	if (!vgic_has_its(vcpu->kvm))
		return;

	vgic_cpu->lpis_enabled = val & GICR_CTLR_ENABLE_LPIS;

	if (!was_enabled && vgic_cpu->lpis_enabled)
		vgic_enable_lpis(vcpu);
}
static unsigned long vgic_mmio_read_v3r_typer(struct kvm_vcpu *vcpu,
					      gpa_t addr, unsigned int len)
{
	unsigned long mpidr = kvm_vcpu_get_mpidr_aff(vcpu);
	int target_vcpu_id = vcpu->vcpu_id;
	u64 value;

	value = (u64)(mpidr & GENMASK(23, 0)) << 32;
	value |= ((target_vcpu_id & 0xffff) << 8);
	if (target_vcpu_id == atomic_read(&vcpu->kvm->online_vcpus) - 1)
		value |= GICR_TYPER_LAST;
	if (vgic_has_its(vcpu->kvm))
		value |= GICR_TYPER_PLPIS;

	return extract_bytes(value, addr & 7, len);
}
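/*
 * Illustrative example (not part of the original source): for a VCPU with
 * MPIDR affinity 0x000102 and vcpu_id 2, the code above places the affinity
 * value in GICR_TYPER[63:32] and the processor number (2) in bits [23:8];
 * GICR_TYPER_LAST is set only on the highest-numbered VCPU so the guest can
 * tell where the redistributor region ends.
 */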
static unsigned long vgic_mmio_read_v3r_iidr(struct kvm_vcpu *vcpu,
					     gpa_t addr, unsigned int len)
{
	return (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
}
static unsigned long vgic_mmio_read_v3_idregs(struct kvm_vcpu *vcpu,
					      gpa_t addr, unsigned int len)
{
	switch (addr & 0xffff) {
	case GICD_PIDR2:
		/* report a GICv3 compliant implementation */
		return 0x3b;
	}

	return 0;
}
static unsigned long vgic_v3_uaccess_read_pending(struct kvm_vcpu *vcpu,
						  gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/*
	 * pending state of interrupt is latched in pending_latch variable.
	 * Userspace will save and restore pending state and line_level
	 * separately.
	 * Refer to Documentation/virtual/kvm/devices/arm-vgic-v3.txt
	 * for handling of ISPENDR and ICPENDR.
	 */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq->pending_latch)
			value |= (1U << i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}
static void vgic_v3_uaccess_write_pending(struct kvm_vcpu *vcpu,
					  gpa_t addr, unsigned int len,
					  unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		spin_lock_irqsave(&irq->irq_lock, flags);
		if (test_bit(i, &val)) {
			/*
			 * pending_latch is set irrespective of irq type
			 * (level or edge) to avoid dependency that VM should
			 * restore irq config before pending info.
			 */
			irq->pending_latch = true;
			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		} else {
			irq->pending_latch = false;
			spin_unlock_irqrestore(&irq->irq_lock, flags);
		}

		vgic_put_irq(vcpu->kvm, irq);
	}
}
/* We want to avoid outer shareable. */
u64 vgic_sanitise_shareability(u64 field)
{
	switch (field) {
	case GIC_BASER_OuterShareable:
		return GIC_BASER_InnerShareable;
	default:
		return field;
	}
}
/* Avoid any inner non-cacheable mapping. */
u64 vgic_sanitise_inner_cacheability(u64 field)
{
	switch (field) {
	case GIC_BASER_CACHE_nCnB:
	case GIC_BASER_CACHE_nC:
		return GIC_BASER_CACHE_RaWb;
	default:
		return field;
	}
}
/* Non-cacheable or same-as-inner are OK. */
u64 vgic_sanitise_outer_cacheability(u64 field)
{
	switch (field) {
	case GIC_BASER_CACHE_SameAsInner:
	case GIC_BASER_CACHE_nC:
		return field;
	default:
		return GIC_BASER_CACHE_nC;
	}
}
u64 vgic_sanitise_field(u64 reg, u64 field_mask, int field_shift,
			u64 (*sanitise_fn)(u64))
{
	u64 field = (reg & field_mask) >> field_shift;

	field = sanitise_fn(field) << field_shift;
	return (reg & ~field_mask) | field;
}
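/*
 * Illustrative usage (not part of the original source): sanitising the
 * PENDBASER shareability field amounts to
 *
 *	reg = vgic_sanitise_field(reg, GICR_PENDBASER_SHAREABILITY_MASK,
 *				  GICR_PENDBASER_SHAREABILITY_SHIFT,
 *				  vgic_sanitise_shareability);
 *
 * which extracts the shareability bits, maps OuterShareable to
 * InnerShareable and writes the result back into the same position.
 */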
#define PROPBASER_RES0_MASK						\
	(GENMASK_ULL(63, 59) | GENMASK_ULL(55, 52) | GENMASK_ULL(6, 5))
#define PENDBASER_RES0_MASK						\
	(BIT_ULL(63) | GENMASK_ULL(61, 59) | GENMASK_ULL(55, 52) |	\
	 GENMASK_ULL(15, 12) | GENMASK_ULL(6, 0))
static u64 vgic_sanitise_pendbaser(u64 reg)
{
	reg = vgic_sanitise_field(reg, GICR_PENDBASER_SHAREABILITY_MASK,
				  GICR_PENDBASER_SHAREABILITY_SHIFT,
				  vgic_sanitise_shareability);
	reg = vgic_sanitise_field(reg, GICR_PENDBASER_INNER_CACHEABILITY_MASK,
				  GICR_PENDBASER_INNER_CACHEABILITY_SHIFT,
				  vgic_sanitise_inner_cacheability);
	reg = vgic_sanitise_field(reg, GICR_PENDBASER_OUTER_CACHEABILITY_MASK,
				  GICR_PENDBASER_OUTER_CACHEABILITY_SHIFT,
				  vgic_sanitise_outer_cacheability);

	reg &= ~PENDBASER_RES0_MASK;
	reg &= ~GENMASK_ULL(51, 48);

	return reg;
}
static u64 vgic_sanitise_propbaser(u64 reg)
{
	reg = vgic_sanitise_field(reg, GICR_PROPBASER_SHAREABILITY_MASK,
				  GICR_PROPBASER_SHAREABILITY_SHIFT,
				  vgic_sanitise_shareability);
	reg = vgic_sanitise_field(reg, GICR_PROPBASER_INNER_CACHEABILITY_MASK,
				  GICR_PROPBASER_INNER_CACHEABILITY_SHIFT,
				  vgic_sanitise_inner_cacheability);
	reg = vgic_sanitise_field(reg, GICR_PROPBASER_OUTER_CACHEABILITY_MASK,
				  GICR_PROPBASER_OUTER_CACHEABILITY_SHIFT,
				  vgic_sanitise_outer_cacheability);

	reg &= ~PROPBASER_RES0_MASK;
	reg &= ~GENMASK_ULL(51, 48);

	return reg;
}
static unsigned long vgic_mmio_read_propbase(struct kvm_vcpu *vcpu,
					     gpa_t addr, unsigned int len)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return extract_bytes(dist->propbaser, addr & 7, len);
}
static void vgic_mmio_write_propbase(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	u64 old_propbaser, propbaser;

	/* Storing a value with LPIs already enabled is undefined */
	if (vgic_cpu->lpis_enabled)
		return;

	do {
		old_propbaser = READ_ONCE(dist->propbaser);
		propbaser = old_propbaser;
		propbaser = update_64bit_reg(propbaser, addr & 4, len, val);
		propbaser = vgic_sanitise_propbaser(propbaser);
	} while (cmpxchg64(&dist->propbaser, old_propbaser,
			   propbaser) != old_propbaser);
}
static unsigned long vgic_mmio_read_pendbase(struct kvm_vcpu *vcpu,
					     gpa_t addr, unsigned int len)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

	return extract_bytes(vgic_cpu->pendbaser, addr & 7, len);
}
static void vgic_mmio_write_pendbase(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	u64 old_pendbaser, pendbaser;

	/* Storing a value with LPIs already enabled is undefined */
	if (vgic_cpu->lpis_enabled)
		return;

	do {
		old_pendbaser = READ_ONCE(vgic_cpu->pendbaser);
		pendbaser = old_pendbaser;
		pendbaser = update_64bit_reg(pendbaser, addr & 4, len, val);
		pendbaser = vgic_sanitise_pendbaser(pendbaser);
	} while (cmpxchg64(&vgic_cpu->pendbaser, old_pendbaser,
			   pendbaser) != old_pendbaser);
}
/*
 * The GICv3 per-IRQ registers are split to control PPIs and SGIs in the
 * redistributors, while SPIs are covered by registers in the distributor
 * block. Trying to set private IRQs in this block gets ignored.
 * We take some special care here to fix the calculation of the register
 * offset.
 */
#define REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(off, rd, wr, ur, uw, bpi, acc) \
	{								\
		.reg_offset = off,					\
		.bits_per_irq = bpi,					\
		.len = (bpi * VGIC_NR_PRIVATE_IRQS) / 8,		\
		.access_flags = acc,					\
		.read = vgic_mmio_read_raz,				\
		.write = vgic_mmio_write_wi,				\
	}, {								\
		.reg_offset = off + (bpi * VGIC_NR_PRIVATE_IRQS) / 8,	\
		.bits_per_irq = bpi,					\
		.len = (bpi * (1024 - VGIC_NR_PRIVATE_IRQS)) / 8,	\
		.access_flags = acc,					\
		.read = rd,						\
		.write = wr,						\
		.uaccess_read = ur,					\
		.uaccess_write = uw,					\
	}
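/*
 * Illustrative expansion (not part of the original source): for GICD_ISENABLER
 * with bpi = 1 the macro emits two regions. The first covers the 32 private
 * IRQs (1 * 32 / 8 = 4 bytes) and is RAZ/WI here, since SGIs and PPIs are
 * handled in the redistributor frame. The second starts at GICD_ISENABLER + 4
 * and covers the remaining (1024 - 32) / 8 = 124 bytes of SPI state with the
 * real read/write handlers.
 */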
static const struct vgic_register_region vgic_v3_dist_registers[] = {
	REGISTER_DESC_WITH_LENGTH(GICD_CTLR,
		vgic_mmio_read_v3_misc, vgic_mmio_write_v3_misc, 16,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICD_STATUSR,
		vgic_mmio_read_rao, vgic_mmio_write_wi, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IGROUPR,
		vgic_mmio_read_rao, vgic_mmio_write_wi, NULL, NULL, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ISENABLER,
		vgic_mmio_read_enable, vgic_mmio_write_senable, NULL, NULL, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICENABLER,
		vgic_mmio_read_enable, vgic_mmio_write_cenable, NULL, NULL, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ISPENDR,
		vgic_mmio_read_pending, vgic_mmio_write_spending,
		vgic_v3_uaccess_read_pending, vgic_v3_uaccess_write_pending, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICPENDR,
		vgic_mmio_read_pending, vgic_mmio_write_cpending,
		vgic_mmio_read_raz, vgic_mmio_write_wi, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ISACTIVER,
		vgic_mmio_read_active, vgic_mmio_write_sactive,
		NULL, vgic_mmio_uaccess_write_sactive, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICACTIVER,
		vgic_mmio_read_active, vgic_mmio_write_cactive,
		NULL, vgic_mmio_uaccess_write_cactive,
		1, VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IPRIORITYR,
		vgic_mmio_read_priority, vgic_mmio_write_priority, NULL, NULL,
		8, VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ITARGETSR,
		vgic_mmio_read_raz, vgic_mmio_write_wi, NULL, NULL, 8,
		VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICFGR,
		vgic_mmio_read_config, vgic_mmio_write_config, NULL, NULL, 2,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IGRPMODR,
		vgic_mmio_read_raz, vgic_mmio_write_wi, NULL, NULL, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IROUTER,
		vgic_mmio_read_irouter, vgic_mmio_write_irouter, NULL, NULL, 64,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICD_IDREGS,
		vgic_mmio_read_v3_idregs, vgic_mmio_write_wi, 48,
		VGIC_ACCESS_32bit),
};
static const struct vgic_register_region vgic_v3_rdbase_registers[] = {
	REGISTER_DESC_WITH_LENGTH(GICR_CTLR,
		vgic_mmio_read_v3r_ctlr, vgic_mmio_write_v3r_ctlr, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_STATUSR,
		vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_IIDR,
		vgic_mmio_read_v3r_iidr, vgic_mmio_write_wi, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_TYPER,
		vgic_mmio_read_v3r_typer, vgic_mmio_write_wi, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_WAKER,
		vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_PROPBASER,
		vgic_mmio_read_propbase, vgic_mmio_write_propbase, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_PENDBASER,
		vgic_mmio_read_pendbase, vgic_mmio_write_pendbase, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_IDREGS,
		vgic_mmio_read_v3_idregs, vgic_mmio_write_wi, 48,
		VGIC_ACCESS_32bit),
};
static const struct vgic_register_region vgic_v3_sgibase_registers[] = {
	REGISTER_DESC_WITH_LENGTH(GICR_IGROUPR0,
		vgic_mmio_read_rao, vgic_mmio_write_wi, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_ISENABLER0,
		vgic_mmio_read_enable, vgic_mmio_write_senable, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_ICENABLER0,
		vgic_mmio_read_enable, vgic_mmio_write_cenable, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH_UACCESS(GICR_ISPENDR0,
		vgic_mmio_read_pending, vgic_mmio_write_spending,
		vgic_v3_uaccess_read_pending, vgic_v3_uaccess_write_pending, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH_UACCESS(GICR_ICPENDR0,
		vgic_mmio_read_pending, vgic_mmio_write_cpending,
		vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH_UACCESS(GICR_ISACTIVER0,
		vgic_mmio_read_active, vgic_mmio_write_sactive,
		NULL, vgic_mmio_uaccess_write_sactive,
		4, VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH_UACCESS(GICR_ICACTIVER0,
		vgic_mmio_read_active, vgic_mmio_write_cactive,
		NULL, vgic_mmio_uaccess_write_cactive,
		4, VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_IPRIORITYR0,
		vgic_mmio_read_priority, vgic_mmio_write_priority, 32,
		VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
	REGISTER_DESC_WITH_LENGTH(GICR_ICFGR0,
		vgic_mmio_read_config, vgic_mmio_write_config, 8,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_IGRPMODR0,
		vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_NSACR,
		vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
		VGIC_ACCESS_32bit),
};
unsigned int vgic_v3_init_dist_iodev(struct vgic_io_device *dev)
{
	dev->regions = vgic_v3_dist_registers;
	dev->nr_regions = ARRAY_SIZE(vgic_v3_dist_registers);

	kvm_iodevice_init(&dev->dev, &kvm_io_gic_ops);

	return SZ_64K;
}
/**
 * vgic_register_redist_iodev - register a single redist iodev
 * @vcpu: The VCPU to which the redistributor belongs
 *
 * Register a KVM iodev for this VCPU's redistributor using the address
 * provided.
 *
 * Return 0 on success, -ERRNO otherwise.
 */
int vgic_register_redist_iodev(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	struct vgic_dist *vgic = &kvm->arch.vgic;
	struct vgic_io_device *rd_dev = &vcpu->arch.vgic_cpu.rd_iodev;
	struct vgic_io_device *sgi_dev = &vcpu->arch.vgic_cpu.sgi_iodev;
	gpa_t rd_base, sgi_base;
	int ret;

	/*
	 * We may be creating VCPUs before having set the base address for the
	 * redistributor region, in which case we will come back to this
	 * function for all VCPUs when the base address is set.  Just return
	 * without doing any work for now.
	 */
	if (IS_VGIC_ADDR_UNDEF(vgic->vgic_redist_base))
		return 0;

	if (!vgic_v3_check_base(kvm))
		return -EINVAL;

	rd_base = vgic->vgic_redist_base + vgic->vgic_redist_free_offset;
	sgi_base = rd_base + SZ_64K;

	kvm_iodevice_init(&rd_dev->dev, &kvm_io_gic_ops);
	rd_dev->base_addr = rd_base;
	rd_dev->iodev_type = IODEV_REDIST;
	rd_dev->regions = vgic_v3_rdbase_registers;
	rd_dev->nr_regions = ARRAY_SIZE(vgic_v3_rdbase_registers);
	rd_dev->redist_vcpu = vcpu;

	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, rd_base,
				      SZ_64K, &rd_dev->dev);
	mutex_unlock(&kvm->slots_lock);

	if (ret)
		return ret;

	kvm_iodevice_init(&sgi_dev->dev, &kvm_io_gic_ops);
	sgi_dev->base_addr = sgi_base;
	sgi_dev->iodev_type = IODEV_REDIST;
	sgi_dev->regions = vgic_v3_sgibase_registers;
	sgi_dev->nr_regions = ARRAY_SIZE(vgic_v3_sgibase_registers);
	sgi_dev->redist_vcpu = vcpu;

	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, sgi_base,
				      SZ_64K, &sgi_dev->dev);
	if (ret) {
		kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS,
					  &rd_dev->dev);
		goto out;
	}

	vgic->vgic_redist_free_offset += 2 * SZ_64K;
out:
	mutex_unlock(&kvm->slots_lock);
	return ret;
}
static void vgic_unregister_redist_iodev(struct kvm_vcpu *vcpu)
{
	struct vgic_io_device *rd_dev = &vcpu->arch.vgic_cpu.rd_iodev;
	struct vgic_io_device *sgi_dev = &vcpu->arch.vgic_cpu.sgi_iodev;

	kvm_io_bus_unregister_dev(vcpu->kvm, KVM_MMIO_BUS, &rd_dev->dev);
	kvm_io_bus_unregister_dev(vcpu->kvm, KVM_MMIO_BUS, &sgi_dev->dev);
}
static int vgic_register_all_redist_iodevs(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int c, ret = 0;

	kvm_for_each_vcpu(c, vcpu, kvm) {
		ret = vgic_register_redist_iodev(vcpu);
		if (ret)
			break;
	}

	if (ret) {
		/* The current c failed, so we start with the previous one. */
		mutex_lock(&kvm->slots_lock);
		for (c--; c >= 0; c--) {
			vcpu = kvm_get_vcpu(kvm, c);
			vgic_unregister_redist_iodev(vcpu);
		}
		mutex_unlock(&kvm->slots_lock);
	}

	return ret;
}
int vgic_v3_set_redist_base(struct kvm *kvm, u64 addr)
{
	struct vgic_dist *vgic = &kvm->arch.vgic;
	int ret;

	/* vgic_check_ioaddr makes sure we don't do this twice */
	ret = vgic_check_ioaddr(kvm, &vgic->vgic_redist_base, addr, SZ_64K);
	if (ret)
		return ret;

	vgic->vgic_redist_base = addr;
	if (!vgic_v3_check_base(kvm)) {
		vgic->vgic_redist_base = VGIC_ADDR_UNDEF;
		return -EINVAL;
	}

	/*
	 * Register iodevs for each existing VCPU.  Adding more VCPUs
	 * afterwards will register the iodevs when needed.
	 */
	ret = vgic_register_all_redist_iodevs(kvm);
	if (ret)
		return ret;

	return 0;
}
int vgic_v3_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	const struct vgic_register_region *region;
	struct vgic_io_device iodev;
	struct vgic_reg_attr reg_attr;
	struct kvm_vcpu *vcpu;
	gpa_t addr;
	int ret;

	ret = vgic_v3_parse_attr(dev, attr, &reg_attr);
	if (ret)
		return ret;

	vcpu = reg_attr.vcpu;
	addr = reg_attr.addr;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
		iodev.regions = vgic_v3_dist_registers;
		iodev.nr_regions = ARRAY_SIZE(vgic_v3_dist_registers);
		iodev.base_addr = 0;
		break;
	case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS: {
		iodev.regions = vgic_v3_rdbase_registers;
		iodev.nr_regions = ARRAY_SIZE(vgic_v3_rdbase_registers);
		iodev.base_addr = 0;
		break;
	}
	case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS: {
		u64 reg, id;

		id = (attr->attr & KVM_DEV_ARM_VGIC_SYSREG_INSTR_MASK);
		return vgic_v3_has_cpu_sysregs_attr(vcpu, 0, id, &reg);
	}
	default:
		return -ENXIO;
	}

	/* We only support aligned 32-bit accesses. */
	if (addr & 3)
		return -ENXIO;

	region = vgic_get_mmio_region(vcpu, &iodev, addr, sizeof(u32));
	if (!region)
		return -ENXIO;

	return 0;
}
/*
 * Compare a given affinity (level 1-3 and a level 0 mask, from the SGI
 * generation register ICC_SGI1R_EL1) with a given VCPU.
 * If the VCPU's MPIDR matches, return the level0 affinity, otherwise
 * return -1.
 */
static int match_mpidr(u64 sgi_aff, u16 sgi_cpu_mask, struct kvm_vcpu *vcpu)
{
	unsigned long affinity;
	int level0;

	/*
	 * Split the current VCPU's MPIDR into affinity level 0 and the
	 * rest as this is what we have to compare against.
	 */
	affinity = kvm_vcpu_get_mpidr_aff(vcpu);
	level0 = MPIDR_AFFINITY_LEVEL(affinity, 0);
	affinity &= ~MPIDR_LEVEL_MASK;

	/* bail out if the upper three levels don't match */
	if (sgi_aff != affinity)
		return -1;

	/* Is this VCPU's bit set in the mask ? */
	if (!(sgi_cpu_mask & BIT(level0)))
		return -1;

	return level0;
}
/*
 * The ICC_SGI* registers encode the affinity differently from the MPIDR,
 * so provide a wrapper to use the existing defines to isolate a certain
 * affinity level.
 */
#define SGI_AFFINITY_LEVEL(reg, level) \
	((((reg) & ICC_SGI1R_AFFINITY_## level ##_MASK) \
	>> ICC_SGI1R_AFFINITY_## level ##_SHIFT) << MPIDR_LEVEL_SHIFT(level))
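/*
 * Illustrative example (not part of the original source): in ICC_SGI1R_EL1
 * the Aff2 field sits at ICC_SGI1R_AFFINITY_2_SHIFT (bit 32), while an MPIDR
 * keeps Aff2 at MPIDR_LEVEL_SHIFT(2) (bit 16), so SGI_AFFINITY_LEVEL(reg, 2)
 * moves the field from the former position to the latter; ORing the three
 * levels together below yields an MPIDR-style value that can be compared
 * directly in match_mpidr().
 */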
/**
 * vgic_v3_dispatch_sgi - handle SGI requests from VCPUs
 * @vcpu: The VCPU requesting a SGI
 * @reg: The value written into the ICC_SGI1R_EL1 register by that VCPU
 *
 * With GICv3 (and ARE=1) CPUs trigger SGIs by writing to a system register.
 * This will trap in sys_regs.c and call this function.
 * This ICC_SGI1R_EL1 register contains the upper three affinity levels of the
 * target processors as well as a bitmask of 16 Aff0 CPUs.
 * If the interrupt routing mode bit is not set, we iterate over all VCPUs to
 * check for matching ones. If this bit is set, we signal all, but not the
 * calling VCPU.
 */
void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_vcpu *c_vcpu;
	u16 target_cpus;
	u64 mpidr;
	int sgi, c;
	int vcpu_id = vcpu->vcpu_id;
	bool broadcast;
	unsigned long flags;

	sgi = (reg & ICC_SGI1R_SGI_ID_MASK) >> ICC_SGI1R_SGI_ID_SHIFT;
	broadcast = reg & BIT_ULL(ICC_SGI1R_IRQ_ROUTING_MODE_BIT);
	target_cpus = (reg & ICC_SGI1R_TARGET_LIST_MASK) >> ICC_SGI1R_TARGET_LIST_SHIFT;
	mpidr = SGI_AFFINITY_LEVEL(reg, 3);
	mpidr |= SGI_AFFINITY_LEVEL(reg, 2);
	mpidr |= SGI_AFFINITY_LEVEL(reg, 1);

	/*
	 * We iterate over all VCPUs to find the MPIDRs matching the request.
	 * If we have handled one CPU, we clear its bit to detect early
	 * if we are already finished. This avoids iterating through all
	 * VCPUs when most of the times we just signal a single VCPU.
	 */
	kvm_for_each_vcpu(c, c_vcpu, kvm) {
		struct vgic_irq *irq;

		/* Exit early if we have dealt with all requested CPUs */
		if (!broadcast && target_cpus == 0)
			break;

		/* Don't signal the calling VCPU */
		if (broadcast && c == vcpu_id)
			continue;

		if (!broadcast) {
			int level0;

			level0 = match_mpidr(mpidr, target_cpus, c_vcpu);
			if (level0 == -1)
				continue;

			/* remove this matching VCPU from the mask */
			target_cpus &= ~BIT(level0);
		}

		irq = vgic_get_irq(vcpu->kvm, c_vcpu, sgi);

		spin_lock_irqsave(&irq->irq_lock, flags);
		irq->pending_latch = true;

		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}
int vgic_v3_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
			 int offset, u32 *val)
{
	struct vgic_io_device dev = {
		.regions = vgic_v3_dist_registers,
		.nr_regions = ARRAY_SIZE(vgic_v3_dist_registers),
	};

	return vgic_uaccess(vcpu, &dev, is_write, offset, val);
}
int vgic_v3_redist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
			   int offset, u32 *val)
{
	struct vgic_io_device rd_dev = {
		.regions = vgic_v3_rdbase_registers,
		.nr_regions = ARRAY_SIZE(vgic_v3_rdbase_registers),
	};

	struct vgic_io_device sgi_dev = {
		.regions = vgic_v3_sgibase_registers,
		.nr_regions = ARRAY_SIZE(vgic_v3_sgibase_registers),
	};

	/* SGI_base is the next 64K frame after RD_base */
	if (offset >= SZ_64K)
		return vgic_uaccess(vcpu, &sgi_dev, is_write, offset - SZ_64K,
				    val);
	else
		return vgic_uaccess(vcpu, &rd_dev, is_write, offset, val);
}
int vgic_v3_line_level_info_uaccess(struct kvm_vcpu *vcpu, bool is_write,
				    u32 intid, u64 *val)
{
	if (intid % 32)
		return -EINVAL;

	if (is_write)
		vgic_write_irq_line_level_info(vcpu, intid, *val);
	else
		*val = vgic_read_irq_line_level_info(vcpu, intid);

	return 0;
}