// SPDX-License-Identifier: GPL-2.0-only
/*
 * VGIC MMIO handling functions
 */

#include <linux/bitops.h>
#include <linux/bsearch.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/iodev.h>
#include <kvm/arm_arch_timer.h>
#include <kvm/arm_vgic.h>

#include "vgic.h"
#include "vgic-mmio.h"

unsigned long vgic_mmio_read_raz(struct kvm_vcpu *vcpu,
				 gpa_t addr, unsigned int len)
{
	return 0;
}

unsigned long vgic_mmio_read_rao(struct kvm_vcpu *vcpu,
				 gpa_t addr, unsigned int len)
{
	return -1UL;
}

void vgic_mmio_write_wi(struct kvm_vcpu *vcpu, gpa_t addr,
			unsigned int len, unsigned long val)
{
	/* Ignore */
}

int vgic_mmio_uaccess_write_wi(struct kvm_vcpu *vcpu, gpa_t addr,
			       unsigned int len, unsigned long val)
{
	/* Ignore */
	return 0;
}
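
/*
 * The register handlers below all use VGIC_ADDR_TO_INTID() to turn a byte
 * offset into the first interrupt ID covered by the access. For a register
 * with 1 bit per interrupt (enable, pending, active, group), a 32-bit access
 * therefore covers 32 INTIDs: offset 0x4 into the GICD_ISENABLER range maps
 * to INTID 32, for example. An illustrative sketch of the mapping (the real
 * macro lives in vgic-mmio.h):
 *
 *	intid = (offset * 8) / bits_per_irq;
 */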

unsigned long vgic_mmio_read_group(struct kvm_vcpu *vcpu,
				   gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq->group)
			value |= BIT(i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

static void vgic_update_vsgi(struct vgic_irq *irq)
{
	WARN_ON(its_prop_update_vsgi(irq->host_irq, irq->priority, irq->group));
}

void vgic_mmio_write_group(struct kvm_vcpu *vcpu, gpa_t addr,
			   unsigned int len, unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		irq->group = !!(val & BIT(i));
		if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
			vgic_update_vsgi(irq);
			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		} else {
			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		}

		vgic_put_irq(vcpu->kvm, irq);
	}
}
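
/*
 * Note the asymmetry above: for a HW SGI (a GICv4.1 vSGI) the new group is
 * pushed to the ITS via vgic_update_vsgi() and the interrupt is not queued,
 * while for everything else the write may make the interrupt deliverable,
 * so it goes through vgic_queue_irq_unlock().
 */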

/*
 * Read accesses to both GICD_ICENABLER and GICD_ISENABLER return the value
 * of the enabled bit, so there is only one function for both here.
 */
unsigned long vgic_mmio_read_enable(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq->enabled)
			value |= (1U << i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

void vgic_mmio_write_senable(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
			if (!irq->enabled) {
				struct irq_data *data;

				irq->enabled = true;
				data = &irq_to_desc(irq->host_irq)->irq_data;
				while (irqd_irq_disabled(data))
					enable_irq(irq->host_irq);
			}

			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
			vgic_put_irq(vcpu->kvm, irq);

			continue;
		} else if (vgic_irq_is_mapped_level(irq)) {
			bool was_high = irq->line_level;

			/*
			 * We need to update the state of the interrupt because
			 * the guest might have changed the state of the device
			 * while the interrupt was disabled at the VGIC level.
			 */
			irq->line_level = vgic_get_phys_line_level(irq);
			/*
			 * Deactivate the physical interrupt so the GIC will let
			 * us know when it is asserted again.
			 */
			if (!irq->active && was_high && !irq->line_level)
				vgic_irq_set_phys_active(irq, false);
		}

		irq->enabled = true;
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}
}
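
/*
 * The enable_irq() loop above keeps re-enabling the host interrupt until
 * irqd_irq_disabled() reports it as live, i.e. until any accumulated disable
 * depth has been unwound; the matching disable is presumably the
 * disable_irq_nosync() call in vgic_mmio_write_cenable() below.
 */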

void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		if (irq->hw && vgic_irq_is_sgi(irq->intid) && irq->enabled)
			disable_irq_nosync(irq->host_irq);

		irq->enabled = false;

		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

int vgic_uaccess_write_senable(struct kvm_vcpu *vcpu,
			       gpa_t addr, unsigned int len,
			       unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		irq->enabled = true;
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return 0;
}

int vgic_uaccess_write_cenable(struct kvm_vcpu *vcpu,
			       gpa_t addr, unsigned int len,
			       unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		irq->enabled = false;
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return 0;
}
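
/*
 * Unlike the guest-facing handlers above, the userspace (uaccess) variants
 * only update the virtual enabled state and never poke the host interrupt;
 * they are typically used for save/restore of the VGIC state while the
 * VCPUs are stopped rather than for live guest operation.
 */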

unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		unsigned long flags;
		bool val;

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
			int err;

			val = false;
			err = irq_get_irqchip_state(irq->host_irq,
						    IRQCHIP_STATE_PENDING,
						    &val);
			WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);
		} else {
			val = irq_is_pending(irq);
		}

		value |= ((u32)val << i);
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

static bool is_vgic_v2_sgi(struct kvm_vcpu *vcpu, struct vgic_irq *irq)
{
	return (vgic_irq_is_sgi(irq->intid) &&
		vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2);
}
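
/*
 * On a GICv2 model, SGI pending state is banked per source CPU and is
 * accessed through GICD_SPENDSGIR/GICD_CPENDSGIR instead, which is why the
 * handlers below treat the SGI bits of GICD_ISPENDR0/GICD_ICPENDR0 as
 * write-ignored for such guests.
 */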

void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		/* GICD_ISPENDR0 SGI bits are WI */
		if (is_vgic_v2_sgi(vcpu, irq)) {
			vgic_put_irq(vcpu->kvm, irq);
			continue;
		}

		raw_spin_lock_irqsave(&irq->irq_lock, flags);

		if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
			/* HW SGI? Ask the GIC to inject it */
			int err;

			err = irq_set_irqchip_state(irq->host_irq,
						    IRQCHIP_STATE_PENDING,
						    true);
			WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);

			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
			vgic_put_irq(vcpu->kvm, irq);

			continue;
		}

		irq->pending_latch = true;
		if (irq->hw)
			vgic_irq_set_phys_active(irq, true);

		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

int vgic_uaccess_write_spending(struct kvm_vcpu *vcpu,
				gpa_t addr, unsigned int len,
				unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		irq->pending_latch = true;

		/*
		 * GICv2 SGIs are terribly broken. We can't restore
		 * the source of the interrupt, so just pick the vcpu
		 * itself as the source...
		 */
		if (is_vgic_v2_sgi(vcpu, irq))
			irq->source |= BIT(vcpu->vcpu_id);

		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return 0;
}

/* Must be called with irq->irq_lock held */
static void vgic_hw_irq_cpending(struct kvm_vcpu *vcpu, struct vgic_irq *irq)
{
	irq->pending_latch = false;

	/*
	 * We don't want the guest to effectively mask the physical
	 * interrupt by doing a write to SPENDR followed by a write to
	 * CPENDR for HW interrupts, so we clear the active state on
	 * the physical side if the virtual interrupt is not active.
	 * This may lead to taking an additional interrupt on the
	 * host, but that should not be a problem as the worst that
	 * can happen is an additional vgic injection. We also clear
	 * the pending state to maintain proper semantics for edge HW
	 * interrupts.
	 */
	vgic_irq_set_phys_pending(irq, false);
	if (!irq->active)
		vgic_irq_set_phys_active(irq, false);
}

void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		/* GICD_ICPENDR0 SGI bits are WI */
		if (is_vgic_v2_sgi(vcpu, irq)) {
			vgic_put_irq(vcpu->kvm, irq);
			continue;
		}

		raw_spin_lock_irqsave(&irq->irq_lock, flags);

		if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
			/* HW SGI? Ask the GIC to clear its pending bit */
			int err;

			err = irq_set_irqchip_state(irq->host_irq,
						    IRQCHIP_STATE_PENDING,
						    false);
			WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);

			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
			vgic_put_irq(vcpu->kvm, irq);

			continue;
		}

		if (irq->hw)
			vgic_hw_irq_cpending(vcpu, irq);
		else
			irq->pending_latch = false;

		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

int vgic_uaccess_write_cpending(struct kvm_vcpu *vcpu,
				gpa_t addr, unsigned int len,
				unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		/*
		 * More fun with GICv2 SGIs! If we're clearing one of them
		 * from userspace, which source vcpu to clear? Let's not
		 * even think of it, and blow the whole set.
		 */
		if (is_vgic_v2_sgi(vcpu, irq))
			irq->source = 0;

		irq->pending_latch = false;

		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return 0;
}

/*
 * If we are fiddling with an IRQ's active state, we have to make sure the IRQ
 * is not queued on some running VCPU's LRs, because then the change to the
 * active state can be overwritten when the VCPU's state is synced coming back
 * from the guest.
 *
 * For shared interrupts as well as GICv3 private interrupts, we have to
 * stop all the VCPUs because interrupts can be migrated while we don't hold
 * the IRQ locks and we don't want to be chasing moving targets.
 *
 * For GICv2 private interrupts we don't have to do anything because
 * userspace accesses to the VGIC state already require all VCPUs to be
 * stopped, and only the VCPU itself can modify its private interrupts
 * active state, which guarantees that the VCPU is not running.
 */
static void vgic_access_active_prepare(struct kvm_vcpu *vcpu, u32 intid)
{
	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
	    intid >= VGIC_NR_PRIVATE_IRQS)
		kvm_arm_halt_guest(vcpu->kvm);
}

/* See vgic_access_active_prepare */
static void vgic_access_active_finish(struct kvm_vcpu *vcpu, u32 intid)
{
	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
	    intid >= VGIC_NR_PRIVATE_IRQS)
		kvm_arm_resume_guest(vcpu->kvm);
}
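
/*
 * For example, a guest write to GICD_ICACTIVER for a SPI first halts all
 * VCPUs via the helper above, while a GICv2 access to the private
 * GICD_ICACTIVER0 range takes neither branch and the guest keeps running.
 */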

static unsigned long __vgic_mmio_read_active(struct kvm_vcpu *vcpu,
					     gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		/*
		 * Even for HW interrupts, don't evaluate the HW state as
		 * all the guest is interested in is the virtual state.
		 */
		if (irq->active)
			value |= (1U << i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 val;

	mutex_lock(&vcpu->kvm->lock);
	vgic_access_active_prepare(vcpu, intid);

	val = __vgic_mmio_read_active(vcpu, addr, len);

	vgic_access_active_finish(vcpu, intid);
	mutex_unlock(&vcpu->kvm->lock);

	return val;
}

unsigned long vgic_uaccess_read_active(struct kvm_vcpu *vcpu,
				       gpa_t addr, unsigned int len)
{
	return __vgic_mmio_read_active(vcpu, addr, len);
}

/* Must be called with irq->irq_lock held */
static void vgic_hw_irq_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
				      bool active, bool is_uaccess)
{
	if (is_uaccess)
		return;

	irq->active = active;
	vgic_irq_set_phys_active(irq, active);
}

static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
				    bool active)
{
	unsigned long flags;
	struct kvm_vcpu *requester_vcpu = kvm_get_running_vcpu();

	raw_spin_lock_irqsave(&irq->irq_lock, flags);

	if (irq->hw && !vgic_irq_is_sgi(irq->intid)) {
		vgic_hw_irq_change_active(vcpu, irq, active, !requester_vcpu);
	} else if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
		/*
		 * GICv4.1 VSGI feature doesn't track an active state,
		 * so let's not kid ourselves, there is nothing we can
		 * do here.
		 */
		irq->active = false;
	} else {
		u32 model = vcpu->kvm->arch.vgic.vgic_model;
		u8 active_source;

		irq->active = active;

		/*
		 * The GICv2 architecture indicates that the source CPUID for
		 * an SGI should be provided during an EOI which implies that
		 * the active state is stored somewhere, but at the same time
		 * this state is not architecturally exposed anywhere and we
		 * have no way of knowing the right source.
		 *
		 * This may lead to a VCPU not being able to receive
		 * additional instances of a particular SGI after migration
		 * for a GICv2 VM on some GIC implementations. Oh well.
		 */
		active_source = (requester_vcpu) ? requester_vcpu->vcpu_id : 0;

		if (model == KVM_DEV_TYPE_ARM_VGIC_V2 &&
		    active && vgic_irq_is_sgi(irq->intid))
			irq->active_source = active_source;
	}

	if (irq->active)
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
	else
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
}

static void __vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
				      gpa_t addr, unsigned int len,
				      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		vgic_mmio_change_active(vcpu, irq, false);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);

	mutex_lock(&vcpu->kvm->lock);
	vgic_access_active_prepare(vcpu, intid);

	__vgic_mmio_write_cactive(vcpu, addr, len, val);

	vgic_access_active_finish(vcpu, intid);
	mutex_unlock(&vcpu->kvm->lock);
}

int vgic_mmio_uaccess_write_cactive(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len,
				    unsigned long val)
{
	__vgic_mmio_write_cactive(vcpu, addr, len, val);
	return 0;
}

static void __vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
				      gpa_t addr, unsigned int len,
				      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		vgic_mmio_change_active(vcpu, irq, true);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);

	mutex_lock(&vcpu->kvm->lock);
	vgic_access_active_prepare(vcpu, intid);

	__vgic_mmio_write_sactive(vcpu, addr, len, val);

	vgic_access_active_finish(vcpu, intid);
	mutex_unlock(&vcpu->kvm->lock);
}

int vgic_mmio_uaccess_write_sactive(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len,
				    unsigned long val)
{
	__vgic_mmio_write_sactive(vcpu, addr, len, val);
	return 0;
}

unsigned long vgic_mmio_read_priority(struct kvm_vcpu *vcpu,
				      gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
	int i;
	u64 val = 0;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		val |= (u64)irq->priority << (i * 8);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return val;
}
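
/*
 * Priorities are packed one byte per interrupt, so a 32-bit read of
 * GICD_IPRIORITYR returns the priorities of four consecutive INTIDs,
 * e.g. len == 4 at offset 0x20 covers INTIDs 32-35.
 */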

/*
 * We currently don't handle changing the priority of an interrupt that
 * is already pending on a VCPU. If there is a need for this, we would
 * need to make this VCPU exit and re-evaluate the priorities, potentially
 * leading to this interrupt getting presented now to the guest (if it has
 * been masked by the priority mask before).
 */
void vgic_mmio_write_priority(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
	int i;
	unsigned long flags;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		/* Narrow the priority range to what we actually support */
		irq->priority = (val >> (i * 8)) & GENMASK(7, 8 - VGIC_PRI_BITS);
		if (irq->hw && vgic_irq_is_sgi(irq->intid))
			vgic_update_vsgi(irq);
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}
}
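
/*
 * The GENMASK() narrowing above keeps only the top VGIC_PRI_BITS of each
 * priority byte. Assuming VGIC_PRI_BITS is 5 (its value in vgic.h at the
 * time of writing), a guest write of 0xff is stored back as 0xf8.
 */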

unsigned long vgic_mmio_read_config(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 2);
	u32 value = 0;
	int i;

	for (i = 0; i < len * 4; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq->config == VGIC_CONFIG_EDGE)
			value |= (2U << (i * 2));

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}
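
/*
 * Each interrupt uses two configuration bits and only the upper bit is
 * meaningful here, so an edge-triggered interrupt reads back as 0b10 in its
 * field and a level-triggered one as 0b00.
 */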

void vgic_mmio_write_config(struct kvm_vcpu *vcpu,
			    gpa_t addr, unsigned int len,
			    unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 2);
	int i;
	unsigned long flags;

	for (i = 0; i < len * 4; i++) {
		struct vgic_irq *irq;

		/*
		 * The configuration cannot be changed for SGIs in general,
		 * for PPIs this is IMPLEMENTATION DEFINED. The arch timer
		 * code relies on PPIs being level triggered, so we also
		 * make them read-only here.
		 */
		if (intid + i < VGIC_NR_PRIVATE_IRQS)
			continue;

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		raw_spin_lock_irqsave(&irq->irq_lock, flags);

		if (test_bit(i * 2 + 1, &val))
			irq->config = VGIC_CONFIG_EDGE;
		else
			irq->config = VGIC_CONFIG_LEVEL;

		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

u64 vgic_read_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid)
{
	int i;
	u64 val = 0;
	int nr_irqs = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;

	for (i = 0; i < 32; i++) {
		struct vgic_irq *irq;

		if ((intid + i) < VGIC_NR_SGIS || (intid + i) >= nr_irqs)
			continue;

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		if (irq->config == VGIC_CONFIG_LEVEL && irq->line_level)
			val |= (1U << i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return val;
}

void vgic_write_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid,
				    const u64 val)
{
	int i;
	int nr_irqs = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
	unsigned long flags;

	for (i = 0; i < 32; i++) {
		struct vgic_irq *irq;
		bool new_level;

		if ((intid + i) < VGIC_NR_SGIS || (intid + i) >= nr_irqs)
			continue;

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		/*
		 * Line level is set irrespective of irq type
		 * (level or edge) to avoid dependency that VM should
		 * restore irq config before line level.
		 */
		new_level = !!(val & (1U << i));
		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		irq->line_level = new_level;
		if (new_level)
			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		else
			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}
}

static int match_region(const void *key, const void *elt)
{
	const unsigned int offset = (unsigned long)key;
	const struct vgic_register_region *region = elt;

	if (offset < region->reg_offset)
		return -1;

	if (offset >= region->reg_offset + region->len)
		return 1;

	return 0;
}

const struct vgic_register_region *
vgic_find_mmio_region(const struct vgic_register_region *regions,
		      int nr_regions, unsigned int offset)
{
	return bsearch((void *)(uintptr_t)offset, regions, nr_regions,
		       sizeof(regions[0]), match_region);
}

void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_set_vmcr(vcpu, vmcr);
	else
		vgic_v3_set_vmcr(vcpu, vmcr);
}

void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_get_vmcr(vcpu, vmcr);
	else
		vgic_v3_get_vmcr(vcpu, vmcr);
}

/*
 * kvm_mmio_read_buf() returns a value in a format where it can be converted
 * to a byte array and be directly observed as the guest wanted it to appear
 * in memory if it had done the store itself, which is LE for the GIC, as the
 * guest knows the GIC is always LE.
 *
 * We convert this value to the CPUs native format to deal with it as a data
 * value.
 */
unsigned long vgic_data_mmio_bus_to_host(const void *val, unsigned int len)
{
	unsigned long data = kvm_mmio_read_buf(val, len);

	switch (len) {
	case 1:
		return data;
	case 2:
		return le16_to_cpu(data);
	case 4:
		return le32_to_cpu(data);
	default:
		return le64_to_cpu(data);
	}
}
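
/*
 * Rough illustration of the conversion above: a guest storing the 32-bit
 * value 0x01020304 produces the byte sequence 04 03 02 01 on the (always
 * little-endian) GIC bus; le32_to_cpu() then yields 0x01020304 as a data
 * value on both LE and BE hosts.
 */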

/*
 * kvm_mmio_write_buf() expects a value in a format such that if converted to
 * a byte array it is observed as the guest would see it if it could perform
 * the load directly. Since the GIC is LE, and the guest knows this, the
 * guest expects a value in little endian format.
 *
 * We convert the data value from the CPUs native format to LE so that the
 * value is returned in the proper format.
 */
void vgic_data_host_to_mmio_bus(void *buf, unsigned int len,
				unsigned long data)
{
	switch (len) {
	case 1:
		break;
	case 2:
		data = cpu_to_le16(data);
		break;
	case 4:
		data = cpu_to_le32(data);
		break;
	default:
		data = cpu_to_le64(data);
	}

	kvm_mmio_write_buf(buf, len, data);
}

static struct vgic_io_device *kvm_to_vgic_iodev(const struct kvm_io_device *dev)
{
	return container_of(dev, struct vgic_io_device, dev);
}

static bool check_region(const struct kvm *kvm,
			 const struct vgic_register_region *region,
			 gpa_t addr, int len)
{
	int flags, nr_irqs = kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;

	switch (len) {
	case sizeof(u8):
		flags = VGIC_ACCESS_8bit;
		break;
	case sizeof(u32):
		flags = VGIC_ACCESS_32bit;
		break;
	case sizeof(u64):
		flags = VGIC_ACCESS_64bit;
		break;
	default:
		return false;
	}

	if ((region->access_flags & flags) && IS_ALIGNED(addr, len)) {
		if (!region->bits_per_irq)
			return true;

		/* Do we access a non-allocated IRQ? */
		return VGIC_ADDR_TO_INTID(addr, region->bits_per_irq) < nr_irqs;
	}

	return false;
}
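
/*
 * In other words, only naturally aligned 8-, 32- or 64-bit accesses that the
 * region advertises are accepted; anything else (e.g. a 2-byte access, or a
 * bits-per-irq register offset beyond the last allocated SPI) is rejected
 * and the MMIO dispatchers below then treat the access as RAZ/WI.
 */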

const struct vgic_register_region *
vgic_get_mmio_region(struct kvm_vcpu *vcpu, struct vgic_io_device *iodev,
		     gpa_t addr, int len)
{
	const struct vgic_register_region *region;

	region = vgic_find_mmio_region(iodev->regions, iodev->nr_regions,
				       addr - iodev->base_addr);
	if (!region || !check_region(vcpu->kvm, region, addr, len))
		return NULL;

	return region;
}

static int vgic_uaccess_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			     gpa_t addr, u32 *val)
{
	struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
	const struct vgic_register_region *region;
	struct kvm_vcpu *r_vcpu;

	region = vgic_get_mmio_region(vcpu, iodev, addr, sizeof(u32));
	if (!region) {
		*val = 0;
		return 0;
	}

	r_vcpu = iodev->redist_vcpu ? iodev->redist_vcpu : vcpu;
	if (region->uaccess_read)
		*val = region->uaccess_read(r_vcpu, addr, sizeof(u32));
	else
		*val = region->read(r_vcpu, addr, sizeof(u32));

	return 0;
}

static int vgic_uaccess_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			      gpa_t addr, const u32 *val)
{
	struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
	const struct vgic_register_region *region;
	struct kvm_vcpu *r_vcpu;

	region = vgic_get_mmio_region(vcpu, iodev, addr, sizeof(u32));
	if (!region)
		return -ENODEV;

	r_vcpu = iodev->redist_vcpu ? iodev->redist_vcpu : vcpu;
	if (region->uaccess_write)
		return region->uaccess_write(r_vcpu, addr, sizeof(u32), *val);

	region->write(r_vcpu, addr, sizeof(u32), *val);
	return 0;
}

/*
 * Userland access to VGIC registers.
 */
int vgic_uaccess(struct kvm_vcpu *vcpu, struct vgic_io_device *dev,
		 bool is_write, int offset, u32 *val)
{
	if (is_write)
		return vgic_uaccess_write(vcpu, &dev->dev, offset, val);
	else
		return vgic_uaccess_read(vcpu, &dev->dev, offset, val);
}

static int dispatch_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			      gpa_t addr, int len, void *val)
{
	struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
	const struct vgic_register_region *region;
	unsigned long data = 0;

	region = vgic_get_mmio_region(vcpu, iodev, addr, len);
	if (!region) {
		memset(val, 0, len);
		return 0;
	}

	switch (iodev->iodev_type) {
	case IODEV_CPUIF:
		data = region->read(vcpu, addr, len);
		break;
	case IODEV_DIST:
		data = region->read(vcpu, addr, len);
		break;
	case IODEV_REDIST:
		data = region->read(iodev->redist_vcpu, addr, len);
		break;
	case IODEV_ITS:
		data = region->its_read(vcpu->kvm, iodev->its, addr, len);
		break;
	}

	vgic_data_host_to_mmio_bus(val, len, data);
	return 0;
}

static int dispatch_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			       gpa_t addr, int len, const void *val)
{
	struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
	const struct vgic_register_region *region;
	unsigned long data = vgic_data_mmio_bus_to_host(val, len);

	region = vgic_get_mmio_region(vcpu, iodev, addr, len);
	if (!region)
		return 0;

	switch (iodev->iodev_type) {
	case IODEV_CPUIF:
		region->write(vcpu, addr, len, data);
		break;
	case IODEV_DIST:
		region->write(vcpu, addr, len, data);
		break;
	case IODEV_REDIST:
		region->write(iodev->redist_vcpu, addr, len, data);
		break;
	case IODEV_ITS:
		region->its_write(vcpu->kvm, iodev->its, addr, len, data);
		break;
	}

	return 0;
}

struct kvm_io_device_ops kvm_io_gic_ops = {
	.read = dispatch_mmio_read,
	.write = dispatch_mmio_write,
};

int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
			     enum vgic_type type)
{
	struct vgic_io_device *io_device = &kvm->arch.vgic.dist_iodev;
	unsigned int len;
	int ret;

	switch (type) {
	case VGIC_V2:
		len = vgic_v2_init_dist_iodev(io_device);
		break;
	case VGIC_V3:
		len = vgic_v3_init_dist_iodev(io_device);
		break;
	default:
		BUG_ON(1);
	}

	io_device->base_addr = dist_base_address;
	io_device->iodev_type = IODEV_DIST;
	io_device->redist_vcpu = NULL;

	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, dist_base_address,
				      len, &io_device->dev);
	mutex_unlock(&kvm->slots_lock);

	return ret;
}