/*
 * Copyright (C) 2012 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
#include <trace/events/kvm.h>

#include <kvm/iodev.h>
/*
 * How the whole thing works (courtesy of Christoffer Dall):
 *
 * - At any time, the dist->irq_pending_on_cpu is the oracle that knows if
 *   something is pending on the CPU interface.
 * - Interrupts that are pending on the distributor are stored on the
 *   vgic.irq_pending vgic bitmap (this bitmap is updated by both user land
 *   ioctls and guest mmio ops, and other in-kernel peripherals such as the
 *   arch. timers).
 * - Every time the bitmap changes, the irq_pending_on_cpu oracle is
 *   recalculated.
 * - To calculate the oracle, we need info for each cpu from
 *   compute_pending_for_cpu, which considers:
 *   - PPI: dist->irq_pending & dist->irq_enable
 *   - SPI: dist->irq_pending & dist->irq_enable & dist->irq_spi_target
 *   - irq_spi_target is a 'formatted' version of the GICD_ITARGETSRn
 *     registers, stored on each vcpu. We only keep one bit of
 *     information per interrupt, making sure that only one vcpu can
 *     accept the interrupt.
 * - If any of the above state changes, we must recalculate the oracle.
 * - The same is true when injecting an interrupt, except that we only
 *   consider a single interrupt at a time. The irq_spi_cpu array
 *   contains the target CPU for each SPI.
 *
 * The handling of level interrupts adds some extra complexity. We
 * need to track when the interrupt has been EOIed, so we can sample
 * the 'line' again. This is achieved as such:
 *
 * - When a level interrupt is moved onto a vcpu, the corresponding
 *   bit in irq_queued is set. As long as this bit is set, the line
 *   will be ignored for further interrupts. The interrupt is injected
 *   into the vcpu with the GICH_LR_EOI bit set (generate a
 *   maintenance interrupt on EOI).
 * - When the interrupt is EOIed, the maintenance interrupt fires,
 *   and clears the corresponding bit in irq_queued. This allows the
 *   interrupt line to be sampled again.
 * - Note that level-triggered interrupts can also be set to pending from
 *   writes to GICD_ISPENDRn and lowering the external input line does not
 *   cause the interrupt to become inactive in such a situation.
 *   Conversely, writes to GICD_ICPENDRn do not cause the interrupt to become
 *   inactive as long as the external input line is held high.
 *
 * Initialization rules: there are multiple stages to the vgic
 * initialization, both for the distributor and the CPU interfaces.
 *
 * Distributor:
 *
 * - kvm_vgic_early_init(): initialization of static data that doesn't
 *   depend on any sizing information or emulation type. No allocation
 *   is allowed there.
 *
 * - vgic_init(): allocation and initialization of the generic data
 *   structures that depend on sizing information (number of CPUs,
 *   number of interrupts). Also initializes the vcpu specific data
 *   structures. Can be executed lazily for GICv2.
 *   [to be renamed to kvm_vgic_init??]
 *
 * CPU Interface:
 *
 * - kvm_vgic_cpu_early_init(): initialization of static data that
 *   doesn't depend on any sizing information or emulation type. No
 *   allocation is allowed there.
 */
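/*
 * Illustrative note (added for clarity, not from the original source): with
 * the oracle kept up to date as described above, asking "does this vcpu have
 * something to inject?" reduces to a single bit test, which is exactly what
 * kvm_vgic_vcpu_pending_irq() does further down in this file:
 *
 *	test_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu)
 */
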
static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu);
static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu);
static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr);
static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr, struct vgic_lr lr_desc);
static struct irq_phys_map *vgic_irq_map_search(struct kvm_vcpu *vcpu,
						int virt_irq);
static int compute_pending_for_cpu(struct kvm_vcpu *vcpu);

static const struct vgic_ops *vgic_ops;
static const struct vgic_params *vgic;
static void add_sgi_source(struct kvm_vcpu *vcpu, int irq, int source)
{
	vcpu->kvm->arch.vgic.vm_ops.add_sgi_source(vcpu, irq, source);
}

static bool queue_sgi(struct kvm_vcpu *vcpu, int irq)
{
	return vcpu->kvm->arch.vgic.vm_ops.queue_sgi(vcpu, irq);
}

int kvm_vgic_map_resources(struct kvm *kvm)
{
	return kvm->arch.vgic.vm_ops.map_resources(kvm, vgic);
}
/*
 * struct vgic_bitmap contains a bitmap made of unsigned longs, but
 * extracts u32s out of them.
 *
 * This does not work on 64-bit BE systems, because the bitmap access
 * will store two consecutive 32-bit words with the higher-addressed
 * register's bits at the lower index and the lower-addressed register's
 * bits at the higher index.
 *
 * Therefore, swizzle the register index when accessing the 32-bit word
 * registers to access the right register's value.
 */
#if defined(CONFIG_CPU_BIG_ENDIAN) && BITS_PER_LONG == 64
#define REG_OFFSET_SWIZZLE	1
#else
#define REG_OFFSET_SWIZZLE	0
#endif
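
/*
 * Worked example (illustrative only, not from the original source): on a
 * 64-bit big-endian host, bits 0-31 of an unsigned long live in the
 * higher-addressed half of that long, so the two u32 "register" words
 * inside each long appear swapped in memory. vgic_bitmap_get_reg() below
 * therefore XORs the 32-bit word index with REG_OFFSET_SWIZZLE (1), so
 * that, e.g., the word for interrupts 32-63 is still found at register
 * offset 4.
 */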
static int vgic_init_bitmap(struct vgic_bitmap *b, int nr_cpus, int nr_irqs)
{
	int nr_longs;

	nr_longs = nr_cpus + BITS_TO_LONGS(nr_irqs - VGIC_NR_PRIVATE_IRQS);

	b->private = kzalloc(sizeof(unsigned long) * nr_longs, GFP_KERNEL);
	if (!b->private)
		return -ENOMEM;

	b->shared = b->private + nr_cpus;

	return 0;
}

static void vgic_free_bitmap(struct vgic_bitmap *b)
{
	kfree(b->private);
	b->private = NULL;
	b->shared = NULL;
}
/*
 * Call this function to convert a u64 value to an unsigned long * bitmask
 * in a way that works on both 32-bit and 64-bit LE and BE platforms.
 *
 * Warning: Calling this function may modify *val.
 */
static unsigned long *u64_to_bitmask(u64 *val)
{
#if defined(CONFIG_CPU_BIG_ENDIAN) && BITS_PER_LONG == 32
	*val = (*val >> 32) | (*val << 32);
#endif
	return (unsigned long *)val;
}
u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x, int cpuid, u32 offset)
{
	offset >>= 2;
	if (!offset)
		return (u32 *)(x->private + cpuid) + REG_OFFSET_SWIZZLE;
	else
		return (u32 *)(x->shared) + ((offset - 1) ^ REG_OFFSET_SWIZZLE);
}
static int vgic_bitmap_get_irq_val(struct vgic_bitmap *x,
				   int cpuid, int irq)
{
	if (irq < VGIC_NR_PRIVATE_IRQS)
		return test_bit(irq, x->private + cpuid);

	return test_bit(irq - VGIC_NR_PRIVATE_IRQS, x->shared);
}

void vgic_bitmap_set_irq_val(struct vgic_bitmap *x, int cpuid,
			     int irq, int val)
{
	unsigned long *reg;

	if (irq < VGIC_NR_PRIVATE_IRQS) {
		reg = x->private + cpuid;
	} else {
		reg = x->shared;
		irq -= VGIC_NR_PRIVATE_IRQS;
	}

	if (val)
		set_bit(irq, reg);
	else
		clear_bit(irq, reg);
}
static unsigned long *vgic_bitmap_get_cpu_map(struct vgic_bitmap *x, int cpuid)
{
	return x->private + cpuid;
}

unsigned long *vgic_bitmap_get_shared_map(struct vgic_bitmap *x)
{
	return x->shared;
}
static int vgic_init_bytemap(struct vgic_bytemap *x, int nr_cpus, int nr_irqs)
{
	int size;

	size  = nr_cpus * VGIC_NR_PRIVATE_IRQS;
	size += nr_irqs - VGIC_NR_PRIVATE_IRQS;

	x->private = kzalloc(size, GFP_KERNEL);
	if (!x->private)
		return -ENOMEM;

	x->shared = x->private + nr_cpus * VGIC_NR_PRIVATE_IRQS / sizeof(u32);
	return 0;
}

static void vgic_free_bytemap(struct vgic_bytemap *b)
{
	kfree(b->private);
	b->private = NULL;
	b->shared = NULL;
}
u32 *vgic_bytemap_get_reg(struct vgic_bytemap *x, int cpuid, u32 offset)
{
	u32 *reg;

	if (offset < VGIC_NR_PRIVATE_IRQS) {
		reg = x->private;
		offset += cpuid * VGIC_NR_PRIVATE_IRQS;
	} else {
		reg = x->shared;
		offset -= VGIC_NR_PRIVATE_IRQS;
	}

	return reg + (offset / sizeof(u32));
}
#define VGIC_CFG_LEVEL	0
#define VGIC_CFG_EDGE	1

static bool vgic_irq_is_edge(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int irq_val;

	irq_val = vgic_bitmap_get_irq_val(&dist->irq_cfg, vcpu->vcpu_id, irq);
	return irq_val == VGIC_CFG_EDGE;
}
static int vgic_irq_is_enabled(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return vgic_bitmap_get_irq_val(&dist->irq_enabled, vcpu->vcpu_id, irq);
}

static int vgic_irq_is_queued(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return vgic_bitmap_get_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq);
}

static int vgic_irq_is_active(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return vgic_bitmap_get_irq_val(&dist->irq_active, vcpu->vcpu_id, irq);
}
static void vgic_irq_set_queued(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq, 1);
}

static void vgic_irq_clear_queued(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq, 0);
}

static void vgic_irq_set_active(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_active, vcpu->vcpu_id, irq, 1);
}

static void vgic_irq_clear_active(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_active, vcpu->vcpu_id, irq, 0);
}
static int vgic_dist_irq_get_level(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return vgic_bitmap_get_irq_val(&dist->irq_level, vcpu->vcpu_id, irq);
}

static void vgic_dist_irq_set_level(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_level, vcpu->vcpu_id, irq, 1);
}

static void vgic_dist_irq_clear_level(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_level, vcpu->vcpu_id, irq, 0);
}
static int vgic_dist_irq_soft_pend(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return vgic_bitmap_get_irq_val(&dist->irq_soft_pend, vcpu->vcpu_id, irq);
}

static void vgic_dist_irq_clear_soft_pend(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_soft_pend, vcpu->vcpu_id, irq, 0);
	if (!vgic_dist_irq_get_level(vcpu, irq)) {
		vgic_dist_irq_clear_pending(vcpu, irq);
		if (!compute_pending_for_cpu(vcpu))
			clear_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu);
	}
}
static int vgic_dist_irq_is_pending(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return vgic_bitmap_get_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq);
}

void vgic_dist_irq_set_pending(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq, 1);
}

void vgic_dist_irq_clear_pending(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq, 0);
}
static void vgic_cpu_irq_set(struct kvm_vcpu *vcpu, int irq)
{
	if (irq < VGIC_NR_PRIVATE_IRQS)
		set_bit(irq, vcpu->arch.vgic_cpu.pending_percpu);
	else
		set_bit(irq - VGIC_NR_PRIVATE_IRQS,
			vcpu->arch.vgic_cpu.pending_shared);
}

void vgic_cpu_irq_clear(struct kvm_vcpu *vcpu, int irq)
{
	if (irq < VGIC_NR_PRIVATE_IRQS)
		clear_bit(irq, vcpu->arch.vgic_cpu.pending_percpu);
	else
		clear_bit(irq - VGIC_NR_PRIVATE_IRQS,
			  vcpu->arch.vgic_cpu.pending_shared);
}

static bool vgic_can_sample_irq(struct kvm_vcpu *vcpu, int irq)
{
	return !vgic_irq_is_queued(vcpu, irq);
}
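
/*
 * Example (illustrative only, not from the original source): with
 * VGIC_NR_PRIVATE_IRQS == 32 (16 SGIs + 16 PPIs), a shared interrupt such
 * as SPI 45 is tracked in bit 45 - 32 = 13 of pending_shared, while the
 * private SGIs/PPIs use pending_percpu.
 */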
/**
 * vgic_reg_access - access vgic register
 * @mmio:   pointer to the data describing the mmio access
 * @reg:    pointer to the virtual backing of vgic distributor data
 * @offset: least significant 2 bits used for word offset
 * @mode:   ACCESS_ mode (see defines above)
 *
 * Helper to make vgic register access easier using one of the access
 * modes defined for vgic register access
 * (read,raz,write-ignored,setbit,clearbit,write)
 */
void vgic_reg_access(struct kvm_exit_mmio *mmio, u32 *reg,
		     phys_addr_t offset, int mode)
{
	int word_offset = (offset & 3) * 8;
	u32 mask = (1UL << (mmio->len * 8)) - 1;
	u32 regval;

	/*
	 * Any alignment fault should have been delivered to the guest
	 * directly (ARM ARM B3.12.7 "Prioritization of aborts").
	 */

	if (reg) {
		regval = *reg;
	} else {
		BUG_ON(mode != (ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED));
		regval = 0;
	}

	if (mmio->is_write) {
		u32 data = mmio_data_read(mmio, mask) << word_offset;
		switch (ACCESS_WRITE_MASK(mode)) {
		case ACCESS_WRITE_IGNORED:
			return;

		case ACCESS_WRITE_SETBIT:
			regval |= data;
			break;

		case ACCESS_WRITE_CLEARBIT:
			regval &= ~data;
			break;

		case ACCESS_WRITE_VALUE:
			regval = (regval & ~(mask << word_offset)) | data;
			break;
		}
		*reg = regval;
	} else {
		switch (ACCESS_READ_MASK(mode)) {
		case ACCESS_READ_RAZ:
			regval = 0;
			/* fall through */

		case ACCESS_READ_VALUE:
			mmio_data_write(mmio, mask, regval >> word_offset);
		}
	}
}
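
/*
 * Worked example (illustrative only, not from the original source): a
 * one-byte guest write to byte 2 of a word register gives
 * word_offset = 16 and mask = 0xff, so for ACCESS_WRITE_VALUE only bits
 * 23:16 of *reg are replaced and the other bytes are preserved.
 */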
bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
			phys_addr_t offset)
{
	vgic_reg_access(mmio, NULL, offset,
			ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
	return false;
}
bool vgic_handle_enable_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio,
			    phys_addr_t offset, int vcpu_id, int access)
{
	u32 *reg;
	int mode = ACCESS_READ_VALUE | access;
	struct kvm_vcpu *target_vcpu = kvm_get_vcpu(kvm, vcpu_id);

	reg = vgic_bitmap_get_reg(&kvm->arch.vgic.irq_enabled, vcpu_id, offset);
	vgic_reg_access(mmio, reg, offset, mode);
	if (mmio->is_write) {
		if (access & ACCESS_WRITE_CLEARBIT) {
			if (offset < 4) /* Force SGI enabled */
				*reg |= 0xffff;
			vgic_retire_disabled_irqs(target_vcpu);
		}
		vgic_update_state(kvm);
		return true;
	}

	return false;
}
bool vgic_handle_set_pending_reg(struct kvm *kvm,
				 struct kvm_exit_mmio *mmio,
				 phys_addr_t offset, int vcpu_id)
{
	u32 *reg, orig;
	u32 level_mask;
	int mode = ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT;
	struct vgic_dist *dist = &kvm->arch.vgic;

	reg = vgic_bitmap_get_reg(&dist->irq_cfg, vcpu_id, offset);
	level_mask = (~(*reg));

	/* Mark both level and edge triggered irqs as pending */
	reg = vgic_bitmap_get_reg(&dist->irq_pending, vcpu_id, offset);
	orig = *reg;
	vgic_reg_access(mmio, reg, offset, mode);

	if (mmio->is_write) {
		/* Set the soft-pending flag only for level-triggered irqs */
		reg = vgic_bitmap_get_reg(&dist->irq_soft_pend,
					  vcpu_id, offset);
		vgic_reg_access(mmio, reg, offset, mode);
		*reg &= level_mask;

		/* Ignore writes to SGIs */
		if (offset < 2) {
			*reg &= ~0xffff;
			*reg |= orig & 0xffff;
		}

		vgic_update_state(kvm);
		return true;
	}

	return false;
}
/*
 * If a mapped interrupt's state has been modified by the guest such that it
 * is no longer active or pending, without it having gone through the sync
 * path, then the map->active field must be cleared so the interrupt can be
 * taken again.
 */
static void vgic_handle_clear_mapped_irq(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct list_head *root;
	struct irq_phys_map_entry *entry;
	struct irq_phys_map *map;

	rcu_read_lock();

	/* Check for PPIs */
	root = &vgic_cpu->irq_phys_map_list;
	list_for_each_entry_rcu(entry, root, entry) {
		map = &entry->map;

		if (!vgic_dist_irq_is_pending(vcpu, map->virt_irq) &&
		    !vgic_irq_is_active(vcpu, map->virt_irq))
			map->active = false;
	}

	rcu_read_unlock();
}
bool vgic_handle_clear_pending_reg(struct kvm *kvm,
				   struct kvm_exit_mmio *mmio,
				   phys_addr_t offset, int vcpu_id)
{
	u32 *level_active;
	u32 *reg, orig;
	int mode = ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT;
	struct vgic_dist *dist = &kvm->arch.vgic;

	reg = vgic_bitmap_get_reg(&dist->irq_pending, vcpu_id, offset);
	orig = *reg;
	vgic_reg_access(mmio, reg, offset, mode);
	if (mmio->is_write) {
		/* Re-set level triggered level-active interrupts */
		level_active = vgic_bitmap_get_reg(&dist->irq_level,
						   vcpu_id, offset);
		reg = vgic_bitmap_get_reg(&dist->irq_pending, vcpu_id, offset);
		*reg |= *level_active;

		/* Ignore writes to SGIs */
		if (offset < 2) {
			*reg &= ~0xffff;
			*reg |= orig & 0xffff;
		}

		/* Clear soft-pending flags */
		reg = vgic_bitmap_get_reg(&dist->irq_soft_pend,
					  vcpu_id, offset);
		vgic_reg_access(mmio, reg, offset, mode);

		vgic_handle_clear_mapped_irq(kvm_get_vcpu(kvm, vcpu_id));
		vgic_update_state(kvm);
		return true;
	}

	return false;
}
bool vgic_handle_set_active_reg(struct kvm *kvm,
				struct kvm_exit_mmio *mmio,
				phys_addr_t offset, int vcpu_id)
{
	u32 *reg;
	struct vgic_dist *dist = &kvm->arch.vgic;

	reg = vgic_bitmap_get_reg(&dist->irq_active, vcpu_id, offset);
	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);

	if (mmio->is_write) {
		vgic_update_state(kvm);
		return true;
	}

	return false;
}

bool vgic_handle_clear_active_reg(struct kvm *kvm,
				  struct kvm_exit_mmio *mmio,
				  phys_addr_t offset, int vcpu_id)
{
	u32 *reg;
	struct vgic_dist *dist = &kvm->arch.vgic;

	reg = vgic_bitmap_get_reg(&dist->irq_active, vcpu_id, offset);
	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);

	if (mmio->is_write) {
		vgic_handle_clear_mapped_irq(kvm_get_vcpu(kvm, vcpu_id));
		vgic_update_state(kvm);
		return true;
	}

	return false;
}
static u32 vgic_cfg_expand(u16 val)
{
	u32 res = 0;
	int i;

	/*
	 * Turn a 16bit value like abcd...mnop into a 32bit word
	 * a0b0c0d0...m0n0o0p0, which is what the HW cfg register is.
	 */
	for (i = 0; i < 16; i++)
		res |= ((val >> i) & VGIC_CFG_EDGE) << (2 * i + 1);

	return res;
}

static u16 vgic_cfg_compress(u32 val)
{
	u16 res = 0;
	int i;

	/*
	 * Turn a 32bit word a0b0c0d0...m0n0o0p0 into 16bit value like
	 * abcd...mnop which is what we really care about.
	 */
	for (i = 0; i < 16; i++)
		res |= ((val >> (i * 2 + 1)) & VGIC_CFG_EDGE) << i;

	return res;
}
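
/*
 * Worked example (illustrative only, not from the original source): a
 * GICD_ICFGRn word of 0x0000000a (bits 1 and 3 set) compresses to 0x0003,
 * meaning interrupts 0 and 1 of that group are edge-triggered; expanding
 * 0x0003 gives back 0x0000000a.
 */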
/*
 * The distributor uses 2 bits per IRQ for the CFG register, but the
 * LSB is always 0. As such, we only keep the upper bit, and use the
 * two above functions to compress/expand the bits
 */
bool vgic_handle_cfg_reg(u32 *reg, struct kvm_exit_mmio *mmio,
			 phys_addr_t offset)
{
	u32 val;

	if (offset & 4)
		val = *reg >> 16;
	else
		val = *reg & 0xffff;

	val = vgic_cfg_expand(val);
	vgic_reg_access(mmio, &val, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
	if (mmio->is_write) {
		if (offset < 8) {
			*reg = ~0U; /* Force PPIs/SGIs to 1 */
			return false;
		}

		val = vgic_cfg_compress(val);
		if (offset & 4) {
			*reg &= 0xffff;
			*reg |= val << 16;
		} else {
			*reg &= 0xffff << 16;
			*reg |= val;
		}
	}

	return false;
}
/**
 * vgic_unqueue_irqs - move pending/active IRQs from LRs to the distributor
 * @vgic_cpu: Pointer to the vgic_cpu struct holding the LRs
 *
 * Move any IRQs that have already been assigned to LRs back to the
 * emulated distributor state so that the complete emulated state can be read
 * from the main emulation structures without investigating the LRs.
 */
void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	int i;

	for_each_set_bit(i, vgic_cpu->lr_used, vgic_cpu->nr_lr) {
		struct vgic_lr lr = vgic_get_lr(vcpu, i);

		/*
		 * There are three options for the state bits:
		 *
		 * 01: pending
		 * 10: active
		 * 11: pending and active
		 */
		BUG_ON(!(lr.state & LR_STATE_MASK));

		/* Reestablish SGI source for pending and active IRQs */
		if (lr.irq < VGIC_NR_SGIS)
			add_sgi_source(vcpu, lr.irq, lr.source);

		/*
		 * If the LR holds an active (10) or a pending and active (11)
		 * interrupt then move the active state to the
		 * distributor tracking bit.
		 */
		if (lr.state & LR_STATE_ACTIVE) {
			vgic_irq_set_active(vcpu, lr.irq);
			lr.state &= ~LR_STATE_ACTIVE;
		}

		/*
		 * Reestablish the pending state on the distributor and the
		 * CPU interface.  It may have already been pending, but that
		 * is fine, then we are only setting a few bits that were
		 * already set.
		 */
		if (lr.state & LR_STATE_PENDING) {
			vgic_dist_irq_set_pending(vcpu, lr.irq);
			lr.state &= ~LR_STATE_PENDING;
		}

		vgic_set_lr(vcpu, i, lr);

		/*
		 * Mark the LR as free for other use.
		 */
		BUG_ON(lr.state & LR_STATE_MASK);
		vgic_retire_lr(i, lr.irq, vcpu);
		vgic_irq_clear_queued(vcpu, lr.irq);

		/* Finally update the VGIC state. */
		vgic_update_state(vcpu->kvm);
	}
}
const
struct vgic_io_range *vgic_find_range(const struct vgic_io_range *ranges,
				      int len, gpa_t offset)
{
	while (ranges->len) {
		if (offset >= ranges->base &&
		    (offset + len) <= (ranges->base + ranges->len))
			return ranges;
		ranges++;
	}

	return NULL;
}

static bool vgic_validate_access(const struct vgic_dist *dist,
				 const struct vgic_io_range *range,
				 unsigned long offset)
{
	int irq;

	if (!range->bits_per_irq)
		return true;	/* Not an irq-based access */

	irq = offset * 8 / range->bits_per_irq;
	if (irq >= dist->nr_irqs)
		return false;

	return true;
}
/*
 * Call the respective handler function for the given range.
 * We split up any 64 bit accesses into two consecutive 32 bit
 * handler calls and merge the result afterwards.
 * We do this in a little endian fashion regardless of the host's
 * or guest's endianness, because the GIC is always LE and the rest of
 * the code (vgic_reg_access) also puts it in a LE fashion already.
 * At this point we have already identified the handle function, so
 * range points to that one entry and offset is relative to this.
 */
static bool call_range_handler(struct kvm_vcpu *vcpu,
			       struct kvm_exit_mmio *mmio,
			       unsigned long offset,
			       const struct vgic_io_range *range)
{
	struct kvm_exit_mmio mmio32;
	bool ret;

	if (likely(mmio->len <= 4))
		return range->handle_mmio(vcpu, mmio, offset);

	/*
	 * Any access bigger than 4 bytes (that we currently handle in KVM)
	 * is actually 8 bytes long, caused by a 64-bit access
	 */

	mmio32.len = 4;
	mmio32.is_write = mmio->is_write;
	mmio32.private = mmio->private;

	mmio32.phys_addr = mmio->phys_addr + 4;
	mmio32.data = &((u32 *)mmio->data)[1];
	ret = range->handle_mmio(vcpu, &mmio32, offset + 4);

	mmio32.phys_addr = mmio->phys_addr;
	mmio32.data = &((u32 *)mmio->data)[0];
	ret |= range->handle_mmio(vcpu, &mmio32, offset);

	return ret;
}
/**
 * vgic_handle_mmio_access - handle an in-kernel MMIO access
 * This is called by the read/write KVM IO device wrappers below.
 * @vcpu:     pointer to the vcpu performing the access
 * @this:     pointer to the KVM IO device in charge
 * @addr:     guest physical address of the access
 * @len:      size of the access
 * @val:      pointer to the data region
 * @is_write: read or write access
 *
 * returns true if the MMIO access could be performed
 */
static int vgic_handle_mmio_access(struct kvm_vcpu *vcpu,
				   struct kvm_io_device *this, gpa_t addr,
				   int len, void *val, bool is_write)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	struct vgic_io_device *iodev = container_of(this,
						    struct vgic_io_device, dev);
	struct kvm_run *run = vcpu->run;
	const struct vgic_io_range *range;
	struct kvm_exit_mmio mmio;
	bool updated_state;
	gpa_t offset;

	offset = addr - iodev->addr;
	range = vgic_find_range(iodev->reg_ranges, len, offset);
	if (unlikely(!range || !range->handle_mmio)) {
		pr_warn("Unhandled access %d %08llx %d\n", is_write, addr, len);
		return -ENXIO;
	}

	mmio.phys_addr = addr;
	mmio.len = len;
	mmio.is_write = is_write;
	mmio.data = val;
	mmio.private = iodev->redist_vcpu;

	spin_lock(&dist->lock);
	offset -= range->base;
	if (vgic_validate_access(dist, range, offset)) {
		updated_state = call_range_handler(vcpu, &mmio, offset, range);
	} else {
		if (!is_write)
			memset(val, 0, len);
		updated_state = false;
	}
	spin_unlock(&dist->lock);
	run->mmio.is_write	= is_write;
	run->mmio.len		= len;
	run->mmio.phys_addr	= addr;
	memcpy(run->mmio.data, val, len);

	kvm_handle_mmio_return(vcpu, run);

	if (updated_state)
		vgic_kick_vcpus(vcpu->kvm);

	return 0;
}
static int vgic_handle_mmio_read(struct kvm_vcpu *vcpu,
				 struct kvm_io_device *this,
				 gpa_t addr, int len, void *val)
{
	return vgic_handle_mmio_access(vcpu, this, addr, len, val, false);
}

static int vgic_handle_mmio_write(struct kvm_vcpu *vcpu,
				  struct kvm_io_device *this,
				  gpa_t addr, int len, const void *val)
{
	return vgic_handle_mmio_access(vcpu, this, addr, len, (void *)val,
				       true);
}

struct kvm_io_device_ops vgic_io_ops = {
	.read	= vgic_handle_mmio_read,
	.write	= vgic_handle_mmio_write,
};
/**
 * vgic_register_kvm_io_dev - register VGIC register frame on the KVM I/O bus
 * @kvm:            The VM structure pointer
 * @base:           The (guest) base address for the register frame
 * @len:            Length of the register frame window
 * @ranges:         Describing the handler functions for each register
 * @redist_vcpu_id: The VCPU ID to pass on to the handlers on call
 * @iodev:          Points to memory to be passed on to the handler
 *
 * @iodev stores the parameters of this function to be usable by the handler
 * and dispatcher functions (since the KVM I/O bus framework lacks an opaque
 * parameter). Initialization is done in this function, but the reference
 * should be valid and unique for the whole VGIC lifetime.
 * If the register frame is not mapped for a specific VCPU, pass -1 to
 * @redist_vcpu_id.
 */
int vgic_register_kvm_io_dev(struct kvm *kvm, gpa_t base, int len,
			     const struct vgic_io_range *ranges,
			     int redist_vcpu_id,
			     struct vgic_io_device *iodev)
{
	struct kvm_vcpu *vcpu = NULL;
	int ret;

	if (redist_vcpu_id >= 0)
		vcpu = kvm_get_vcpu(kvm, redist_vcpu_id);

	iodev->addr		= base;
	iodev->len		= len;
	iodev->reg_ranges	= ranges;
	iodev->redist_vcpu	= vcpu;

	kvm_iodevice_init(&iodev->dev, &vgic_io_ops);

	mutex_lock(&kvm->slots_lock);

	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, base, len,
				      &iodev->dev);
	mutex_unlock(&kvm->slots_lock);

	/* Mark the iodev as invalid if registration fails. */
	if (ret < 0)
		iodev->dev.ops = NULL;

	return ret;
}
static int vgic_nr_shared_irqs(struct vgic_dist *dist)
{
	return dist->nr_irqs - VGIC_NR_PRIVATE_IRQS;
}

static int compute_active_for_cpu(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	unsigned long *active, *enabled, *act_percpu, *act_shared;
	unsigned long active_private, active_shared;
	int nr_shared = vgic_nr_shared_irqs(dist);
	int vcpu_id;

	vcpu_id = vcpu->vcpu_id;
	act_percpu = vcpu->arch.vgic_cpu.active_percpu;
	act_shared = vcpu->arch.vgic_cpu.active_shared;

	active = vgic_bitmap_get_cpu_map(&dist->irq_active, vcpu_id);
	enabled = vgic_bitmap_get_cpu_map(&dist->irq_enabled, vcpu_id);
	bitmap_and(act_percpu, active, enabled, VGIC_NR_PRIVATE_IRQS);

	active = vgic_bitmap_get_shared_map(&dist->irq_active);
	enabled = vgic_bitmap_get_shared_map(&dist->irq_enabled);
	bitmap_and(act_shared, active, enabled, nr_shared);
	bitmap_and(act_shared, act_shared,
		   vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]),
		   nr_shared);

	active_private = find_first_bit(act_percpu, VGIC_NR_PRIVATE_IRQS);
	active_shared = find_first_bit(act_shared, nr_shared);

	return (active_private < VGIC_NR_PRIVATE_IRQS ||
		active_shared < nr_shared);
}
static int compute_pending_for_cpu(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	unsigned long *pending, *enabled, *pend_percpu, *pend_shared;
	unsigned long pending_private, pending_shared;
	int nr_shared = vgic_nr_shared_irqs(dist);
	int vcpu_id;

	vcpu_id = vcpu->vcpu_id;
	pend_percpu = vcpu->arch.vgic_cpu.pending_percpu;
	pend_shared = vcpu->arch.vgic_cpu.pending_shared;

	if (!dist->enabled) {
		bitmap_zero(pend_percpu, VGIC_NR_PRIVATE_IRQS);
		bitmap_zero(pend_shared, nr_shared);
		return 0;
	}

	pending = vgic_bitmap_get_cpu_map(&dist->irq_pending, vcpu_id);
	enabled = vgic_bitmap_get_cpu_map(&dist->irq_enabled, vcpu_id);
	bitmap_and(pend_percpu, pending, enabled, VGIC_NR_PRIVATE_IRQS);

	pending = vgic_bitmap_get_shared_map(&dist->irq_pending);
	enabled = vgic_bitmap_get_shared_map(&dist->irq_enabled);
	bitmap_and(pend_shared, pending, enabled, nr_shared);
	bitmap_and(pend_shared, pend_shared,
		   vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]),
		   nr_shared);

	pending_private = find_first_bit(pend_percpu, VGIC_NR_PRIVATE_IRQS);
	pending_shared = find_first_bit(pend_shared, nr_shared);
	return (pending_private < VGIC_NR_PRIVATE_IRQS ||
		pending_shared < vgic_nr_shared_irqs(dist));
}
/*
 * Update the interrupt state and determine which CPUs have pending
 * or active interrupts. Must be called with distributor lock held.
 */
void vgic_update_state(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int c;

	kvm_for_each_vcpu(c, vcpu, kvm) {
		if (compute_pending_for_cpu(vcpu))
			set_bit(c, dist->irq_pending_on_cpu);

		if (compute_active_for_cpu(vcpu))
			set_bit(c, dist->irq_active_on_cpu);
		else
			clear_bit(c, dist->irq_active_on_cpu);
	}
}
static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr)
{
	return vgic_ops->get_lr(vcpu, lr);
}

static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr,
			struct vgic_lr vlr)
{
	vgic_ops->set_lr(vcpu, lr, vlr);
}

static void vgic_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr,
			       struct vgic_lr vlr)
{
	vgic_ops->sync_lr_elrsr(vcpu, lr, vlr);
}

static inline u64 vgic_get_elrsr(struct kvm_vcpu *vcpu)
{
	return vgic_ops->get_elrsr(vcpu);
}

static inline u64 vgic_get_eisr(struct kvm_vcpu *vcpu)
{
	return vgic_ops->get_eisr(vcpu);
}

static inline void vgic_clear_eisr(struct kvm_vcpu *vcpu)
{
	vgic_ops->clear_eisr(vcpu);
}

static inline u32 vgic_get_interrupt_status(struct kvm_vcpu *vcpu)
{
	return vgic_ops->get_interrupt_status(vcpu);
}

static inline void vgic_enable_underflow(struct kvm_vcpu *vcpu)
{
	vgic_ops->enable_underflow(vcpu);
}

static inline void vgic_disable_underflow(struct kvm_vcpu *vcpu)
{
	vgic_ops->disable_underflow(vcpu);
}

void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
	vgic_ops->get_vmcr(vcpu, vmcr);
}

void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
	vgic_ops->set_vmcr(vcpu, vmcr);
}

static inline void vgic_enable(struct kvm_vcpu *vcpu)
{
	vgic_ops->enable(vcpu);
}
static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_lr vlr = vgic_get_lr(vcpu, lr_nr);

	/*
	 * We must transfer the pending state back to the distributor before
	 * retiring the LR, otherwise we may lose edge-triggered interrupts.
	 */
	if (vlr.state & LR_STATE_PENDING) {
		vgic_dist_irq_set_pending(vcpu, irq);
		vlr.hwirq = 0;
	}

	vlr.state = 0;
	vgic_set_lr(vcpu, lr_nr, vlr);
	clear_bit(lr_nr, vgic_cpu->lr_used);
	vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY;
	vgic_sync_lr_elrsr(vcpu, lr_nr, vlr);
}
/*
 * An interrupt may have been disabled after being made pending on the
 * CPU interface (the classic case is a timer running while we're
 * rebooting the guest - the interrupt would kick as soon as the CPU
 * interface gets enabled, with deadly consequences).
 *
 * The solution is to examine already active LRs, and check the
 * interrupt is still enabled. If not, just retire it.
 */
static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	int lr;

	for_each_set_bit(lr, vgic_cpu->lr_used, vgic->nr_lr) {
		struct vgic_lr vlr = vgic_get_lr(vcpu, lr);

		if (!vgic_irq_is_enabled(vcpu, vlr.irq)) {
			vgic_retire_lr(lr, vlr.irq, vcpu);
			if (vgic_irq_is_queued(vcpu, vlr.irq))
				vgic_irq_clear_queued(vcpu, vlr.irq);
		}
	}
}
static void vgic_queue_irq_to_lr(struct kvm_vcpu *vcpu, int irq,
				 int lr_nr, struct vgic_lr vlr)
{
	if (vgic_irq_is_active(vcpu, irq)) {
		vlr.state |= LR_STATE_ACTIVE;
		kvm_debug("Set active, clear distributor: 0x%x\n", vlr.state);
		vgic_irq_clear_active(vcpu, irq);
		vgic_update_state(vcpu->kvm);
	} else {
		WARN_ON(!vgic_dist_irq_is_pending(vcpu, irq));
		vlr.state |= LR_STATE_PENDING;
		kvm_debug("Set pending: 0x%x\n", vlr.state);
	}

	if (!vgic_irq_is_edge(vcpu, irq))
		vlr.state |= LR_EOI_INT;

	if (vlr.irq >= VGIC_NR_SGIS) {
		struct irq_phys_map *map;
		map = vgic_irq_map_search(vcpu, irq);

		if (map) {
			vlr.hwirq = map->phys_irq;
			vlr.state |= LR_HW;
			vlr.state &= ~LR_EOI_INT;

			/*
			 * Make sure we're not going to sample this
			 * again, as a HW-backed interrupt cannot be
			 * in the PENDING_ACTIVE stage.
			 */
			vgic_irq_set_queued(vcpu, irq);
		}
	}

	vgic_set_lr(vcpu, lr_nr, vlr);
	vgic_sync_lr_elrsr(vcpu, lr_nr, vlr);
}
/*
 * Queue an interrupt to a CPU virtual interface. Return true on success,
 * or false if it wasn't possible to queue it.
 * sgi_source must be zero for any non-SGI interrupts.
 */
bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	struct vgic_lr vlr;
	int lr;

	/* Sanitize the input... */
	BUG_ON(sgi_source_id & ~7);
	BUG_ON(sgi_source_id && irq >= VGIC_NR_SGIS);
	BUG_ON(irq >= dist->nr_irqs);

	kvm_debug("Queue IRQ%d\n", irq);

	lr = vgic_cpu->vgic_irq_lr_map[irq];

	/* Do we have an active interrupt for the same CPUID? */
	if (lr != LR_EMPTY) {
		vlr = vgic_get_lr(vcpu, lr);
		if (vlr.source == sgi_source_id) {
			kvm_debug("LR%d piggyback for IRQ%d\n", lr, vlr.irq);
			BUG_ON(!test_bit(lr, vgic_cpu->lr_used));
			vgic_queue_irq_to_lr(vcpu, irq, lr, vlr);
			return true;
		}
	}

	/* Try to use another LR for this interrupt */
	lr = find_first_zero_bit((unsigned long *)vgic_cpu->lr_used,
				 vgic->nr_lr);
	if (lr >= vgic->nr_lr)
		return false;

	kvm_debug("LR%d allocated for IRQ%d %x\n", lr, irq, sgi_source_id);
	vgic_cpu->vgic_irq_lr_map[irq] = lr;
	set_bit(lr, vgic_cpu->lr_used);

	vlr.irq = irq;
	vlr.source = sgi_source_id;
	vlr.state = 0;
	vgic_queue_irq_to_lr(vcpu, irq, lr, vlr);

	return true;
}
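
/*
 * Note (added for clarity, not from the original source): if the interrupt
 * already occupies an LR for the same SGI source, vgic_queue_irq() above
 * "piggybacks" on that LR instead of consuming a new one; a fresh LR is
 * only allocated when no matching entry exists and a free slot remains.
 */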
static bool vgic_queue_hwirq(struct kvm_vcpu *vcpu, int irq)
{
	if (!vgic_can_sample_irq(vcpu, irq))
		return true; /* level interrupt, already queued */

	if (vgic_queue_irq(vcpu, 0, irq)) {
		if (vgic_irq_is_edge(vcpu, irq)) {
			vgic_dist_irq_clear_pending(vcpu, irq);
			vgic_cpu_irq_clear(vcpu, irq);
		} else {
			vgic_irq_set_queued(vcpu, irq);
		}

		return true;
	}

	return false;
}
/*
 * Fill the list registers with pending interrupts before running the
 * guest.
 */
static void __kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	unsigned long *pa_percpu, *pa_shared;
	int i, vcpu_id;
	int overflow = 0;
	int nr_shared = vgic_nr_shared_irqs(dist);

	vcpu_id = vcpu->vcpu_id;

	pa_percpu = vcpu->arch.vgic_cpu.pend_act_percpu;
	pa_shared = vcpu->arch.vgic_cpu.pend_act_shared;

	bitmap_or(pa_percpu, vgic_cpu->pending_percpu, vgic_cpu->active_percpu,
		  VGIC_NR_PRIVATE_IRQS);
	bitmap_or(pa_shared, vgic_cpu->pending_shared, vgic_cpu->active_shared,
		  nr_shared);
	/*
	 * We may not have any pending interrupt, or the interrupts
	 * may have been serviced from another vcpu. In all cases,
	 * move along.
	 */
	if (!kvm_vgic_vcpu_pending_irq(vcpu) && !kvm_vgic_vcpu_active_irq(vcpu))
		goto epilog;

	/* SGIs */
	for_each_set_bit(i, pa_percpu, VGIC_NR_SGIS) {
		if (!queue_sgi(vcpu, i))
			overflow = 1;
	}

	/* PPIs */
	for_each_set_bit_from(i, pa_percpu, VGIC_NR_PRIVATE_IRQS) {
		if (!vgic_queue_hwirq(vcpu, i))
			overflow = 1;
	}

	/* SPIs */
	for_each_set_bit(i, pa_shared, nr_shared) {
		if (!vgic_queue_hwirq(vcpu, i + VGIC_NR_PRIVATE_IRQS))
			overflow = 1;
	}

epilog:
	if (overflow) {
		vgic_enable_underflow(vcpu);
	} else {
		vgic_disable_underflow(vcpu);
		/*
		 * We're about to run this VCPU, and we've consumed
		 * everything the distributor had in store for
		 * us. Claim we don't have anything pending. We'll
		 * adjust that if needed while exiting.
		 */
		clear_bit(vcpu_id, dist->irq_pending_on_cpu);
	}
}
static int process_level_irq(struct kvm_vcpu *vcpu, int lr, struct vgic_lr vlr)
{
	int level_pending = 0;

	vlr.state = 0;
	vlr.hwirq = 0;
	vgic_set_lr(vcpu, lr, vlr);

	/*
	 * If the IRQ was EOIed (called from vgic_process_maintenance) or it
	 * went from active to non-active (called from vgic_sync_hwirq) it was
	 * also ACKed and we therefore assume we can clear the soft pending
	 * state (should it have been set) for this interrupt.
	 *
	 * Note: if the IRQ soft pending state was set after the IRQ was
	 * acked, it actually shouldn't be cleared, but we have no way of
	 * knowing that unless we start trapping ACKs when the soft-pending
	 * state is set.
	 */
	vgic_dist_irq_clear_soft_pend(vcpu, vlr.irq);

	/*
	 * Tell the gic to start sampling the line of this interrupt again.
	 */
	vgic_irq_clear_queued(vcpu, vlr.irq);

	/* Any additional pending interrupt? */
	if (vgic_dist_irq_get_level(vcpu, vlr.irq)) {
		vgic_cpu_irq_set(vcpu, vlr.irq);
		level_pending = 1;
	} else {
		vgic_dist_irq_clear_pending(vcpu, vlr.irq);
		vgic_cpu_irq_clear(vcpu, vlr.irq);
	}

	/*
	 * Despite being EOIed, the LR may not have
	 * been marked as empty.
	 */
	vgic_sync_lr_elrsr(vcpu, lr, vlr);

	return level_pending;
}
static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
{
	u32 status = vgic_get_interrupt_status(vcpu);
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	struct kvm *kvm = vcpu->kvm;
	int level_pending = 0;

	kvm_debug("STATUS = %08x\n", status);

	if (status & INT_STATUS_EOI) {
		/*
		 * Some level interrupts have been EOIed. Clear their
		 * active bit.
		 */
		u64 eisr = vgic_get_eisr(vcpu);
		unsigned long *eisr_ptr = u64_to_bitmask(&eisr);
		int lr;

		for_each_set_bit(lr, eisr_ptr, vgic->nr_lr) {
			struct vgic_lr vlr = vgic_get_lr(vcpu, lr);

			WARN_ON(vgic_irq_is_edge(vcpu, vlr.irq));
			WARN_ON(vlr.state & LR_STATE_MASK);

			/*
			 * kvm_notify_acked_irq calls kvm_set_irq()
			 * to reset the IRQ level, which grabs the dist->lock
			 * so we call this before taking the dist->lock.
			 */
			kvm_notify_acked_irq(kvm, 0,
					     vlr.irq - VGIC_NR_PRIVATE_IRQS);

			spin_lock(&dist->lock);
			level_pending |= process_level_irq(vcpu, lr, vlr);
			spin_unlock(&dist->lock);
		}
	}

	if (status & INT_STATUS_UNDERFLOW)
		vgic_disable_underflow(vcpu);

	/*
	 * In the next iterations of the vcpu loop, if we sync the vgic state
	 * after flushing it, but before entering the guest (this happens for
	 * pending signals and vmid rollovers), then make sure we don't pick
	 * up any old maintenance interrupts here.
	 */
	vgic_clear_eisr(vcpu);

	return level_pending;
}
/*
 * Save the physical active state, and reset it to inactive.
 *
 * Return 1 if HW interrupt went from active to inactive, and 0 otherwise.
 */
static int vgic_sync_hwirq(struct kvm_vcpu *vcpu, struct vgic_lr vlr)
{
	struct irq_phys_map *map;
	int ret;

	if (!(vlr.state & LR_HW))
		return 0;

	map = vgic_irq_map_search(vcpu, vlr.irq);
	BUG_ON(!map);

	ret = irq_get_irqchip_state(map->irq,
				    IRQCHIP_STATE_ACTIVE,
				    &map->active);

	WARN_ON(ret);

	if (map->active)
		return 0;

	return 1;
}
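
/*
 * Note (added for clarity, not from the original source): the physical
 * active state is read back here with irq_get_irqchip_state(); a return
 * value of 1 means the interrupt is no longer active on the host GIC,
 * i.e. the guest EOIed a HW-mapped interrupt, so the caller may recycle
 * the corresponding LR and allow the interrupt to be sampled again.
 */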
/* Sync back the VGIC state after a guest run */
static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	u64 elrsr;
	unsigned long *elrsr_ptr;
	int lr, pending;
	bool level_pending;

	level_pending = vgic_process_maintenance(vcpu);
	elrsr = vgic_get_elrsr(vcpu);
	elrsr_ptr = u64_to_bitmask(&elrsr);

	/* Deal with HW interrupts, and clear mappings for empty LRs */
	for (lr = 0; lr < vgic->nr_lr; lr++) {
		struct vgic_lr vlr;

		if (!test_bit(lr, vgic_cpu->lr_used))
			continue;

		vlr = vgic_get_lr(vcpu, lr);
		if (vgic_sync_hwirq(vcpu, vlr)) {
			/*
			 * So this is a HW interrupt that the guest
			 * EOI-ed. Clean the LR state and allow the
			 * interrupt to be sampled again.
			 */
			vlr.state = 0;
			vlr.hwirq = 0;
			vgic_set_lr(vcpu, lr, vlr);
			vgic_irq_clear_queued(vcpu, vlr.irq);
			set_bit(lr, elrsr_ptr);
		}

		if (!test_bit(lr, elrsr_ptr))
			continue;

		clear_bit(lr, vgic_cpu->lr_used);

		BUG_ON(vlr.irq >= dist->nr_irqs);
		vgic_cpu->vgic_irq_lr_map[vlr.irq] = LR_EMPTY;
	}

	/* Check if we still have something up our sleeve... */
	pending = find_first_zero_bit(elrsr_ptr, vgic->nr_lr);
	if (level_pending || pending < vgic->nr_lr)
		set_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu);
}
void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	if (!irqchip_in_kernel(vcpu->kvm))
		return;

	spin_lock(&dist->lock);
	__kvm_vgic_flush_hwstate(vcpu);
	spin_unlock(&dist->lock);
}

void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
	if (!irqchip_in_kernel(vcpu->kvm))
		return;

	__kvm_vgic_sync_hwstate(vcpu);
}

int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	if (!irqchip_in_kernel(vcpu->kvm))
		return 0;

	return test_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu);
}

int kvm_vgic_vcpu_active_irq(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	if (!irqchip_in_kernel(vcpu->kvm))
		return 0;

	return test_bit(vcpu->vcpu_id, dist->irq_active_on_cpu);
}
void vgic_kick_vcpus(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int c;

	/*
	 * We've injected an interrupt, time to find out who deserves
	 * a good kick...
	 */
	kvm_for_each_vcpu(c, vcpu, kvm) {
		if (kvm_vgic_vcpu_pending_irq(vcpu))
			kvm_vcpu_kick(vcpu);
	}
}
static int vgic_validate_injection(struct kvm_vcpu *vcpu, int irq, int level)
{
	int edge_triggered = vgic_irq_is_edge(vcpu, irq);

	/*
	 * Only inject an interrupt if:
	 * - edge triggered and we have a rising edge
	 * - level triggered and we change level
	 */
	if (edge_triggered) {
		int state = vgic_dist_irq_is_pending(vcpu, irq);
		return level > state;
	} else {
		int state = vgic_dist_irq_get_level(vcpu, irq);
		return level != state;
	}
}
static int vgic_update_irq_pending(struct kvm *kvm, int cpuid,
				   struct irq_phys_map *map,
				   unsigned int irq_num, bool level)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int edge_triggered, level_triggered;
	int enabled;
	bool ret = true, can_inject = true;

	if (irq_num >= min(kvm->arch.vgic.nr_irqs, 1020))
		return -EINVAL;

	spin_lock(&dist->lock);

	vcpu = kvm_get_vcpu(kvm, cpuid);
	edge_triggered = vgic_irq_is_edge(vcpu, irq_num);
	level_triggered = !edge_triggered;

	if (!vgic_validate_injection(vcpu, irq_num, level)) {
		ret = false;
		goto out;
	}

	if (irq_num >= VGIC_NR_PRIVATE_IRQS) {
		cpuid = dist->irq_spi_cpu[irq_num - VGIC_NR_PRIVATE_IRQS];
		if (cpuid == VCPU_NOT_ALLOCATED) {
			/* Pretend we use CPU0, and prevent injection */
			cpuid = 0;
			can_inject = false;
		}
		vcpu = kvm_get_vcpu(kvm, cpuid);
	}

	kvm_debug("Inject IRQ%d level %d CPU%d\n", irq_num, level, cpuid);

	if (level) {
		if (level_triggered)
			vgic_dist_irq_set_level(vcpu, irq_num);
		vgic_dist_irq_set_pending(vcpu, irq_num);
	} else {
		if (level_triggered) {
			vgic_dist_irq_clear_level(vcpu, irq_num);
			if (!vgic_dist_irq_soft_pend(vcpu, irq_num)) {
				vgic_dist_irq_clear_pending(vcpu, irq_num);
				vgic_cpu_irq_clear(vcpu, irq_num);
				if (!compute_pending_for_cpu(vcpu))
					clear_bit(cpuid,
						  dist->irq_pending_on_cpu);
			}
		}

		ret = false;
		goto out;
	}

	enabled = vgic_irq_is_enabled(vcpu, irq_num);

	if (!enabled || !can_inject) {
		ret = false;
		goto out;
	}

	if (!vgic_can_sample_irq(vcpu, irq_num)) {
		/*
		 * Level interrupt in progress, will be picked up
		 * when EOId.
		 */
		ret = false;
		goto out;
	}

	if (level) {
		vgic_cpu_irq_set(vcpu, irq_num);
		set_bit(cpuid, dist->irq_pending_on_cpu);
	}

out:
	spin_unlock(&dist->lock);

	if (ret) {
		/* kick the specified vcpu */
		kvm_vcpu_kick(kvm_get_vcpu(kvm, cpuid));
	}

	return 0;
}
static int vgic_lazy_init(struct kvm *kvm)
{
	int ret = 0;

	if (unlikely(!vgic_initialized(kvm))) {
		/*
		 * We only provide the automatic initialization of the VGIC
		 * for the legacy case of a GICv2. Any other type must
		 * be explicitly initialized once setup with the respective
		 * KVM device call.
		 */
		if (kvm->arch.vgic.vgic_model != KVM_DEV_TYPE_ARM_VGIC_V2)
			return -EBUSY;

		mutex_lock(&kvm->lock);
		ret = vgic_init(kvm);
		mutex_unlock(&kvm->lock);
	}

	return ret;
}
/**
 * kvm_vgic_inject_irq - Inject an IRQ from a device to the vgic
 * @kvm:     The VM structure pointer
 * @cpuid:   The CPU for PPIs
 * @irq_num: The IRQ number that is assigned to the device. This IRQ
 *           must not be mapped to a HW interrupt.
 * @level:   Edge-triggered:  true:  to trigger the interrupt
 *			      false: to ignore the call
 *	     Level-sensitive  true:  raise the input signal
 *			      false: lower the input signal
 *
 * The GIC is not concerned with devices being active-LOW or active-HIGH for
 * level-sensitive interrupts.  You can think of the level parameter as 1
 * being HIGH and 0 being LOW and all devices being active-HIGH.
 */
int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
			bool level)
{
	struct irq_phys_map *map;
	int ret;

	ret = vgic_lazy_init(kvm);
	if (ret)
		return ret;

	map = vgic_irq_map_search(kvm_get_vcpu(kvm, cpuid), irq_num);
	if (map)
		return -EINVAL;

	return vgic_update_irq_pending(kvm, cpuid, NULL, irq_num, level);
}
/**
 * kvm_vgic_inject_mapped_irq - Inject a physically mapped IRQ to the vgic
 * @kvm:     The VM structure pointer
 * @cpuid:   The CPU for PPIs
 * @map:     Pointer to a irq_phys_map structure describing the mapping
 * @level:   Edge-triggered:  true:  to trigger the interrupt
 *			      false: to ignore the call
 *	     Level-sensitive  true:  raise the input signal
 *			      false: lower the input signal
 *
 * The GIC is not concerned with devices being active-LOW or active-HIGH for
 * level-sensitive interrupts.  You can think of the level parameter as 1
 * being HIGH and 0 being LOW and all devices being active-HIGH.
 */
int kvm_vgic_inject_mapped_irq(struct kvm *kvm, int cpuid,
			       struct irq_phys_map *map, bool level)
{
	int ret;

	ret = vgic_lazy_init(kvm);
	if (ret)
		return ret;

	return vgic_update_irq_pending(kvm, cpuid, map, map->virt_irq, level);
}
static irqreturn_t vgic_maintenance_handler(int irq, void *data)
{
	/*
	 * We cannot rely on the vgic maintenance interrupt to be
	 * delivered synchronously. This means we can only use it to
	 * exit the VM, and we perform the handling of EOIed
	 * interrupts on the exit path (see vgic_process_maintenance).
	 */
	return IRQ_HANDLED;
}

static struct list_head *vgic_get_irq_phys_map_list(struct kvm_vcpu *vcpu,
						    int virt_irq)
{
	if (virt_irq < VGIC_NR_PRIVATE_IRQS)
		return &vcpu->arch.vgic_cpu.irq_phys_map_list;
	else
		return &vcpu->kvm->arch.vgic.irq_phys_map_list;
}
/**
 * kvm_vgic_map_phys_irq - map a virtual IRQ to a physical IRQ
 * @vcpu: The VCPU pointer
 * @virt_irq: The virtual irq number
 * @irq: The Linux IRQ number
 *
 * Establish a mapping between a guest visible irq (@virt_irq) and a
 * Linux irq (@irq). On injection, @virt_irq will be associated with
 * the physical interrupt represented by @irq. This mapping can be
 * established multiple times as long as the parameters are the same.
 *
 * Returns a valid pointer on success, and an error pointer otherwise
 */
struct irq_phys_map *kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu,
					   int virt_irq, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	struct list_head *root = vgic_get_irq_phys_map_list(vcpu, virt_irq);
	struct irq_phys_map *map;
	struct irq_phys_map_entry *entry;
	struct irq_desc *desc;
	struct irq_data *data;
	int phys_irq;

	desc = irq_to_desc(irq);
	if (!desc) {
		kvm_err("%s: no interrupt descriptor\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	data = irq_desc_get_irq_data(desc);
	while (data->parent_data)
		data = data->parent_data;

	phys_irq = data->hwirq;

	/* Create a new mapping */
	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return ERR_PTR(-ENOMEM);

	spin_lock(&dist->irq_phys_map_lock);

	/* Try to match an existing mapping */
	map = vgic_irq_map_search(vcpu, virt_irq);
	if (map) {
		/* Make sure this mapping matches */
		if (map->phys_irq != phys_irq ||
		    map->irq      != irq)
			map = ERR_PTR(-EINVAL);

		/* Found an existing, valid mapping */
		goto out;
	}

	map           = &entry->map;
	map->virt_irq = virt_irq;
	map->phys_irq = phys_irq;
	map->irq      = irq;

	list_add_tail_rcu(&entry->entry, root);

out:
	spin_unlock(&dist->irq_phys_map_lock);
	/* If we've found a hit in the existing list, free the useless
	 * entry */
	if (IS_ERR(map) || map != &entry->map)
		kfree(entry);
	return map;
}
static struct irq_phys_map *vgic_irq_map_search(struct kvm_vcpu *vcpu,
						int virt_irq)
{
	struct list_head *root = vgic_get_irq_phys_map_list(vcpu, virt_irq);
	struct irq_phys_map_entry *entry;
	struct irq_phys_map *map;

	rcu_read_lock();

	list_for_each_entry_rcu(entry, root, entry) {
		map = &entry->map;
		if (map->virt_irq == virt_irq) {
			rcu_read_unlock();
			return map;
		}
	}

	rcu_read_unlock();

	return NULL;
}

static void vgic_free_phys_irq_map_rcu(struct rcu_head *rcu)
{
	struct irq_phys_map_entry *entry;

	entry = container_of(rcu, struct irq_phys_map_entry, rcu);
	kfree(entry);
}
/**
 * kvm_vgic_get_phys_irq_active - Return the active state of a mapped IRQ
 *
 * Return the logical active state of a mapped interrupt. This doesn't
 * necessarily reflect the current HW state.
 */
bool kvm_vgic_get_phys_irq_active(struct irq_phys_map *map)
{
	BUG_ON(!map);
	return map->active;
}

/**
 * kvm_vgic_set_phys_irq_active - Set the active state of a mapped IRQ
 *
 * Set the logical active state of a mapped interrupt. This doesn't
 * immediately affect the HW state.
 */
void kvm_vgic_set_phys_irq_active(struct irq_phys_map *map, bool active)
{
	BUG_ON(!map);
	map->active = active;
}
/**
 * kvm_vgic_unmap_phys_irq - Remove a virtual to physical IRQ mapping
 * @vcpu: The VCPU pointer
 * @map: The pointer to a mapping obtained through kvm_vgic_map_phys_irq
 *
 * Remove an existing mapping between virtual and physical interrupts.
 */
int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, struct irq_phys_map *map)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	struct irq_phys_map_entry *entry;
	struct list_head *root;

	if (!map)
		return -EINVAL;

	root = vgic_get_irq_phys_map_list(vcpu, map->virt_irq);

	spin_lock(&dist->irq_phys_map_lock);

	list_for_each_entry(entry, root, entry) {
		if (&entry->map == map) {
			list_del_rcu(&entry->entry);
			call_rcu(&entry->rcu, vgic_free_phys_irq_map_rcu);
			break;
		}
	}

	spin_unlock(&dist->irq_phys_map_lock);

	return 0;
}
static void vgic_destroy_irq_phys_map(struct kvm *kvm, struct list_head *root)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct irq_phys_map_entry *entry;

	spin_lock(&dist->irq_phys_map_lock);

	list_for_each_entry(entry, root, entry) {
		list_del_rcu(&entry->entry);
		call_rcu(&entry->rcu, vgic_free_phys_irq_map_rcu);
	}

	spin_unlock(&dist->irq_phys_map_lock);
}
void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

	kfree(vgic_cpu->pending_shared);
	kfree(vgic_cpu->active_shared);
	kfree(vgic_cpu->pend_act_shared);
	kfree(vgic_cpu->vgic_irq_lr_map);
	vgic_destroy_irq_phys_map(vcpu->kvm, &vgic_cpu->irq_phys_map_list);
	vgic_cpu->pending_shared = NULL;
	vgic_cpu->active_shared = NULL;
	vgic_cpu->pend_act_shared = NULL;
	vgic_cpu->vgic_irq_lr_map = NULL;
}
static int vgic_vcpu_init_maps(struct kvm_vcpu *vcpu, int nr_irqs)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

	int sz = (nr_irqs - VGIC_NR_PRIVATE_IRQS) / 8;
	vgic_cpu->pending_shared = kzalloc(sz, GFP_KERNEL);
	vgic_cpu->active_shared = kzalloc(sz, GFP_KERNEL);
	vgic_cpu->pend_act_shared = kzalloc(sz, GFP_KERNEL);
	vgic_cpu->vgic_irq_lr_map = kmalloc(nr_irqs, GFP_KERNEL);

	if (!vgic_cpu->pending_shared
		|| !vgic_cpu->active_shared
		|| !vgic_cpu->pend_act_shared
		|| !vgic_cpu->vgic_irq_lr_map) {
		kvm_vgic_vcpu_destroy(vcpu);
		return -ENOMEM;
	}

	memset(vgic_cpu->vgic_irq_lr_map, LR_EMPTY, nr_irqs);

	/*
	 * Store the number of LRs per vcpu, so we don't have to go
	 * all the way to the distributor structure to find out. Only
	 * assembly code should use this one.
	 */
	vgic_cpu->nr_lr = vgic->nr_lr;

	return 0;
}
/**
 * kvm_vgic_vcpu_early_init - Earliest possible per-vcpu vgic init stage
 *
 * No memory allocation should be performed here, only static init.
 */
void kvm_vgic_vcpu_early_init(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	INIT_LIST_HEAD(&vgic_cpu->irq_phys_map_list);
}
/**
 * kvm_vgic_get_max_vcpus - Get the maximum number of VCPUs allowed by HW
 *
 * The host's GIC naturally limits the maximum amount of VCPUs a guest
 * can use.
 */
int kvm_vgic_get_max_vcpus(void)
{
	return vgic->max_gic_vcpus;
}
void kvm_vgic_destroy(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int i;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_vgic_vcpu_destroy(vcpu);

	vgic_free_bitmap(&dist->irq_enabled);
	vgic_free_bitmap(&dist->irq_level);
	vgic_free_bitmap(&dist->irq_pending);
	vgic_free_bitmap(&dist->irq_soft_pend);
	vgic_free_bitmap(&dist->irq_queued);
	vgic_free_bitmap(&dist->irq_cfg);
	vgic_free_bytemap(&dist->irq_priority);
	if (dist->irq_spi_target) {
		for (i = 0; i < dist->nr_cpus; i++)
			vgic_free_bitmap(&dist->irq_spi_target[i]);
	}
	kfree(dist->irq_sgi_sources);
	kfree(dist->irq_spi_cpu);
	kfree(dist->irq_spi_mpidr);
	kfree(dist->irq_spi_target);
	kfree(dist->irq_pending_on_cpu);
	kfree(dist->irq_active_on_cpu);
	vgic_destroy_irq_phys_map(kvm, &dist->irq_phys_map_list);
	dist->irq_sgi_sources = NULL;
	dist->irq_spi_cpu = NULL;
	dist->irq_spi_target = NULL;
	dist->irq_pending_on_cpu = NULL;
	dist->irq_active_on_cpu = NULL;
	dist->nr_cpus = 0;
}
/*
 * Allocate and initialize the various data structures. Must be called
 * with kvm->lock held!
 */
int vgic_init(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int nr_cpus, nr_irqs;
	int ret, i, vcpu_id;

	if (vgic_initialized(kvm))
		return 0;

	nr_cpus = dist->nr_cpus = atomic_read(&kvm->online_vcpus);
	if (!nr_cpus)		/* No vcpus? Can't be good... */
		return -ENODEV;

	/*
	 * If nobody configured the number of interrupts, use the
	 * legacy one.
	 */
	if (!dist->nr_irqs)
		dist->nr_irqs = VGIC_NR_IRQS_LEGACY;

	nr_irqs = dist->nr_irqs;

	ret  = vgic_init_bitmap(&dist->irq_enabled, nr_cpus, nr_irqs);
	ret |= vgic_init_bitmap(&dist->irq_level, nr_cpus, nr_irqs);
	ret |= vgic_init_bitmap(&dist->irq_pending, nr_cpus, nr_irqs);
	ret |= vgic_init_bitmap(&dist->irq_soft_pend, nr_cpus, nr_irqs);
	ret |= vgic_init_bitmap(&dist->irq_queued, nr_cpus, nr_irqs);
	ret |= vgic_init_bitmap(&dist->irq_active, nr_cpus, nr_irqs);
	ret |= vgic_init_bitmap(&dist->irq_cfg, nr_cpus, nr_irqs);
	ret |= vgic_init_bytemap(&dist->irq_priority, nr_cpus, nr_irqs);

	if (ret)
		goto out;

	dist->irq_sgi_sources = kzalloc(nr_cpus * VGIC_NR_SGIS, GFP_KERNEL);
	dist->irq_spi_cpu = kzalloc(nr_irqs - VGIC_NR_PRIVATE_IRQS, GFP_KERNEL);
	dist->irq_spi_target = kzalloc(sizeof(*dist->irq_spi_target) * nr_cpus,
				       GFP_KERNEL);
	dist->irq_pending_on_cpu = kzalloc(BITS_TO_LONGS(nr_cpus) * sizeof(long),
					   GFP_KERNEL);
	dist->irq_active_on_cpu = kzalloc(BITS_TO_LONGS(nr_cpus) * sizeof(long),
					  GFP_KERNEL);
	if (!dist->irq_sgi_sources ||
	    !dist->irq_spi_cpu ||
	    !dist->irq_spi_target ||
	    !dist->irq_pending_on_cpu ||
	    !dist->irq_active_on_cpu) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < nr_cpus; i++)
		ret |= vgic_init_bitmap(&dist->irq_spi_target[i],
					nr_cpus, nr_irqs);

	if (ret)
		goto out;

	ret = kvm->arch.vgic.vm_ops.init_model(kvm);
	if (ret)
		goto out;

	kvm_for_each_vcpu(vcpu_id, vcpu, kvm) {
		ret = vgic_vcpu_init_maps(vcpu, nr_irqs);
		if (ret) {
			kvm_err("VGIC: Failed to allocate vcpu memory\n");
			break;
		}

		for (i = 0; i < dist->nr_irqs; i++) {
			if (i < VGIC_NR_PPIS)
				vgic_bitmap_set_irq_val(&dist->irq_enabled,
							vcpu->vcpu_id, i, 1);
			if (i < VGIC_NR_PRIVATE_IRQS)
				vgic_bitmap_set_irq_val(&dist->irq_cfg,
							vcpu->vcpu_id, i,
							VGIC_CFG_EDGE);
		}

		vgic_enable(vcpu);
	}

out:
	if (ret)
		kvm_vgic_destroy(kvm);

	return ret;
}
static int init_vgic_model(struct kvm *kvm, int type)
{
	switch (type) {
	case KVM_DEV_TYPE_ARM_VGIC_V2:
		vgic_v2_init_emulation(kvm);
		break;
#ifdef CONFIG_ARM_GIC_V3
	case KVM_DEV_TYPE_ARM_VGIC_V3:
		vgic_v3_init_emulation(kvm);
		break;
#endif
	default:
		return -ENODEV;
	}

	if (atomic_read(&kvm->online_vcpus) > kvm->arch.max_vcpus)
		return -E2BIG;

	return 0;
}
/**
 * kvm_vgic_early_init - Earliest possible vgic initialization stage
 *
 * No memory allocation should be performed here, only static init.
 */
void kvm_vgic_early_init(struct kvm *kvm)
{
	spin_lock_init(&kvm->arch.vgic.lock);
	spin_lock_init(&kvm->arch.vgic.irq_phys_map_lock);
	INIT_LIST_HEAD(&kvm->arch.vgic.irq_phys_map_list);
}
int kvm_vgic_create(struct kvm *kvm, u32 type)
{
	int i, vcpu_lock_idx = -1, ret;
	struct kvm_vcpu *vcpu;

	mutex_lock(&kvm->lock);

	if (irqchip_in_kernel(kvm)) {
		ret = -EEXIST;
		goto out;
	}

	/*
	 * This function is also called by the KVM_CREATE_IRQCHIP handler,
	 * which had no chance yet to check the availability of the GICv2
	 * emulation. So check this here again. KVM_CREATE_DEVICE does
	 * the proper checks already.
	 */
	if (type == KVM_DEV_TYPE_ARM_VGIC_V2 && !vgic->can_emulate_gicv2) {
		ret = -ENODEV;
		goto out;
	}

	/*
	 * Any time a vcpu is run, vcpu_load is called which tries to grab the
	 * vcpu->mutex.  By grabbing the vcpu->mutex of all VCPUs we ensure
	 * that no other VCPUs are run while we create the vgic.
	 */
	ret = -EBUSY;
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!mutex_trylock(&vcpu->mutex))
			goto out_unlock;
		vcpu_lock_idx = i;
	}

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (vcpu->arch.has_run_once)
			goto out_unlock;
	}
	ret = 0;

	ret = init_vgic_model(kvm, type);
	if (ret)
		goto out_unlock;

	kvm->arch.vgic.in_kernel = true;
	kvm->arch.vgic.vgic_model = type;
	kvm->arch.vgic.vctrl_base = vgic->vctrl_base;
	kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF;
	kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF;
	kvm->arch.vgic.vgic_redist_base = VGIC_ADDR_UNDEF;

out_unlock:
	for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
		vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
		mutex_unlock(&vcpu->mutex);
	}

out:
	mutex_unlock(&kvm->lock);
	return ret;
}
static int vgic_ioaddr_overlap(struct kvm *kvm)
{
	phys_addr_t dist = kvm->arch.vgic.vgic_dist_base;
	phys_addr_t cpu = kvm->arch.vgic.vgic_cpu_base;

	if (IS_VGIC_ADDR_UNDEF(dist) || IS_VGIC_ADDR_UNDEF(cpu))
		return 0;
	if ((dist <= cpu && dist + KVM_VGIC_V2_DIST_SIZE > cpu) ||
	    (cpu <= dist && cpu + KVM_VGIC_V2_CPU_SIZE > dist))
		return -EBUSY;
	return 0;
}

static int vgic_ioaddr_assign(struct kvm *kvm, phys_addr_t *ioaddr,
			      phys_addr_t addr, phys_addr_t size)
{
	int ret;

	if (addr & ~KVM_PHYS_MASK)
		return -E2BIG;

	if (addr & (SZ_4K - 1))
		return -EINVAL;

	if (!IS_VGIC_ADDR_UNDEF(*ioaddr))
		return -EEXIST;
	if (addr + size < addr)
		return -EINVAL;

	*ioaddr = addr;
	ret = vgic_ioaddr_overlap(kvm);
	if (ret)
		*ioaddr = VGIC_ADDR_UNDEF;

	return ret;
}
/**
 * kvm_vgic_addr - set or get vgic VM base addresses
 * @kvm:   pointer to the vm struct
 * @type:  the VGIC addr type, one of KVM_VGIC_V[23]_ADDR_TYPE_XXX
 * @addr:  pointer to address value
 * @write: if true set the address in the VM address space, if false read the
 *         address
 *
 * Set or get the vgic base addresses for the distributor and the virtual CPU
 * interface in the VM physical address space.  These addresses are properties
 * of the emulated core/SoC and therefore user space initially knows this
 * information.
 */
int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
{
	int r = 0;
	struct vgic_dist *vgic = &kvm->arch.vgic;
	int type_needed;
	phys_addr_t *addr_ptr, block_size;
	phys_addr_t alignment;

	mutex_lock(&kvm->lock);
	switch (type) {
	case KVM_VGIC_V2_ADDR_TYPE_DIST:
		type_needed = KVM_DEV_TYPE_ARM_VGIC_V2;
		addr_ptr = &vgic->vgic_dist_base;
		block_size = KVM_VGIC_V2_DIST_SIZE;
		alignment = SZ_4K;
		break;
	case KVM_VGIC_V2_ADDR_TYPE_CPU:
		type_needed = KVM_DEV_TYPE_ARM_VGIC_V2;
		addr_ptr = &vgic->vgic_cpu_base;
		block_size = KVM_VGIC_V2_CPU_SIZE;
		alignment = SZ_4K;
		break;
#ifdef CONFIG_ARM_GIC_V3
	case KVM_VGIC_V3_ADDR_TYPE_DIST:
		type_needed = KVM_DEV_TYPE_ARM_VGIC_V3;
		addr_ptr = &vgic->vgic_dist_base;
		block_size = KVM_VGIC_V3_DIST_SIZE;
		alignment = SZ_64K;
		break;
	case KVM_VGIC_V3_ADDR_TYPE_REDIST:
		type_needed = KVM_DEV_TYPE_ARM_VGIC_V3;
		addr_ptr = &vgic->vgic_redist_base;
		block_size = KVM_VGIC_V3_REDIST_SIZE;
		alignment = SZ_64K;
		break;
#endif
	default:
		r = -ENODEV;
		goto out;
	}

	if (vgic->vgic_model != type_needed) {
		r = -ENODEV;
		goto out;
	}

	if (write) {
		if (!IS_ALIGNED(*addr, alignment))
			r = -EINVAL;
		else
			r = vgic_ioaddr_assign(kvm, addr_ptr, *addr,
					       block_size);
	} else {
		*addr = *addr_ptr;
	}

out:
	mutex_unlock(&kvm->lock);
	return r;
}
int vgic_set_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int r;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		u64 addr;
		unsigned long type = (unsigned long)attr->attr;

		if (copy_from_user(&addr, uaddr, sizeof(addr)))
			return -EFAULT;

		r = kvm_vgic_addr(dev->kvm, type, &addr, true);
		return (r == -ENODEV) ? -ENXIO : r;
	}
	case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
		u32 val;
		int ret = 0;

		if (get_user(val, uaddr))
			return -EFAULT;

		/*
		 * We require:
		 * - at least 32 SPIs on top of the 16 SGIs and 16 PPIs
		 * - at most 1024 interrupts
		 * - a multiple of 32 interrupts
		 */
		if (val < (VGIC_NR_PRIVATE_IRQS + 32) ||
		    val > VGIC_MAX_IRQS ||
		    (val & 31))
			return -EINVAL;

		mutex_lock(&dev->kvm->lock);

		if (vgic_ready(dev->kvm) || dev->kvm->arch.vgic.nr_irqs)
			ret = -EBUSY;
		else
			dev->kvm->arch.vgic.nr_irqs = val;

		mutex_unlock(&dev->kvm->lock);

		return ret;
	}
	case KVM_DEV_ARM_VGIC_GRP_CTRL: {
		switch (attr->attr) {
		case KVM_DEV_ARM_VGIC_CTRL_INIT:
			r = vgic_init(dev->kvm);
			return r;
		}
		break;
	}
	}

	return -ENXIO;
}
int vgic_get_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int r = -ENXIO;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		u64 addr;
		unsigned long type = (unsigned long)attr->attr;

		r = kvm_vgic_addr(dev->kvm, type, &addr, false);
		if (r)
			return (r == -ENODEV) ? -ENXIO : r;

		if (copy_to_user(uaddr, &addr, sizeof(addr)))
			return -EFAULT;
		break;
	}
	case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;

		r = put_user(dev->kvm->arch.vgic.nr_irqs, uaddr);
		break;
	}
	}

	return r;
}
int vgic_has_attr_regs(const struct vgic_io_range *ranges, phys_addr_t offset)
{
	if (vgic_find_range(ranges, 4, offset))
		return 0;
	else
		return -ENXIO;
}

static void vgic_init_maintenance_interrupt(void *info)
{
	enable_percpu_irq(vgic->maint_irq, 0);
}
static int vgic_cpu_notify(struct notifier_block *self,
			   unsigned long action, void *cpu)
{
	switch (action) {
	case CPU_STARTING:
	case CPU_STARTING_FROZEN:
		vgic_init_maintenance_interrupt(NULL);
		break;
	case CPU_DYING:
	case CPU_DYING_FROZEN:
		disable_percpu_irq(vgic->maint_irq);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block vgic_cpu_nb = {
	.notifier_call = vgic_cpu_notify,
};
static const struct of_device_id vgic_ids[] = {
	{ .compatible = "arm,cortex-a15-gic",	.data = vgic_v2_probe, },
	{ .compatible = "arm,cortex-a7-gic",	.data = vgic_v2_probe, },
	{ .compatible = "arm,gic-400",		.data = vgic_v2_probe, },
	{ .compatible = "arm,gic-v3",		.data = vgic_v3_probe, },
	{},
};
int kvm_vgic_hyp_init(void)
{
	const struct of_device_id *matched_id;
	const int (*vgic_probe)(struct device_node *,const struct vgic_ops **,
				const struct vgic_params **);
	struct device_node *vgic_node;
	int ret;

	vgic_node = of_find_matching_node_and_match(NULL,
						    vgic_ids, &matched_id);
	if (!vgic_node) {
		kvm_err("error: no compatible GIC node found\n");
		return -ENODEV;
	}

	vgic_probe = matched_id->data;
	ret = vgic_probe(vgic_node, &vgic_ops, &vgic);
	if (ret)
		return ret;

	ret = request_percpu_irq(vgic->maint_irq, vgic_maintenance_handler,
				 "vgic", kvm_get_running_vcpus());
	if (ret) {
		kvm_err("Cannot register interrupt %d\n", vgic->maint_irq);
		return ret;
	}

	ret = __register_cpu_notifier(&vgic_cpu_nb);
	if (ret) {
		kvm_err("Cannot register vgic CPU notifier\n");
		goto out_free_irq;
	}

	on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);

	return 0;

out_free_irq:
	free_percpu_irq(vgic->maint_irq, kvm_get_running_vcpus());
	return ret;
}
int kvm_irq_map_gsi(struct kvm *kvm,
		    struct kvm_kernel_irq_routing_entry *entries,
		    int gsi)
{
	return 0;
}

int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
	return pin;
}

int kvm_set_irq(struct kvm *kvm, int irq_source_id,
		u32 irq, int level, bool line_status)
{
	unsigned int spi = irq + VGIC_NR_PRIVATE_IRQS;

	trace_kvm_set_irq(irq, level, irq_source_id);

	BUG_ON(!vgic_initialized(kvm));

	return kvm_vgic_inject_irq(kvm, 0, spi, level);
}

/* MSI not implemented yet */
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
		struct kvm *kvm, int irq_source_id,
		int level, bool line_status)
{
	return 0;
}