/*
 * Copyright (C) 2012 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/uaccess.h>

#include <linux/irqchip/arm-gic.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
#include <trace/events/kvm.h>

#include <kvm/iodev.h>
/*
 * How the whole thing works (courtesy of Christoffer Dall):
 *
 * - At any time, the dist->irq_pending_on_cpu is the oracle that knows if
 *   something is pending on the CPU interface.
 * - Interrupts that are pending on the distributor are stored on the
 *   vgic.irq_pending vgic bitmap (this bitmap is updated by both user land
 *   ioctls and guest mmio ops, and other in-kernel peripherals such as the
 *   arch. timers).
 * - Every time the bitmap changes, the irq_pending_on_cpu oracle is
 *   recalculated.
 * - To calculate the oracle, we need info for each cpu from
 *   compute_pending_for_cpu, which considers:
 *   - PPI: dist->irq_pending & dist->irq_enable
 *   - SPI: dist->irq_pending & dist->irq_enable & dist->irq_spi_target
 *   - irq_spi_target is a 'formatted' version of the GICD_ITARGETSRn
 *     registers, stored on each vcpu. We only keep one bit of
 *     information per interrupt, making sure that only one vcpu can
 *     accept the interrupt.
 * - If any of the above state changes, we must recalculate the oracle.
 * - The same is true when injecting an interrupt, except that we only
 *   consider a single interrupt at a time. The irq_spi_cpu array
 *   contains the target CPU for each SPI.
 *
 * The handling of level interrupts adds some extra complexity. We
 * need to track when the interrupt has been EOIed, so we can sample
 * the 'line' again. This is achieved as such:
 *
 * - When a level interrupt is moved onto a vcpu, the corresponding
 *   bit in irq_queued is set. As long as this bit is set, the line
 *   will be ignored for further interrupts. The interrupt is injected
 *   into the vcpu with the GICH_LR_EOI bit set (generate a
 *   maintenance interrupt on EOI).
 * - When the interrupt is EOIed, the maintenance interrupt fires,
 *   and clears the corresponding bit in irq_queued. This allows the
 *   interrupt line to be sampled again.
 * - Note that level-triggered interrupts can also be set to pending from
 *   writes to GICD_ISPENDRn and lowering the external input line does not
 *   cause the interrupt to become inactive in such a situation.
 *   Conversely, writes to GICD_ICPENDRn do not cause the interrupt to become
 *   inactive as long as the external input line is held high.
 */
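/*
 * Illustrative sketch (not part of the original file): the per-VCPU part
 * of the oracle described above boils down to the two mask operations
 * below.  The helper name and the flattened parameters are hypothetical;
 * the real implementation is compute_pending_for_cpu() further down.
 */
static inline bool example_vcpu_has_pending(unsigned long ppi_pending,
					    unsigned long ppi_enabled,
					    unsigned long spi_pending,
					    unsigned long spi_enabled,
					    unsigned long spi_targets_this_vcpu)
{
	/* PPI: dist->irq_pending & dist->irq_enable */
	bool ppi = ppi_pending & ppi_enabled;
	/* SPI: dist->irq_pending & dist->irq_enable & dist->irq_spi_target */
	bool spi = spi_pending & spi_enabled & spi_targets_this_vcpu;

	return ppi || spi;
}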
static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu);
static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu);
static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr);
static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr, struct vgic_lr lr_desc);

static const struct vgic_ops *vgic_ops;
static const struct vgic_params *vgic;
static void add_sgi_source(struct kvm_vcpu *vcpu, int irq, int source)
{
	vcpu->kvm->arch.vgic.vm_ops.add_sgi_source(vcpu, irq, source);
}

static bool queue_sgi(struct kvm_vcpu *vcpu, int irq)
{
	return vcpu->kvm->arch.vgic.vm_ops.queue_sgi(vcpu, irq);
}

int kvm_vgic_map_resources(struct kvm *kvm)
{
	return kvm->arch.vgic.vm_ops.map_resources(kvm, vgic);
}
/*
 * struct vgic_bitmap contains a bitmap made of unsigned longs, but
 * extracts u32s out of them.
 *
 * This does not work on 64-bit BE systems, because the bitmap access
 * will store two consecutive 32-bit words with the higher-addressed
 * register's bits at the lower index and the lower-addressed register's
 * bits at the higher index.
 *
 * Therefore, swizzle the register index when accessing the 32-bit word
 * registers to access the right register's value.
 */
#if defined(CONFIG_CPU_BIG_ENDIAN) && BITS_PER_LONG == 64
#define REG_OFFSET_SWIZZLE	1
#else
#define REG_OFFSET_SWIZZLE	0
#endif
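/*
 * Worked example (illustrative, not from the original source): on a
 * 64-bit BE host each unsigned long holds two of the guest-visible
 * 32-bit registers, but their order within the long is swapped compared
 * to a LE layout: register 2n is found at word index 2n + 1 and vice
 * versa.  XOR-ing the word index with REG_OFFSET_SWIZZLE (1 there, 0
 * everywhere else) picks the correct half.  The helper name below is
 * hypothetical and only demonstrates the index transformation.
 */
static inline u32 *example_swizzled_word(unsigned long *longs, int word_index)
{
	return (u32 *)longs + (word_index ^ REG_OFFSET_SWIZZLE);
}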
static int vgic_init_bitmap(struct vgic_bitmap *b, int nr_cpus, int nr_irqs)
{
	int nr_longs;

	nr_longs = nr_cpus + BITS_TO_LONGS(nr_irqs - VGIC_NR_PRIVATE_IRQS);

	b->private = kzalloc(sizeof(unsigned long) * nr_longs, GFP_KERNEL);
	if (!b->private)
		return -ENOMEM;

	b->shared = b->private + nr_cpus;

	return 0;
}

static void vgic_free_bitmap(struct vgic_bitmap *b)
{
	kfree(b->private);
	b->private = NULL;
	b->shared = NULL;
}
/*
 * Call this function to convert a u64 value to an unsigned long * bitmask
 * in a way that works on both 32-bit and 64-bit LE and BE platforms.
 *
 * Warning: Calling this function may modify *val.
 */
static unsigned long *u64_to_bitmask(u64 *val)
{
#if defined(CONFIG_CPU_BIG_ENDIAN) && BITS_PER_LONG == 32
	*val = (*val >> 32) | (*val << 32);
#endif
	return (unsigned long *)val;
}
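/*
 * Illustrative sketch (hypothetical, not part of the original file):
 * counting the bits set in a hardware-provided u64 (an ELRSR or EISR
 * value, for instance) by walking it with the generic bitmap helpers
 * after conversion.  On 32-bit BE the two halves are swapped first so
 * that bit N of the u64 really is bit N of the resulting bitmap.
 */
static inline int example_count_u64_bits(u64 val)
{
	unsigned long *bits = u64_to_bitmask(&val);
	int i, count = 0;

	for_each_set_bit(i, bits, 64)
		count++;

	return count;
}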
u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x, int cpuid, u32 offset)
{
	offset >>= 2;
	if (!offset)
		return (u32 *)(x->private + cpuid) + REG_OFFSET_SWIZZLE;
	else
		return (u32 *)(x->shared) + ((offset - 1) ^ REG_OFFSET_SWIZZLE);
}

static int vgic_bitmap_get_irq_val(struct vgic_bitmap *x,
				   int cpuid, int irq)
{
	if (irq < VGIC_NR_PRIVATE_IRQS)
		return test_bit(irq, x->private + cpuid);

	return test_bit(irq - VGIC_NR_PRIVATE_IRQS, x->shared);
}

void vgic_bitmap_set_irq_val(struct vgic_bitmap *x, int cpuid,
			     int irq, int val)
{
	unsigned long *reg;

	if (irq < VGIC_NR_PRIVATE_IRQS) {
		reg = x->private + cpuid;
	} else {
		reg = x->shared;
		irq -= VGIC_NR_PRIVATE_IRQS;
	}

	if (val)
		set_bit(irq, reg);
	else
		clear_bit(irq, reg);
}

static unsigned long *vgic_bitmap_get_cpu_map(struct vgic_bitmap *x, int cpuid)
{
	return x->private + cpuid;
}

unsigned long *vgic_bitmap_get_shared_map(struct vgic_bitmap *x)
{
	return x->shared;
}
static int vgic_init_bytemap(struct vgic_bytemap *x, int nr_cpus, int nr_irqs)
{
	int size;

	size  = nr_cpus * VGIC_NR_PRIVATE_IRQS;
	size += nr_irqs - VGIC_NR_PRIVATE_IRQS;

	x->private = kzalloc(size, GFP_KERNEL);
	if (!x->private)
		return -ENOMEM;

	x->shared = x->private + nr_cpus * VGIC_NR_PRIVATE_IRQS / sizeof(u32);

	return 0;
}

static void vgic_free_bytemap(struct vgic_bytemap *b)
{
	kfree(b->private);
	b->private = NULL;
	b->shared = NULL;
}

u32 *vgic_bytemap_get_reg(struct vgic_bytemap *x, int cpuid, u32 offset)
{
	u32 *reg;

	if (offset < VGIC_NR_PRIVATE_IRQS) {
		reg = x->private;
		offset += cpuid * VGIC_NR_PRIVATE_IRQS;
	} else {
		reg = x->shared;
		offset -= VGIC_NR_PRIVATE_IRQS;
	}

	return reg + (offset / sizeof(u32));
}
#define VGIC_CFG_LEVEL	0
#define VGIC_CFG_EDGE	1

static bool vgic_irq_is_edge(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int irq_val;

	irq_val = vgic_bitmap_get_irq_val(&dist->irq_cfg, vcpu->vcpu_id, irq);
	return irq_val == VGIC_CFG_EDGE;
}

static int vgic_irq_is_enabled(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return vgic_bitmap_get_irq_val(&dist->irq_enabled, vcpu->vcpu_id, irq);
}

static int vgic_irq_is_queued(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return vgic_bitmap_get_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq);
}

static int vgic_irq_is_active(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return vgic_bitmap_get_irq_val(&dist->irq_active, vcpu->vcpu_id, irq);
}
static void vgic_irq_set_queued(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq, 1);
}

static void vgic_irq_clear_queued(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq, 0);
}

static void vgic_irq_set_active(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_active, vcpu->vcpu_id, irq, 1);
}

static void vgic_irq_clear_active(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_active, vcpu->vcpu_id, irq, 0);
}

static int vgic_dist_irq_get_level(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return vgic_bitmap_get_irq_val(&dist->irq_level, vcpu->vcpu_id, irq);
}

static void vgic_dist_irq_set_level(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_level, vcpu->vcpu_id, irq, 1);
}

static void vgic_dist_irq_clear_level(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_level, vcpu->vcpu_id, irq, 0);
}
static int vgic_dist_irq_soft_pend(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return vgic_bitmap_get_irq_val(&dist->irq_soft_pend, vcpu->vcpu_id, irq);
}

static void vgic_dist_irq_clear_soft_pend(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_soft_pend, vcpu->vcpu_id, irq, 0);
}

static int vgic_dist_irq_is_pending(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return vgic_bitmap_get_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq);
}

void vgic_dist_irq_set_pending(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq, 1);
}

void vgic_dist_irq_clear_pending(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq, 0);
}
static void vgic_cpu_irq_set(struct kvm_vcpu *vcpu, int irq)
{
	if (irq < VGIC_NR_PRIVATE_IRQS)
		set_bit(irq, vcpu->arch.vgic_cpu.pending_percpu);
	else
		set_bit(irq - VGIC_NR_PRIVATE_IRQS,
			vcpu->arch.vgic_cpu.pending_shared);
}

void vgic_cpu_irq_clear(struct kvm_vcpu *vcpu, int irq)
{
	if (irq < VGIC_NR_PRIVATE_IRQS)
		clear_bit(irq, vcpu->arch.vgic_cpu.pending_percpu);
	else
		clear_bit(irq - VGIC_NR_PRIVATE_IRQS,
			  vcpu->arch.vgic_cpu.pending_shared);
}

static bool vgic_can_sample_irq(struct kvm_vcpu *vcpu, int irq)
{
	return vgic_irq_is_edge(vcpu, irq) || !vgic_irq_is_queued(vcpu, irq);
}
/**
 * vgic_reg_access - access vgic register
 * @mmio:   pointer to the data describing the mmio access
 * @reg:    pointer to the virtual backing of vgic distributor data
 * @offset: least significant 2 bits used for word offset
 * @mode:   ACCESS_ mode (see defines above)
 *
 * Helper to make vgic register access easier using one of the access
 * modes defined for vgic register access
 * (read,raz,write-ignored,setbit,clearbit,write)
 */
void vgic_reg_access(struct kvm_exit_mmio *mmio, u32 *reg,
		     phys_addr_t offset, int mode)
{
	int word_offset = (offset & 3) * 8;
	u32 mask = (1UL << (mmio->len * 8)) - 1;
	u32 regval;

	/*
	 * Any alignment fault should have been delivered to the guest
	 * directly (ARM ARM B3.12.7 "Prioritization of aborts").
	 */

	if (reg) {
		regval = *reg;
	} else {
		BUG_ON(mode != (ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED));
		regval = 0;
	}

	if (mmio->is_write) {
		u32 data = mmio_data_read(mmio, mask) << word_offset;
		switch (ACCESS_WRITE_MASK(mode)) {
		case ACCESS_WRITE_IGNORED:
			return;

		case ACCESS_WRITE_SETBIT:
			regval |= data;
			break;

		case ACCESS_WRITE_CLEARBIT:
			regval &= ~data;
			break;

		case ACCESS_WRITE_VALUE:
			regval = (regval & ~(mask << word_offset)) | data;
			break;
		}
		*reg = regval;
	} else {
		switch (ACCESS_READ_MASK(mode)) {
		case ACCESS_READ_RAZ:
			regval = 0;
			/* fall through */

		case ACCESS_READ_VALUE:
			mmio_data_write(mmio, mask, regval >> word_offset);
		}
	}
}
bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
			phys_addr_t offset)
{
	vgic_reg_access(mmio, NULL, offset,
			ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
	return false;
}
bool vgic_handle_enable_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio,
			    phys_addr_t offset, int vcpu_id, int access)
{
	u32 *reg;
	int mode = ACCESS_READ_VALUE | access;
	struct kvm_vcpu *target_vcpu = kvm_get_vcpu(kvm, vcpu_id);

	reg = vgic_bitmap_get_reg(&kvm->arch.vgic.irq_enabled, vcpu_id, offset);
	vgic_reg_access(mmio, reg, offset, mode);
	if (mmio->is_write) {
		if (access & ACCESS_WRITE_CLEARBIT) {
			if (offset < 4) /* Force SGI enabled */
				*reg |= 0xffff;
			vgic_retire_disabled_irqs(target_vcpu);
		}
		vgic_update_state(kvm);
		return true;
	}

	return false;
}
bool vgic_handle_set_pending_reg(struct kvm *kvm,
				 struct kvm_exit_mmio *mmio,
				 phys_addr_t offset, int vcpu_id)
{
	u32 *reg, orig;
	u32 level_mask;
	int mode = ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT;
	struct vgic_dist *dist = &kvm->arch.vgic;

	reg = vgic_bitmap_get_reg(&dist->irq_cfg, vcpu_id, offset);
	level_mask = (~(*reg));

	/* Mark both level and edge triggered irqs as pending */
	reg = vgic_bitmap_get_reg(&dist->irq_pending, vcpu_id, offset);
	orig = *reg;
	vgic_reg_access(mmio, reg, offset, mode);

	if (mmio->is_write) {
		/* Set the soft-pending flag only for level-triggered irqs */
		reg = vgic_bitmap_get_reg(&dist->irq_soft_pend,
					  vcpu_id, offset);
		vgic_reg_access(mmio, reg, offset, mode);
		*reg &= level_mask;

		/* Ignore writes to SGIs */
		if (offset < 2) {
			*reg &= ~0xffff;
			*reg |= orig & 0xffff;
		}

		vgic_update_state(kvm);
		return true;
	}

	return false;
}
bool vgic_handle_clear_pending_reg(struct kvm *kvm,
				   struct kvm_exit_mmio *mmio,
				   phys_addr_t offset, int vcpu_id)
{
	u32 *level_active;
	u32 *reg, orig;
	int mode = ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT;
	struct vgic_dist *dist = &kvm->arch.vgic;

	reg = vgic_bitmap_get_reg(&dist->irq_pending, vcpu_id, offset);
	orig = *reg;
	vgic_reg_access(mmio, reg, offset, mode);
	if (mmio->is_write) {
		/* Re-set level triggered level-active interrupts */
		level_active = vgic_bitmap_get_reg(&dist->irq_level,
						   vcpu_id, offset);
		reg = vgic_bitmap_get_reg(&dist->irq_pending, vcpu_id, offset);
		*reg |= *level_active;

		/* Ignore writes to SGIs */
		if (offset < 2) {
			*reg &= ~0xffff;
			*reg |= orig & 0xffff;
		}

		/* Clear soft-pending flags */
		reg = vgic_bitmap_get_reg(&dist->irq_soft_pend,
					  vcpu_id, offset);
		vgic_reg_access(mmio, reg, offset, mode);

		vgic_update_state(kvm);
		return true;
	}
	return false;
}
bool vgic_handle_set_active_reg(struct kvm *kvm,
				struct kvm_exit_mmio *mmio,
				phys_addr_t offset, int vcpu_id)
{
	u32 *reg;
	struct vgic_dist *dist = &kvm->arch.vgic;

	reg = vgic_bitmap_get_reg(&dist->irq_active, vcpu_id, offset);
	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);

	if (mmio->is_write) {
		vgic_update_state(kvm);
		return true;
	}

	return false;
}

bool vgic_handle_clear_active_reg(struct kvm *kvm,
				  struct kvm_exit_mmio *mmio,
				  phys_addr_t offset, int vcpu_id)
{
	u32 *reg;
	struct vgic_dist *dist = &kvm->arch.vgic;

	reg = vgic_bitmap_get_reg(&dist->irq_active, vcpu_id, offset);
	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);

	if (mmio->is_write) {
		vgic_update_state(kvm);
		return true;
	}

	return false;
}
static u32 vgic_cfg_expand(u16 val)
{
	u32 res = 0;
	int i;

	/*
	 * Turn a 16bit value like abcd...mnop into a 32bit word
	 * a0b0c0d0...m0n0o0p0, which is what the HW cfg register is.
	 */
	for (i = 0; i < 16; i++)
		res |= ((val >> i) & VGIC_CFG_EDGE) << (2 * i + 1);

	return res;
}

static u16 vgic_cfg_compress(u32 val)
{
	u16 res = 0;
	int i;

	/*
	 * Turn a 32bit word a0b0c0d0...m0n0o0p0 into 16bit value like
	 * abcd...mnop which is what we really care about.
	 */
	for (i = 0; i < 16; i++)
		res |= ((val >> (i * 2 + 1)) & VGIC_CFG_EDGE) << i;

	return res;
}

/*
 * The distributor uses 2 bits per IRQ for the CFG register, but the
 * LSB is always 0. As such, we only keep the upper bit, and use the
 * two above functions to compress/expand the bits
 */
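/*
 * Worked example (illustrative, not from the original source): with the
 * packed value 0b0101 (IRQs 0 and 2 edge-triggered), vgic_cfg_expand()
 * yields 0b00100010, i.e. bit (2*i + 1) set for each edge-configured
 * interrupt i, which matches the hardware GICD_ICFGRn layout.
 * vgic_cfg_compress() maps that 32-bit layout back to 0b0101.
 */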
bool vgic_handle_cfg_reg(u32 *reg, struct kvm_exit_mmio *mmio,
			 phys_addr_t offset)
{
	u32 val;

	if (offset & 4)
		val = *reg >> 16;
	else
		val = *reg & 0xffff;

	val = vgic_cfg_expand(val);
	vgic_reg_access(mmio, &val, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
	if (mmio->is_write) {
		if (offset < 8) {
			*reg = ~0U; /* Force PPIs/SGIs to 1 */
			return false;
		}

		val = vgic_cfg_compress(val);
		if (offset & 4) {
			*reg &= 0xffff;
			*reg |= val << 16;
		} else {
			*reg &= 0xffff << 16;
			*reg |= val;
		}
	}

	return false;
}
/**
 * vgic_unqueue_irqs - move pending/active IRQs from LRs to the distributor
 * @vgic_cpu: Pointer to the vgic_cpu struct holding the LRs
 *
 * Move any IRQs that have already been assigned to LRs back to the
 * emulated distributor state so that the complete emulated state can be read
 * from the main emulation structures without investigating the LRs.
 */
void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	int i;

	for_each_set_bit(i, vgic_cpu->lr_used, vgic_cpu->nr_lr) {
		struct vgic_lr lr = vgic_get_lr(vcpu, i);

		/*
		 * There are three options for the state bits:
		 *
		 * 01: pending
		 * 10: active
		 * 11: pending and active
		 */
		BUG_ON(!(lr.state & LR_STATE_MASK));

		/* Reestablish SGI source for pending and active IRQs */
		if (lr.irq < VGIC_NR_SGIS)
			add_sgi_source(vcpu, lr.irq, lr.source);

		/*
		 * If the LR holds an active (10) or a pending and active (11)
		 * interrupt then move the active state to the
		 * distributor tracking bit.
		 */
		if (lr.state & LR_STATE_ACTIVE) {
			vgic_irq_set_active(vcpu, lr.irq);
			lr.state &= ~LR_STATE_ACTIVE;
		}

		/*
		 * Reestablish the pending state on the distributor and the
		 * CPU interface.  It may have already been pending, but that
		 * is fine, then we are only setting a few bits that were
		 * already set.
		 */
		if (lr.state & LR_STATE_PENDING) {
			vgic_dist_irq_set_pending(vcpu, lr.irq);
			lr.state &= ~LR_STATE_PENDING;
		}

		vgic_set_lr(vcpu, i, lr);

		/*
		 * Mark the LR as free for other use.
		 */
		BUG_ON(lr.state & LR_STATE_MASK);
		vgic_retire_lr(i, lr.irq, vcpu);
		vgic_irq_clear_queued(vcpu, lr.irq);

		/* Finally update the VGIC state. */
		vgic_update_state(vcpu->kvm);
	}
}
const struct vgic_io_range *vgic_find_range(const struct vgic_io_range *ranges,
					    int len, gpa_t offset)
{
	while (ranges->len) {
		if (offset >= ranges->base &&
		    (offset + len) <= (ranges->base + ranges->len))
			return ranges;
		ranges++;
	}

	return NULL;
}

static bool vgic_validate_access(const struct vgic_dist *dist,
				 const struct vgic_io_range *range,
				 unsigned long offset)
{
	int irq;

	if (!range->bits_per_irq)
		return true;	/* Not an irq-based access */

	irq = offset * 8 / range->bits_per_irq;
	if (irq >= dist->nr_irqs)
		return false;

	return true;
}
/*
 * Call the respective handler function for the given range.
 * We split up any 64 bit accesses into two consecutive 32 bit
 * handler calls and merge the result afterwards.
 * We do this in a little endian fashion regardless of the host's
 * or guest's endianness, because the GIC is always LE and the rest of
 * the code (vgic_reg_access) also puts it in a LE fashion already.
 * At this point we have already identified the handle function, so
 * range points to that one entry and offset is relative to this.
 */
static bool call_range_handler(struct kvm_vcpu *vcpu,
			       struct kvm_exit_mmio *mmio,
			       unsigned long offset,
			       const struct vgic_io_range *range)
{
	struct kvm_exit_mmio mmio32;
	bool ret;

	if (likely(mmio->len <= 4))
		return range->handle_mmio(vcpu, mmio, offset);

	/*
	 * Any access bigger than 4 bytes (that we currently handle in KVM)
	 * is actually 8 bytes long, caused by a 64-bit access
	 */

	mmio32.len = 4;
	mmio32.is_write = mmio->is_write;
	mmio32.private = mmio->private;

	mmio32.phys_addr = mmio->phys_addr + 4;
	mmio32.data = &((u32 *)mmio->data)[1];
	ret = range->handle_mmio(vcpu, &mmio32, offset + 4);

	mmio32.phys_addr = mmio->phys_addr;
	mmio32.data = &((u32 *)mmio->data)[0];
	ret |= range->handle_mmio(vcpu, &mmio32, offset);

	return ret;
}
/**
 * vgic_handle_mmio_access - handle an in-kernel MMIO access
 * This is called by the read/write KVM IO device wrappers below.
 * @vcpu:     pointer to the vcpu performing the access
 * @this:     pointer to the KVM IO device in charge
 * @addr:     guest physical address of the access
 * @len:      size of the access
 * @val:      pointer to the data region
 * @is_write: read or write access
 *
 * returns true if the MMIO access could be performed
 */
static int vgic_handle_mmio_access(struct kvm_vcpu *vcpu,
				   struct kvm_io_device *this, gpa_t addr,
				   int len, void *val, bool is_write)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	struct vgic_io_device *iodev = container_of(this,
						    struct vgic_io_device, dev);
	struct kvm_run *run = vcpu->run;
	const struct vgic_io_range *range;
	struct kvm_exit_mmio mmio;
	bool updated_state;
	gpa_t offset;

	offset = addr - iodev->addr;
	range = vgic_find_range(iodev->reg_ranges, len, offset);
	if (unlikely(!range || !range->handle_mmio)) {
		pr_warn("Unhandled access %d %08llx %d\n", is_write, addr, len);
		return -ENXIO;
	}

	mmio.phys_addr = addr;
	mmio.len = len;
	mmio.is_write = is_write;
	mmio.data = val;
	mmio.private = iodev->redist_vcpu;

	spin_lock(&dist->lock);
	offset -= range->base;
	if (vgic_validate_access(dist, range, offset)) {
		updated_state = call_range_handler(vcpu, &mmio, offset, range);
	} else {
		if (!is_write)
			memset(val, 0, len);
		updated_state = false;
	}
	spin_unlock(&dist->lock);
	run->mmio.is_write	= is_write;
	run->mmio.len		= len;
	run->mmio.phys_addr	= addr;
	memcpy(run->mmio.data, val, len);

	kvm_handle_mmio_return(vcpu, run);

	if (updated_state)
		vgic_kick_vcpus(vcpu->kvm);

	return 0;
}
static int vgic_handle_mmio_read(struct kvm_vcpu *vcpu,
				 struct kvm_io_device *this,
				 gpa_t addr, int len, void *val)
{
	return vgic_handle_mmio_access(vcpu, this, addr, len, val, false);
}

static int vgic_handle_mmio_write(struct kvm_vcpu *vcpu,
				  struct kvm_io_device *this,
				  gpa_t addr, int len, const void *val)
{
	return vgic_handle_mmio_access(vcpu, this, addr, len, (void *)val,
				       true);
}

struct kvm_io_device_ops vgic_io_ops = {
	.read	= vgic_handle_mmio_read,
	.write	= vgic_handle_mmio_write,
};
/**
 * vgic_register_kvm_io_dev - register VGIC register frame on the KVM I/O bus
 * @kvm:            The VM structure pointer
 * @base:           The (guest) base address for the register frame
 * @len:            Length of the register frame window
 * @ranges:         Describing the handler functions for each register
 * @redist_vcpu_id: The VCPU ID to pass on to the handlers on call
 * @iodev:          Points to memory to be passed on to the handler
 *
 * @iodev stores the parameters of this function to be usable by the handler
 * respectively the dispatcher function (since the KVM I/O bus framework lacks
 * an opaque parameter). Initialization is done in this function, but the
 * reference should be valid and unique for the whole VGIC lifetime.
 * If the register frame is not mapped for a specific VCPU, pass -1 to
 * @redist_vcpu_id.
 */
int vgic_register_kvm_io_dev(struct kvm *kvm, gpa_t base, int len,
			     const struct vgic_io_range *ranges,
			     int redist_vcpu_id,
			     struct vgic_io_device *iodev)
{
	struct kvm_vcpu *vcpu = NULL;
	int ret;

	if (redist_vcpu_id >= 0)
		vcpu = kvm_get_vcpu(kvm, redist_vcpu_id);

	iodev->addr		= base;
	iodev->len		= len;
	iodev->reg_ranges	= ranges;
	iodev->redist_vcpu	= vcpu;

	kvm_iodevice_init(&iodev->dev, &vgic_io_ops);

	mutex_lock(&kvm->slots_lock);

	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, base, len,
				      &iodev->dev);
	mutex_unlock(&kvm->slots_lock);

	/* Mark the iodev as invalid if registration fails. */
	if (ret)
		iodev->dev.ops = NULL;

	return ret;
}
static int vgic_nr_shared_irqs(struct vgic_dist *dist)
{
	return dist->nr_irqs - VGIC_NR_PRIVATE_IRQS;
}
static int compute_active_for_cpu(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	unsigned long *active, *enabled, *act_percpu, *act_shared;
	unsigned long active_private, active_shared;
	int nr_shared = vgic_nr_shared_irqs(dist);
	int vcpu_id;

	vcpu_id = vcpu->vcpu_id;
	act_percpu = vcpu->arch.vgic_cpu.active_percpu;
	act_shared = vcpu->arch.vgic_cpu.active_shared;

	active = vgic_bitmap_get_cpu_map(&dist->irq_active, vcpu_id);
	enabled = vgic_bitmap_get_cpu_map(&dist->irq_enabled, vcpu_id);
	bitmap_and(act_percpu, active, enabled, VGIC_NR_PRIVATE_IRQS);

	active = vgic_bitmap_get_shared_map(&dist->irq_active);
	enabled = vgic_bitmap_get_shared_map(&dist->irq_enabled);
	bitmap_and(act_shared, active, enabled, nr_shared);
	bitmap_and(act_shared, act_shared,
		   vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]),
		   nr_shared);

	active_private = find_first_bit(act_percpu, VGIC_NR_PRIVATE_IRQS);
	active_shared = find_first_bit(act_shared, nr_shared);

	return (active_private < VGIC_NR_PRIVATE_IRQS ||
		active_shared < nr_shared);
}
static int compute_pending_for_cpu(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	unsigned long *pending, *enabled, *pend_percpu, *pend_shared;
	unsigned long pending_private, pending_shared;
	int nr_shared = vgic_nr_shared_irqs(dist);
	int vcpu_id;

	vcpu_id = vcpu->vcpu_id;
	pend_percpu = vcpu->arch.vgic_cpu.pending_percpu;
	pend_shared = vcpu->arch.vgic_cpu.pending_shared;

	pending = vgic_bitmap_get_cpu_map(&dist->irq_pending, vcpu_id);
	enabled = vgic_bitmap_get_cpu_map(&dist->irq_enabled, vcpu_id);
	bitmap_and(pend_percpu, pending, enabled, VGIC_NR_PRIVATE_IRQS);

	pending = vgic_bitmap_get_shared_map(&dist->irq_pending);
	enabled = vgic_bitmap_get_shared_map(&dist->irq_enabled);
	bitmap_and(pend_shared, pending, enabled, nr_shared);
	bitmap_and(pend_shared, pend_shared,
		   vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]),
		   nr_shared);

	pending_private = find_first_bit(pend_percpu, VGIC_NR_PRIVATE_IRQS);
	pending_shared = find_first_bit(pend_shared, nr_shared);
	return (pending_private < VGIC_NR_PRIVATE_IRQS ||
		pending_shared < vgic_nr_shared_irqs(dist));
}
/*
 * Update the interrupt state and determine which CPUs have pending
 * or active interrupts. Must be called with distributor lock held.
 */
void vgic_update_state(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int c;

	if (!dist->enabled) {
		set_bit(0, dist->irq_pending_on_cpu);
		return;
	}

	kvm_for_each_vcpu(c, vcpu, kvm) {
		if (compute_pending_for_cpu(vcpu))
			set_bit(c, dist->irq_pending_on_cpu);

		if (compute_active_for_cpu(vcpu))
			set_bit(c, dist->irq_active_on_cpu);
		else
			clear_bit(c, dist->irq_active_on_cpu);
	}
}
static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr)
{
	return vgic_ops->get_lr(vcpu, lr);
}

static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr,
			struct vgic_lr vlr)
{
	vgic_ops->set_lr(vcpu, lr, vlr);
}

static void vgic_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr,
			       struct vgic_lr vlr)
{
	vgic_ops->sync_lr_elrsr(vcpu, lr, vlr);
}

static inline u64 vgic_get_elrsr(struct kvm_vcpu *vcpu)
{
	return vgic_ops->get_elrsr(vcpu);
}

static inline u64 vgic_get_eisr(struct kvm_vcpu *vcpu)
{
	return vgic_ops->get_eisr(vcpu);
}

static inline void vgic_clear_eisr(struct kvm_vcpu *vcpu)
{
	vgic_ops->clear_eisr(vcpu);
}

static inline u32 vgic_get_interrupt_status(struct kvm_vcpu *vcpu)
{
	return vgic_ops->get_interrupt_status(vcpu);
}

static inline void vgic_enable_underflow(struct kvm_vcpu *vcpu)
{
	vgic_ops->enable_underflow(vcpu);
}

static inline void vgic_disable_underflow(struct kvm_vcpu *vcpu)
{
	vgic_ops->disable_underflow(vcpu);
}

void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
	vgic_ops->get_vmcr(vcpu, vmcr);
}

void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
	vgic_ops->set_vmcr(vcpu, vmcr);
}

static inline void vgic_enable(struct kvm_vcpu *vcpu)
{
	vgic_ops->enable(vcpu);
}
static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_lr vlr = vgic_get_lr(vcpu, lr_nr);

	vlr.state = 0;
	vgic_set_lr(vcpu, lr_nr, vlr);
	clear_bit(lr_nr, vgic_cpu->lr_used);
	vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY;
	vgic_sync_lr_elrsr(vcpu, lr_nr, vlr);
}
/*
 * An interrupt may have been disabled after being made pending on the
 * CPU interface (the classic case is a timer running while we're
 * rebooting the guest - the interrupt would kick as soon as the CPU
 * interface gets enabled, with deadly consequences).
 *
 * The solution is to examine already active LRs, and check the
 * interrupt is still enabled. If not, just retire it.
 */
static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	int lr;

	for_each_set_bit(lr, vgic_cpu->lr_used, vgic->nr_lr) {
		struct vgic_lr vlr = vgic_get_lr(vcpu, lr);

		if (!vgic_irq_is_enabled(vcpu, vlr.irq)) {
			vgic_retire_lr(lr, vlr.irq, vcpu);
			if (vgic_irq_is_queued(vcpu, vlr.irq))
				vgic_irq_clear_queued(vcpu, vlr.irq);
		}
	}
}
static void vgic_queue_irq_to_lr(struct kvm_vcpu *vcpu, int irq,
				 int lr_nr, struct vgic_lr vlr)
{
	if (vgic_irq_is_active(vcpu, irq)) {
		vlr.state |= LR_STATE_ACTIVE;
		kvm_debug("Set active, clear distributor: 0x%x\n", vlr.state);
		vgic_irq_clear_active(vcpu, irq);
		vgic_update_state(vcpu->kvm);
	} else if (vgic_dist_irq_is_pending(vcpu, irq)) {
		vlr.state |= LR_STATE_PENDING;
		kvm_debug("Set pending: 0x%x\n", vlr.state);
	}

	if (!vgic_irq_is_edge(vcpu, irq))
		vlr.state |= LR_EOI_INT;

	vgic_set_lr(vcpu, lr_nr, vlr);
	vgic_sync_lr_elrsr(vcpu, lr_nr, vlr);
}
/*
 * Queue an interrupt to a CPU virtual interface. Return true on success,
 * or false if it wasn't possible to queue it.
 * sgi_source must be zero for any non-SGI interrupts.
 */
bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	struct vgic_lr vlr;
	int lr;

	/* Sanitize the input... */
	BUG_ON(sgi_source_id & ~7);
	BUG_ON(sgi_source_id && irq >= VGIC_NR_SGIS);
	BUG_ON(irq >= dist->nr_irqs);

	kvm_debug("Queue IRQ%d\n", irq);

	lr = vgic_cpu->vgic_irq_lr_map[irq];

	/* Do we have an active interrupt for the same CPUID? */
	if (lr != LR_EMPTY) {
		vlr = vgic_get_lr(vcpu, lr);
		if (vlr.source == sgi_source_id) {
			kvm_debug("LR%d piggyback for IRQ%d\n", lr, vlr.irq);
			BUG_ON(!test_bit(lr, vgic_cpu->lr_used));
			vgic_queue_irq_to_lr(vcpu, irq, lr, vlr);
			return true;
		}
	}

	/* Try to use another LR for this interrupt */
	lr = find_first_zero_bit((unsigned long *)vgic_cpu->lr_used,
				 vgic->nr_lr);
	if (lr >= vgic->nr_lr)
		return false;

	kvm_debug("LR%d allocated for IRQ%d %x\n", lr, irq, sgi_source_id);
	vgic_cpu->vgic_irq_lr_map[irq] = lr;
	set_bit(lr, vgic_cpu->lr_used);

	vlr.irq = irq;
	vlr.source = sgi_source_id;
	vlr.state = 0;
	vgic_queue_irq_to_lr(vcpu, irq, lr, vlr);

	return true;
}
static bool vgic_queue_hwirq(struct kvm_vcpu *vcpu, int irq)
{
	if (!vgic_can_sample_irq(vcpu, irq))
		return true; /* level interrupt, already queued */

	if (vgic_queue_irq(vcpu, 0, irq)) {
		if (vgic_irq_is_edge(vcpu, irq)) {
			vgic_dist_irq_clear_pending(vcpu, irq);
			vgic_cpu_irq_clear(vcpu, irq);
		} else {
			vgic_irq_set_queued(vcpu, irq);
		}

		return true;
	}

	return false;
}
/*
 * Fill the list registers with pending interrupts before running the
 * guest.
 */
static void __kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	unsigned long *pa_percpu, *pa_shared;
	int i, vcpu_id;
	int overflow = 0;
	int nr_shared = vgic_nr_shared_irqs(dist);

	vcpu_id = vcpu->vcpu_id;

	pa_percpu = vcpu->arch.vgic_cpu.pend_act_percpu;
	pa_shared = vcpu->arch.vgic_cpu.pend_act_shared;

	bitmap_or(pa_percpu, vgic_cpu->pending_percpu, vgic_cpu->active_percpu,
		  VGIC_NR_PRIVATE_IRQS);
	bitmap_or(pa_shared, vgic_cpu->pending_shared, vgic_cpu->active_shared,
		  nr_shared);
	/*
	 * We may not have any pending interrupt, or the interrupts
	 * may have been serviced from another vcpu. In all cases,
	 * move along.
	 */
	if (!kvm_vgic_vcpu_pending_irq(vcpu) && !kvm_vgic_vcpu_active_irq(vcpu))
		goto epilog;

	/* SGIs */
	for_each_set_bit(i, pa_percpu, VGIC_NR_SGIS) {
		if (!queue_sgi(vcpu, i))
			overflow = 1;
	}

	/* PPIs */
	for_each_set_bit_from(i, pa_percpu, VGIC_NR_PRIVATE_IRQS) {
		if (!vgic_queue_hwirq(vcpu, i))
			overflow = 1;
	}

	/* SPIs */
	for_each_set_bit(i, pa_shared, nr_shared) {
		if (!vgic_queue_hwirq(vcpu, i + VGIC_NR_PRIVATE_IRQS))
			overflow = 1;
	}

epilog:
	if (overflow) {
		vgic_enable_underflow(vcpu);
	} else {
		vgic_disable_underflow(vcpu);
		/*
		 * We're about to run this VCPU, and we've consumed
		 * everything the distributor had in store for
		 * us. Claim we don't have anything pending. We'll
		 * adjust that if needed while exiting.
		 */
		clear_bit(vcpu_id, dist->irq_pending_on_cpu);
	}
}
static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
{
	u32 status = vgic_get_interrupt_status(vcpu);
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	bool level_pending = false;
	struct kvm *kvm = vcpu->kvm;

	kvm_debug("STATUS = %08x\n", status);

	if (status & INT_STATUS_EOI) {
		/*
		 * Some level interrupts have been EOIed. Clear their
		 * active bit.
		 */
		u64 eisr = vgic_get_eisr(vcpu);
		unsigned long *eisr_ptr = u64_to_bitmask(&eisr);
		int lr;

		for_each_set_bit(lr, eisr_ptr, vgic->nr_lr) {
			struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
			WARN_ON(vgic_irq_is_edge(vcpu, vlr.irq));

			spin_lock(&dist->lock);
			vgic_irq_clear_queued(vcpu, vlr.irq);
			WARN_ON(vlr.state & LR_STATE_MASK);
			vlr.state = 0;
			vgic_set_lr(vcpu, lr, vlr);

			/*
			 * If the IRQ was EOIed it was also ACKed and we
			 * therefore assume we can clear the soft pending
			 * state (should it have been set) for this interrupt.
			 *
			 * Note: if the IRQ soft pending state was set after
			 * the IRQ was acked, it actually shouldn't be
			 * cleared, but we have no way of knowing that unless
			 * we start trapping ACKs when the soft-pending state
			 * is set.
			 */
			vgic_dist_irq_clear_soft_pend(vcpu, vlr.irq);

			/*
			 * kvm_notify_acked_irq calls kvm_set_irq()
			 * to reset the IRQ level. Need to release the
			 * lock for kvm_set_irq to grab it.
			 */
			spin_unlock(&dist->lock);

			kvm_notify_acked_irq(kvm, 0,
					     vlr.irq - VGIC_NR_PRIVATE_IRQS);
			spin_lock(&dist->lock);

			/* Any additional pending interrupt? */
			if (vgic_dist_irq_get_level(vcpu, vlr.irq)) {
				vgic_cpu_irq_set(vcpu, vlr.irq);
				level_pending = true;
			} else {
				vgic_dist_irq_clear_pending(vcpu, vlr.irq);
				vgic_cpu_irq_clear(vcpu, vlr.irq);
			}

			spin_unlock(&dist->lock);

			/*
			 * Despite being EOIed, the LR may not have
			 * been marked as empty.
			 */
			vgic_sync_lr_elrsr(vcpu, lr, vlr);
		}
	}

	if (status & INT_STATUS_UNDERFLOW)
		vgic_disable_underflow(vcpu);

	/*
	 * In the next iterations of the vcpu loop, if we sync the vgic state
	 * after flushing it, but before entering the guest (this happens for
	 * pending signals and vmid rollovers), then make sure we don't pick
	 * up any old maintenance interrupts here.
	 */
	vgic_clear_eisr(vcpu);

	return level_pending;
}
/* Sync back the VGIC state after a guest run */
static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	u64 elrsr;
	unsigned long *elrsr_ptr;
	int lr, pending;
	bool level_pending;

	level_pending = vgic_process_maintenance(vcpu);
	elrsr = vgic_get_elrsr(vcpu);
	elrsr_ptr = u64_to_bitmask(&elrsr);

	/* Clear mappings for empty LRs */
	for_each_set_bit(lr, elrsr_ptr, vgic->nr_lr) {
		struct vgic_lr vlr;

		if (!test_and_clear_bit(lr, vgic_cpu->lr_used))
			continue;

		vlr = vgic_get_lr(vcpu, lr);

		BUG_ON(vlr.irq >= dist->nr_irqs);
		vgic_cpu->vgic_irq_lr_map[vlr.irq] = LR_EMPTY;
	}

	/* Check if we still have something up our sleeve... */
	pending = find_first_zero_bit(elrsr_ptr, vgic->nr_lr);
	if (level_pending || pending < vgic->nr_lr)
		set_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu);
}
void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	if (!irqchip_in_kernel(vcpu->kvm))
		return;

	spin_lock(&dist->lock);
	__kvm_vgic_flush_hwstate(vcpu);
	spin_unlock(&dist->lock);
}

void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
	if (!irqchip_in_kernel(vcpu->kvm))
		return;

	__kvm_vgic_sync_hwstate(vcpu);
}
int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	if (!irqchip_in_kernel(vcpu->kvm))
		return 0;

	return test_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu);
}

int kvm_vgic_vcpu_active_irq(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	if (!irqchip_in_kernel(vcpu->kvm))
		return 0;

	return test_bit(vcpu->vcpu_id, dist->irq_active_on_cpu);
}
void vgic_kick_vcpus(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int c;

	/*
	 * We've injected an interrupt, time to find out who deserves
	 * a good kick...
	 */
	kvm_for_each_vcpu(c, vcpu, kvm) {
		if (kvm_vgic_vcpu_pending_irq(vcpu))
			kvm_vcpu_kick(vcpu);
	}
}
static int vgic_validate_injection(struct kvm_vcpu *vcpu, int irq, int level)
{
	int edge_triggered = vgic_irq_is_edge(vcpu, irq);

	/*
	 * Only inject an interrupt if:
	 * - edge triggered and we have a rising edge
	 * - level triggered and we change level
	 */
	if (edge_triggered) {
		int state = vgic_dist_irq_is_pending(vcpu, irq);
		return level > state;
	} else {
		int state = vgic_dist_irq_get_level(vcpu, irq);
		return level != state;
	}
}
static int vgic_update_irq_pending(struct kvm *kvm, int cpuid,
				   unsigned int irq_num, bool level)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int edge_triggered, level_triggered;
	int enabled;
	bool ret = true, can_inject = true;

	spin_lock(&dist->lock);

	vcpu = kvm_get_vcpu(kvm, cpuid);
	edge_triggered = vgic_irq_is_edge(vcpu, irq_num);
	level_triggered = !edge_triggered;

	if (!vgic_validate_injection(vcpu, irq_num, level)) {
		ret = false;
		goto out;
	}

	if (irq_num >= VGIC_NR_PRIVATE_IRQS) {
		cpuid = dist->irq_spi_cpu[irq_num - VGIC_NR_PRIVATE_IRQS];
		if (cpuid == VCPU_NOT_ALLOCATED) {
			/* Pretend we use CPU0, and prevent injection */
			cpuid = 0;
			can_inject = false;
		}
		vcpu = kvm_get_vcpu(kvm, cpuid);
	}

	kvm_debug("Inject IRQ%d level %d CPU%d\n", irq_num, level, cpuid);

	if (level) {
		if (level_triggered)
			vgic_dist_irq_set_level(vcpu, irq_num);
		vgic_dist_irq_set_pending(vcpu, irq_num);
	} else {
		if (level_triggered) {
			vgic_dist_irq_clear_level(vcpu, irq_num);
			if (!vgic_dist_irq_soft_pend(vcpu, irq_num))
				vgic_dist_irq_clear_pending(vcpu, irq_num);
		}

		ret = false;
		goto out;
	}

	enabled = vgic_irq_is_enabled(vcpu, irq_num);

	if (!enabled || !can_inject) {
		ret = false;
		goto out;
	}

	if (!vgic_can_sample_irq(vcpu, irq_num)) {
		/*
		 * Level interrupt in progress, will be picked up
		 * when EOId.
		 */
		ret = false;
		goto out;
	}

	if (level) {
		vgic_cpu_irq_set(vcpu, irq_num);
		set_bit(cpuid, dist->irq_pending_on_cpu);
	}

out:
	spin_unlock(&dist->lock);

	return ret ? cpuid : -EINVAL;
}
/**
 * kvm_vgic_inject_irq - Inject an IRQ from a device to the vgic
 * @kvm:     The VM structure pointer
 * @cpuid:   The CPU for PPIs
 * @irq_num: The IRQ number that is assigned to the device
 * @level:   Edge-triggered:  true:  to trigger the interrupt
 *			      false: to ignore the call
 *	     Level-sensitive  true:  activates an interrupt
 *			      false: deactivates an interrupt
 *
 * The GIC is not concerned with devices being active-LOW or active-HIGH for
 * level-sensitive interrupts.  You can think of the level parameter as 1
 * being HIGH and 0 being LOW and all devices being active-HIGH.
 */
int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
			bool level)
{
	int ret = 0;
	int vcpu_id;

	if (unlikely(!vgic_initialized(kvm))) {
		/*
		 * We only provide the automatic initialization of the VGIC
		 * for the legacy case of a GICv2. Any other type must
		 * be explicitly initialized once setup with the respective
		 * KVM device call.
		 */
		if (kvm->arch.vgic.vgic_model != KVM_DEV_TYPE_ARM_VGIC_V2) {
			ret = -EBUSY;
			goto out;
		}
		mutex_lock(&kvm->lock);
		ret = vgic_init(kvm);
		mutex_unlock(&kvm->lock);

		if (ret)
			goto out;
	}

	if (irq_num >= min(kvm->arch.vgic.nr_irqs, 1020))
		return -EINVAL;

	vcpu_id = vgic_update_irq_pending(kvm, cpuid, irq_num, level);
	if (vcpu_id >= 0) {
		/* kick the specified vcpu */
		kvm_vcpu_kick(kvm_get_vcpu(kvm, vcpu_id));
	}

out:
	return ret;
}
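/*
 * Illustrative usage (hypothetical, not from the original source): an
 * in-kernel device model raising and later lowering a level-sensitive
 * interrupt line.  The interrupt number 40 is an arbitrary SPI picked
 * for the example; cpuid only matters for PPIs and is ignored here.
 */
static inline void example_toggle_level_spi(struct kvm *kvm)
{
	/* Drive the (virtual) line high: the SPI becomes pending. */
	kvm_vgic_inject_irq(kvm, 0, 40, true);

	/* ... the guest services and EOIs the interrupt ... */

	/* Drive the line low again so it can be sampled anew. */
	kvm_vgic_inject_irq(kvm, 0, 40, false);
}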
static irqreturn_t vgic_maintenance_handler(int irq, void *data)
{
	/*
	 * We cannot rely on the vgic maintenance interrupt to be
	 * delivered synchronously. This means we can only use it to
	 * exit the VM, and we perform the handling of EOIed
	 * interrupts on the exit path (see vgic_process_maintenance).
	 */
	return IRQ_HANDLED;
}
void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

	kfree(vgic_cpu->pending_shared);
	kfree(vgic_cpu->active_shared);
	kfree(vgic_cpu->pend_act_shared);
	kfree(vgic_cpu->vgic_irq_lr_map);
	vgic_cpu->pending_shared = NULL;
	vgic_cpu->active_shared = NULL;
	vgic_cpu->pend_act_shared = NULL;
	vgic_cpu->vgic_irq_lr_map = NULL;
}
static int vgic_vcpu_init_maps(struct kvm_vcpu *vcpu, int nr_irqs)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

	int sz = (nr_irqs - VGIC_NR_PRIVATE_IRQS) / 8;
	vgic_cpu->pending_shared = kzalloc(sz, GFP_KERNEL);
	vgic_cpu->active_shared = kzalloc(sz, GFP_KERNEL);
	vgic_cpu->pend_act_shared = kzalloc(sz, GFP_KERNEL);
	vgic_cpu->vgic_irq_lr_map = kmalloc(nr_irqs, GFP_KERNEL);

	if (!vgic_cpu->pending_shared
		|| !vgic_cpu->active_shared
		|| !vgic_cpu->pend_act_shared
		|| !vgic_cpu->vgic_irq_lr_map) {
		kvm_vgic_vcpu_destroy(vcpu);
		return -ENOMEM;
	}

	memset(vgic_cpu->vgic_irq_lr_map, LR_EMPTY, nr_irqs);

	/*
	 * Store the number of LRs per vcpu, so we don't have to go
	 * all the way to the distributor structure to find out. Only
	 * assembly code should use this one.
	 */
	vgic_cpu->nr_lr = vgic->nr_lr;

	return 0;
}
/**
 * kvm_vgic_get_max_vcpus - Get the maximum number of VCPUs allowed by HW
 *
 * The host's GIC naturally limits the maximum amount of VCPUs a guest
 * can use.
 */
int kvm_vgic_get_max_vcpus(void)
{
	return vgic->max_gic_vcpus;
}
void kvm_vgic_destroy(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int i;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_vgic_vcpu_destroy(vcpu);

	vgic_free_bitmap(&dist->irq_enabled);
	vgic_free_bitmap(&dist->irq_level);
	vgic_free_bitmap(&dist->irq_pending);
	vgic_free_bitmap(&dist->irq_soft_pend);
	vgic_free_bitmap(&dist->irq_queued);
	vgic_free_bitmap(&dist->irq_cfg);
	vgic_free_bytemap(&dist->irq_priority);
	if (dist->irq_spi_target) {
		for (i = 0; i < dist->nr_cpus; i++)
			vgic_free_bitmap(&dist->irq_spi_target[i]);
	}
	kfree(dist->irq_sgi_sources);
	kfree(dist->irq_spi_cpu);
	kfree(dist->irq_spi_mpidr);
	kfree(dist->irq_spi_target);
	kfree(dist->irq_pending_on_cpu);
	kfree(dist->irq_active_on_cpu);
	dist->irq_sgi_sources = NULL;
	dist->irq_spi_cpu = NULL;
	dist->irq_spi_target = NULL;
	dist->irq_pending_on_cpu = NULL;
	dist->irq_active_on_cpu = NULL;
}
/*
 * Allocate and initialize the various data structures. Must be called
 * with kvm->lock held!
 */
int vgic_init(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int nr_cpus, nr_irqs;
	int ret, i, vcpu_id;

	if (vgic_initialized(kvm))
		return 0;

	nr_cpus = dist->nr_cpus = atomic_read(&kvm->online_vcpus);
	if (!nr_cpus)		/* No vcpus? Can't be good... */
		return -ENODEV;

	/*
	 * If nobody configured the number of interrupts, use the
	 * legacy one.
	 */
	if (!dist->nr_irqs)
		dist->nr_irqs = VGIC_NR_IRQS_LEGACY;

	nr_irqs = dist->nr_irqs;

	ret  = vgic_init_bitmap(&dist->irq_enabled, nr_cpus, nr_irqs);
	ret |= vgic_init_bitmap(&dist->irq_level, nr_cpus, nr_irqs);
	ret |= vgic_init_bitmap(&dist->irq_pending, nr_cpus, nr_irqs);
	ret |= vgic_init_bitmap(&dist->irq_soft_pend, nr_cpus, nr_irqs);
	ret |= vgic_init_bitmap(&dist->irq_queued, nr_cpus, nr_irqs);
	ret |= vgic_init_bitmap(&dist->irq_active, nr_cpus, nr_irqs);
	ret |= vgic_init_bitmap(&dist->irq_cfg, nr_cpus, nr_irqs);
	ret |= vgic_init_bytemap(&dist->irq_priority, nr_cpus, nr_irqs);

	if (ret)
		goto out;

	dist->irq_sgi_sources = kzalloc(nr_cpus * VGIC_NR_SGIS, GFP_KERNEL);
	dist->irq_spi_cpu = kzalloc(nr_irqs - VGIC_NR_PRIVATE_IRQS, GFP_KERNEL);
	dist->irq_spi_target = kzalloc(sizeof(*dist->irq_spi_target) * nr_cpus,
				       GFP_KERNEL);
	dist->irq_pending_on_cpu = kzalloc(BITS_TO_LONGS(nr_cpus) * sizeof(long),
					   GFP_KERNEL);
	dist->irq_active_on_cpu = kzalloc(BITS_TO_LONGS(nr_cpus) * sizeof(long),
					  GFP_KERNEL);
	if (!dist->irq_sgi_sources ||
	    !dist->irq_spi_cpu ||
	    !dist->irq_spi_target ||
	    !dist->irq_pending_on_cpu ||
	    !dist->irq_active_on_cpu) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < nr_cpus; i++)
		ret |= vgic_init_bitmap(&dist->irq_spi_target[i],
					nr_cpus, nr_irqs);

	if (ret)
		goto out;

	ret = kvm->arch.vgic.vm_ops.init_model(kvm);
	if (ret)
		goto out;

	kvm_for_each_vcpu(vcpu_id, vcpu, kvm) {
		ret = vgic_vcpu_init_maps(vcpu, nr_irqs);
		if (ret) {
			kvm_err("VGIC: Failed to allocate vcpu memory\n");
			break;
		}

		for (i = 0; i < dist->nr_irqs; i++) {
			if (i < VGIC_NR_PPIS)
				vgic_bitmap_set_irq_val(&dist->irq_enabled,
							vcpu->vcpu_id, i, 1);
			if (i < VGIC_NR_PRIVATE_IRQS)
				vgic_bitmap_set_irq_val(&dist->irq_cfg,
							vcpu->vcpu_id, i,
							VGIC_CFG_EDGE);
		}

		vgic_enable(vcpu);
	}

out:
	if (ret)
		kvm_vgic_destroy(kvm);

	return ret;
}
static int init_vgic_model(struct kvm *kvm, int type)
{
	switch (type) {
	case KVM_DEV_TYPE_ARM_VGIC_V2:
		vgic_v2_init_emulation(kvm);
		break;
#ifdef CONFIG_ARM_GIC_V3
	case KVM_DEV_TYPE_ARM_VGIC_V3:
		vgic_v3_init_emulation(kvm);
		break;
#endif
	default:
		return -ENODEV;
	}

	if (atomic_read(&kvm->online_vcpus) > kvm->arch.max_vcpus)
		return -E2BIG;

	return 0;
}
int kvm_vgic_create(struct kvm *kvm, u32 type)
{
	int i, vcpu_lock_idx = -1, ret;
	struct kvm_vcpu *vcpu;

	mutex_lock(&kvm->lock);

	if (irqchip_in_kernel(kvm)) {
		ret = -EEXIST;
		goto out;
	}

	/*
	 * This function is also called by the KVM_CREATE_IRQCHIP handler,
	 * which had no chance yet to check the availability of the GICv2
	 * emulation. So check this here again. KVM_CREATE_DEVICE does
	 * the proper checks already.
	 */
	if (type == KVM_DEV_TYPE_ARM_VGIC_V2 && !vgic->can_emulate_gicv2) {
		ret = -ENODEV;
		goto out;
	}

	/*
	 * Any time a vcpu is run, vcpu_load is called which tries to grab the
	 * vcpu->mutex.  By grabbing the vcpu->mutex of all VCPUs we ensure
	 * that no other VCPUs are run while we create the vgic.
	 */
	ret = -EBUSY;
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!mutex_trylock(&vcpu->mutex))
			goto out_unlock;
		vcpu_lock_idx = i;
	}

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (vcpu->arch.has_run_once)
			goto out_unlock;
	}
	ret = 0;

	ret = init_vgic_model(kvm, type);
	if (ret)
		goto out_unlock;

	spin_lock_init(&kvm->arch.vgic.lock);
	kvm->arch.vgic.in_kernel = true;
	kvm->arch.vgic.vgic_model = type;
	kvm->arch.vgic.vctrl_base = vgic->vctrl_base;
	kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF;
	kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF;
	kvm->arch.vgic.vgic_redist_base = VGIC_ADDR_UNDEF;

out_unlock:
	for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
		vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
		mutex_unlock(&vcpu->mutex);
	}

out:
	mutex_unlock(&kvm->lock);
	return ret;
}
static int vgic_ioaddr_overlap(struct kvm *kvm)
{
	phys_addr_t dist = kvm->arch.vgic.vgic_dist_base;
	phys_addr_t cpu = kvm->arch.vgic.vgic_cpu_base;

	if (IS_VGIC_ADDR_UNDEF(dist) || IS_VGIC_ADDR_UNDEF(cpu))
		return 0;
	if ((dist <= cpu && dist + KVM_VGIC_V2_DIST_SIZE > cpu) ||
	    (cpu <= dist && cpu + KVM_VGIC_V2_CPU_SIZE > dist))
		return -EBUSY;
	return 0;
}
static int vgic_ioaddr_assign(struct kvm *kvm, phys_addr_t *ioaddr,
			      phys_addr_t addr, phys_addr_t size)
{
	int ret;

	if (addr & ~KVM_PHYS_MASK)
		return -E2BIG;

	if (addr & (SZ_4K - 1))
		return -EINVAL;

	if (!IS_VGIC_ADDR_UNDEF(*ioaddr))
		return -EEXIST;
	if (addr + size < addr)
		return -EINVAL;

	*ioaddr = addr;
	ret = vgic_ioaddr_overlap(kvm);
	if (ret)
		*ioaddr = VGIC_ADDR_UNDEF;

	return ret;
}
/**
 * kvm_vgic_addr - set or get vgic VM base addresses
 * @kvm:   pointer to the vm struct
 * @type:  the VGIC addr type, one of KVM_VGIC_V[23]_ADDR_TYPE_XXX
 * @addr:  pointer to address value
 * @write: if true set the address in the VM address space, if false read the
 *         address
 *
 * Set or get the vgic base addresses for the distributor and the virtual CPU
 * interface in the VM physical address space.  These addresses are properties
 * of the emulated core/SoC and therefore user space initially knows this
 * information.
 */
int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
{
	int r = 0;
	struct vgic_dist *vgic = &kvm->arch.vgic;
	int type_needed;
	phys_addr_t *addr_ptr, block_size;
	phys_addr_t alignment;

	mutex_lock(&kvm->lock);
	switch (type) {
	case KVM_VGIC_V2_ADDR_TYPE_DIST:
		type_needed = KVM_DEV_TYPE_ARM_VGIC_V2;
		addr_ptr = &vgic->vgic_dist_base;
		block_size = KVM_VGIC_V2_DIST_SIZE;
		alignment = SZ_4K;
		break;
	case KVM_VGIC_V2_ADDR_TYPE_CPU:
		type_needed = KVM_DEV_TYPE_ARM_VGIC_V2;
		addr_ptr = &vgic->vgic_cpu_base;
		block_size = KVM_VGIC_V2_CPU_SIZE;
		alignment = SZ_4K;
		break;
#ifdef CONFIG_ARM_GIC_V3
	case KVM_VGIC_V3_ADDR_TYPE_DIST:
		type_needed = KVM_DEV_TYPE_ARM_VGIC_V3;
		addr_ptr = &vgic->vgic_dist_base;
		block_size = KVM_VGIC_V3_DIST_SIZE;
		alignment = SZ_64K;
		break;
	case KVM_VGIC_V3_ADDR_TYPE_REDIST:
		type_needed = KVM_DEV_TYPE_ARM_VGIC_V3;
		addr_ptr = &vgic->vgic_redist_base;
		block_size = KVM_VGIC_V3_REDIST_SIZE;
		alignment = SZ_64K;
		break;
#endif
	default:
		r = -ENODEV;
		goto out;
	}

	if (vgic->vgic_model != type_needed) {
		r = -ENODEV;
		goto out;
	}

	if (write) {
		if (!IS_ALIGNED(*addr, alignment))
			r = -EINVAL;
		else
			r = vgic_ioaddr_assign(kvm, addr_ptr, *addr,
					       block_size);
	} else {
		*addr = *addr_ptr;
	}

out:
	mutex_unlock(&kvm->lock);
	return r;
}
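/*
 * Illustrative usage (hypothetical, not from the original source):
 * setting the GICv2 distributor base for a guest from kernel code.
 * Userspace normally reaches this path through the
 * KVM_DEV_ARM_VGIC_GRP_ADDR device attribute handled below; the guest
 * address used here is arbitrary but 4K-aligned as required.
 */
static inline int example_set_v2_dist_base(struct kvm *kvm)
{
	u64 addr = 0x08000000;

	return kvm_vgic_addr(kvm, KVM_VGIC_V2_ADDR_TYPE_DIST, &addr, true);
}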
int vgic_set_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int r;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		u64 addr;
		unsigned long type = (unsigned long)attr->attr;

		if (copy_from_user(&addr, uaddr, sizeof(addr)))
			return -EFAULT;

		r = kvm_vgic_addr(dev->kvm, type, &addr, true);
		return (r == -ENODEV) ? -ENXIO : r;
	}
	case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
		u32 val;
		int ret = 0;

		if (get_user(val, uaddr))
			return -EFAULT;

		/*
		 * We require:
		 * - at least 32 SPIs on top of the 16 SGIs and 16 PPIs
		 * - at most 1024 interrupts
		 * - a multiple of 32 interrupts
		 */
		if (val < (VGIC_NR_PRIVATE_IRQS + 32) ||
		    val > VGIC_MAX_IRQS ||
		    (val & 31))
			return -EINVAL;

		mutex_lock(&dev->kvm->lock);

		if (vgic_ready(dev->kvm) || dev->kvm->arch.vgic.nr_irqs)
			ret = -EBUSY;
		else
			dev->kvm->arch.vgic.nr_irqs = val;

		mutex_unlock(&dev->kvm->lock);

		return ret;
	}
	case KVM_DEV_ARM_VGIC_GRP_CTRL: {
		switch (attr->attr) {
		case KVM_DEV_ARM_VGIC_CTRL_INIT:
			r = vgic_init(dev->kvm);
			return r;
		}
		break;
	}
	}

	return -ENXIO;
}
int vgic_get_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int r = -ENXIO;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		u64 addr;
		unsigned long type = (unsigned long)attr->attr;

		r = kvm_vgic_addr(dev->kvm, type, &addr, false);
		if (r)
			return (r == -ENODEV) ? -ENXIO : r;

		if (copy_to_user(uaddr, &addr, sizeof(addr)))
			return -EFAULT;
		break;
	}
	case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;

		r = put_user(dev->kvm->arch.vgic.nr_irqs, uaddr);
		break;
	}
	}

	return r;
}
int vgic_has_attr_regs(const struct vgic_io_range *ranges, phys_addr_t offset)
{
	if (vgic_find_range(ranges, 4, offset))
		return 0;
	else
		return -ENXIO;
}
static void vgic_init_maintenance_interrupt(void *info)
{
	enable_percpu_irq(vgic->maint_irq, 0);
}

static int vgic_cpu_notify(struct notifier_block *self,
			   unsigned long action, void *cpu)
{
	switch (action) {
	case CPU_STARTING:
	case CPU_STARTING_FROZEN:
		vgic_init_maintenance_interrupt(NULL);
		break;
	case CPU_DYING:
	case CPU_DYING_FROZEN:
		disable_percpu_irq(vgic->maint_irq);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block vgic_cpu_nb = {
	.notifier_call = vgic_cpu_notify,
};
static const struct of_device_id vgic_ids[] = {
	{ .compatible = "arm,cortex-a15-gic",	.data = vgic_v2_probe, },
	{ .compatible = "arm,cortex-a7-gic",	.data = vgic_v2_probe, },
	{ .compatible = "arm,gic-400",		.data = vgic_v2_probe, },
	{ .compatible = "arm,gic-v3",		.data = vgic_v3_probe, },
	{},
};
int kvm_vgic_hyp_init(void)
{
	const struct of_device_id *matched_id;
	const int (*vgic_probe)(struct device_node *, const struct vgic_ops **,
				const struct vgic_params **);
	struct device_node *vgic_node;
	int ret;

	vgic_node = of_find_matching_node_and_match(NULL,
						    vgic_ids, &matched_id);
	if (!vgic_node) {
		kvm_err("error: no compatible GIC node found\n");
		return -ENODEV;
	}

	vgic_probe = matched_id->data;
	ret = vgic_probe(vgic_node, &vgic_ops, &vgic);
	if (ret)
		return ret;

	ret = request_percpu_irq(vgic->maint_irq, vgic_maintenance_handler,
				 "vgic", kvm_get_running_vcpus());
	if (ret) {
		kvm_err("Cannot register interrupt %d\n", vgic->maint_irq);
		return ret;
	}

	ret = __register_cpu_notifier(&vgic_cpu_nb);
	if (ret) {
		kvm_err("Cannot register vgic CPU notifier\n");
		goto out_free_irq;
	}

	/* Callback into the arch code for setup */
	vgic_arch_setup(vgic);

	on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);

	return 0;

out_free_irq:
	free_percpu_irq(vgic->maint_irq, kvm_get_running_vcpus());
	return ret;
}
int kvm_irq_map_gsi(struct kvm *kvm,
		    struct kvm_kernel_irq_routing_entry *entries,
		    int gsi)
{
	return 0;
}

int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
	return pin;
}

int kvm_set_irq(struct kvm *kvm, int irq_source_id,
		u32 irq, int level, bool line_status)
{
	unsigned int spi = irq + VGIC_NR_PRIVATE_IRQS;

	trace_kvm_set_irq(irq, level, irq_source_id);

	BUG_ON(!vgic_initialized(kvm));

	return kvm_vgic_inject_irq(kvm, 0, spi, level);
}

/* MSI not implemented yet */
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
		struct kvm *kvm, int irq_source_id,
		int level, bool line_status)
{
	return 0;
}