/*
 * Copyright (C) 2012 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/uaccess.h>

#include <linux/irqchip/arm-gic.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
/*
 * How the whole thing works (courtesy of Christoffer Dall):
 *
 * - At any time, the dist->irq_pending_on_cpu is the oracle that knows if
 *   something is pending on the CPU interface.
 * - Interrupts that are pending on the distributor are stored on the
 *   vgic.irq_pending vgic bitmap (this bitmap is updated by both user land
 *   ioctls and guest mmio ops, and other in-kernel peripherals such as the
 *   arch. timers).
 * - Every time the bitmap changes, the irq_pending_on_cpu oracle is
 *   recalculated.
 * - To calculate the oracle, we need info for each cpu from
 *   compute_pending_for_cpu, which considers:
 *   - PPI: dist->irq_pending & dist->irq_enable
 *   - SPI: dist->irq_pending & dist->irq_enable & dist->irq_spi_target
 *   - irq_spi_target is a 'formatted' version of the GICD_ITARGETSRn
 *     registers, stored on each vcpu. We only keep one bit of
 *     information per interrupt, making sure that only one vcpu can
 *     accept the interrupt.
 * - If any of the above state changes, we must recalculate the oracle.
 * - The same is true when injecting an interrupt, except that we only
 *   consider a single interrupt at a time. The irq_spi_cpu array
 *   contains the target CPU for each SPI.
 *
 * The handling of level interrupts adds some extra complexity. We
 * need to track when the interrupt has been EOIed, so we can sample
 * the 'line' again. This is achieved as such:
 *
 * - When a level interrupt is moved onto a vcpu, the corresponding
 *   bit in irq_queued is set. As long as this bit is set, the line
 *   will be ignored for further interrupts. The interrupt is injected
 *   into the vcpu with the GICH_LR_EOI bit set (generate a
 *   maintenance interrupt on EOI).
 * - When the interrupt is EOIed, the maintenance interrupt fires,
 *   and clears the corresponding bit in irq_queued. This allows the
 *   interrupt line to be sampled again.
 * - Note that level-triggered interrupts can also be set to pending from
 *   writes to GICD_ISPENDRn and lowering the external input line does not
 *   cause the interrupt to become inactive in such a situation.
 *   Conversely, writes to GICD_ICPENDRn do not cause the interrupt to become
 *   inactive as long as the external input line is held high.
 */
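/*
 * Illustrative sketch (not part of the original code) of the oracle
 * described above: for a single vcpu the pending computation reduces to a
 * couple of bitmap ANDs, roughly
 *
 *	pend_private = irq_pending & irq_enabled;		(PPIs/SGIs)
 *	pend_shared  = irq_pending & irq_enabled & spi_target;	(SPIs)
 *	irq_pending_on_cpu[vcpu] = any_bit_set(pend_private) ||
 *				   any_bit_set(pend_shared);
 *
 * compute_pending_for_cpu() further down is the real implementation of
 * this, and vgic_update_state() applies it to every vcpu.
 */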
static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu);
static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu);
static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr);
static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr, struct vgic_lr lr_desc);

static const struct vgic_ops *vgic_ops;
static const struct vgic_params *vgic;
static void add_sgi_source(struct kvm_vcpu *vcpu, int irq, int source)
{
	vcpu->kvm->arch.vgic.vm_ops.add_sgi_source(vcpu, irq, source);
}

static bool queue_sgi(struct kvm_vcpu *vcpu, int irq)
{
	return vcpu->kvm->arch.vgic.vm_ops.queue_sgi(vcpu, irq);
}

int kvm_vgic_map_resources(struct kvm *kvm)
{
	return kvm->arch.vgic.vm_ops.map_resources(kvm, vgic);
}
/*
 * struct vgic_bitmap contains a bitmap made of unsigned longs, but
 * extracts u32s out of them.
 *
 * This does not work on 64-bit BE systems, because the bitmap access
 * will store two consecutive 32-bit words with the higher-addressed
 * register's bits at the lower index and the lower-addressed register's
 * bits at the higher index.
 *
 * Therefore, swizzle the register index when accessing the 32-bit word
 * registers to access the right register's value.
 */
#if defined(CONFIG_CPU_BIG_ENDIAN) && BITS_PER_LONG == 64
#define REG_OFFSET_SWIZZLE	1
#else
#define REG_OFFSET_SWIZZLE	0
#endif
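/*
 * Illustrative sketch (not in the original sources): on a 64-bit BE host,
 * two consecutive 32-bit "registers" share one unsigned long, so the word
 * index is flipped with REG_OFFSET_SWIZZLE to pick the right half:
 *
 *	u32 *base = (u32 *)bitmap;			  // bitmap of longs
 *	u32 *reg0 = base + (0 ^ REG_OFFSET_SWIZZLE);	  // first register
 *	u32 *reg1 = base + (1 ^ REG_OFFSET_SWIZZLE);	  // second register
 *
 * On LE (or 32-bit) hosts the swizzle is 0 and the indices are untouched.
 */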
static int vgic_init_bitmap(struct vgic_bitmap *b, int nr_cpus, int nr_irqs)
{
	int nr_longs;

	nr_longs = nr_cpus + BITS_TO_LONGS(nr_irqs - VGIC_NR_PRIVATE_IRQS);

	b->private = kzalloc(sizeof(unsigned long) * nr_longs, GFP_KERNEL);
	if (!b->private)
		return -ENOMEM;

	b->shared = b->private + nr_cpus;

	return 0;
}

static void vgic_free_bitmap(struct vgic_bitmap *b)
{
	kfree(b->private);
	b->private = NULL;
	b->shared = NULL;
}
/*
 * Call this function to convert a u64 value to an unsigned long * bitmask
 * in a way that works on both 32-bit and 64-bit LE and BE platforms.
 *
 * Warning: Calling this function may modify *val.
 */
static unsigned long *u64_to_bitmask(u64 *val)
{
#if defined(CONFIG_CPU_BIG_ENDIAN) && BITS_PER_LONG == 32
	*val = (*val >> 32) | (*val << 32);
#endif
	return (unsigned long *)val;
}
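/*
 * Illustrative example (not part of the original code): on a 32-bit BE
 * host the two unsigned longs making up the mask would otherwise be walked
 * in the wrong order, so the halves of the u64 are swapped first:
 *
 *	u64 elrsr = vgic_get_elrsr(vcpu);
 *	unsigned long *bm = u64_to_bitmask(&elrsr);
 *	// for_each_set_bit(lr, bm, 64) now visits bits 0-31 from the low
 *	// word and bits 32-63 from the high word, as expected.
 */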
u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x, int cpuid, u32 offset)
{
	offset >>= 2;
	if (!offset)
		return (u32 *)(x->private + cpuid) + REG_OFFSET_SWIZZLE;
	else
		return (u32 *)(x->shared) + ((offset - 1) ^ REG_OFFSET_SWIZZLE);
}

static int vgic_bitmap_get_irq_val(struct vgic_bitmap *x,
				   int cpuid, int irq)
{
	if (irq < VGIC_NR_PRIVATE_IRQS)
		return test_bit(irq, x->private + cpuid);

	return test_bit(irq - VGIC_NR_PRIVATE_IRQS, x->shared);
}
void vgic_bitmap_set_irq_val(struct vgic_bitmap *x, int cpuid,
			     int irq, int val)
{
	unsigned long *reg;

	if (irq < VGIC_NR_PRIVATE_IRQS) {
		reg = x->private + cpuid;
	} else {
		reg = x->shared;
		irq -= VGIC_NR_PRIVATE_IRQS;
	}

	if (val)
		set_bit(irq, reg);
	else
		clear_bit(irq, reg);
}

static unsigned long *vgic_bitmap_get_cpu_map(struct vgic_bitmap *x, int cpuid)
{
	return x->private + cpuid;
}

unsigned long *vgic_bitmap_get_shared_map(struct vgic_bitmap *x)
{
	return x->shared;
}
static int vgic_init_bytemap(struct vgic_bytemap *x, int nr_cpus, int nr_irqs)
{
	int size;

	size  = nr_cpus * VGIC_NR_PRIVATE_IRQS;
	size += nr_irqs - VGIC_NR_PRIVATE_IRQS;

	x->private = kzalloc(size, GFP_KERNEL);
	if (!x->private)
		return -ENOMEM;

	x->shared = x->private + nr_cpus * VGIC_NR_PRIVATE_IRQS / sizeof(u32);
	return 0;
}
static void vgic_free_bytemap(struct vgic_bytemap *b)
{
	kfree(b->private);
	b->private = NULL;
	b->shared = NULL;
}

u32 *vgic_bytemap_get_reg(struct vgic_bytemap *x, int cpuid, u32 offset)
{
	u32 *reg;

	if (offset < VGIC_NR_PRIVATE_IRQS) {
		reg = x->private;
		offset += cpuid * VGIC_NR_PRIVATE_IRQS;
	} else {
		reg = x->shared;
		offset -= VGIC_NR_PRIVATE_IRQS;
	}

	return reg + (offset / sizeof(u32));
}
#define VGIC_CFG_LEVEL	0
#define VGIC_CFG_EDGE	1

static bool vgic_irq_is_edge(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int irq_val;

	irq_val = vgic_bitmap_get_irq_val(&dist->irq_cfg, vcpu->vcpu_id, irq);
	return irq_val == VGIC_CFG_EDGE;
}

static int vgic_irq_is_enabled(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return vgic_bitmap_get_irq_val(&dist->irq_enabled, vcpu->vcpu_id, irq);
}

static int vgic_irq_is_queued(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return vgic_bitmap_get_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq);
}

static void vgic_irq_set_queued(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq, 1);
}

static void vgic_irq_clear_queued(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq, 0);
}

static int vgic_dist_irq_get_level(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return vgic_bitmap_get_irq_val(&dist->irq_level, vcpu->vcpu_id, irq);
}

static void vgic_dist_irq_set_level(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_level, vcpu->vcpu_id, irq, 1);
}

static void vgic_dist_irq_clear_level(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_level, vcpu->vcpu_id, irq, 0);
}

static int vgic_dist_irq_soft_pend(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return vgic_bitmap_get_irq_val(&dist->irq_soft_pend, vcpu->vcpu_id, irq);
}

static void vgic_dist_irq_clear_soft_pend(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_soft_pend, vcpu->vcpu_id, irq, 0);
}

static int vgic_dist_irq_is_pending(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return vgic_bitmap_get_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq);
}

void vgic_dist_irq_set_pending(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq, 1);
}

void vgic_dist_irq_clear_pending(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq, 0);
}

static void vgic_cpu_irq_set(struct kvm_vcpu *vcpu, int irq)
{
	if (irq < VGIC_NR_PRIVATE_IRQS)
		set_bit(irq, vcpu->arch.vgic_cpu.pending_percpu);
	else
		set_bit(irq - VGIC_NR_PRIVATE_IRQS,
			vcpu->arch.vgic_cpu.pending_shared);
}

void vgic_cpu_irq_clear(struct kvm_vcpu *vcpu, int irq)
{
	if (irq < VGIC_NR_PRIVATE_IRQS)
		clear_bit(irq, vcpu->arch.vgic_cpu.pending_percpu);
	else
		clear_bit(irq - VGIC_NR_PRIVATE_IRQS,
			  vcpu->arch.vgic_cpu.pending_shared);
}

static bool vgic_can_sample_irq(struct kvm_vcpu *vcpu, int irq)
{
	return vgic_irq_is_edge(vcpu, irq) || !vgic_irq_is_queued(vcpu, irq);
}
/**
 * vgic_reg_access - access vgic register
 * @mmio:   pointer to the data describing the mmio access
 * @reg:    pointer to the virtual backing of vgic distributor data
 * @offset: least significant 2 bits used for word offset
 * @mode:   ACCESS_ mode (see defines above)
 *
 * Helper to make vgic register access easier using one of the access
 * modes defined for vgic register access
 * (read,raz,write-ignored,setbit,clearbit,write)
 */
void vgic_reg_access(struct kvm_exit_mmio *mmio, u32 *reg,
		     phys_addr_t offset, int mode)
{
	int word_offset = (offset & 3) * 8;
	u32 mask = (1UL << (mmio->len * 8)) - 1;
	u32 regval;

	/*
	 * Any alignment fault should have been delivered to the guest
	 * directly (ARM ARM B3.12.7 "Prioritization of aborts").
	 */

	if (reg) {
		regval = *reg;
	} else {
		BUG_ON(mode != (ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED));
		regval = 0;
	}

	if (mmio->is_write) {
		u32 data = mmio_data_read(mmio, mask) << word_offset;
		switch (ACCESS_WRITE_MASK(mode)) {
		case ACCESS_WRITE_IGNORED:
			return;

		case ACCESS_WRITE_SETBIT:
			regval |= data;
			break;

		case ACCESS_WRITE_CLEARBIT:
			regval &= ~data;
			break;

		case ACCESS_WRITE_VALUE:
			regval = (regval & ~(mask << word_offset)) | data;
			break;
		}
		*reg = regval;
	} else {
		switch (ACCESS_READ_MASK(mode)) {
		case ACCESS_READ_RAZ:
			regval = 0;
			/* fall through */

		case ACCESS_READ_VALUE:
			mmio_data_write(mmio, mask, regval >> word_offset);
		}
	}
}
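/*
 * Usage sketch (illustrative, not from the original file): a handler for a
 * set-enable style register typically fetches the backing word and lets
 * vgic_reg_access() apply the guest's write as a set-bit operation:
 *
 *	u32 *reg = vgic_bitmap_get_reg(&dist->irq_enabled, vcpu_id, offset);
 *	vgic_reg_access(mmio, reg, offset,
 *			ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);
 *
 * This is essentially what vgic_handle_enable_reg() below does.
 */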
bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
			phys_addr_t offset)
{
	vgic_reg_access(mmio, NULL, offset,
			ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
	return false;
}

bool vgic_handle_enable_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio,
			    phys_addr_t offset, int vcpu_id, int access)
{
	u32 *reg;
	int mode = ACCESS_READ_VALUE | access;
	struct kvm_vcpu *target_vcpu = kvm_get_vcpu(kvm, vcpu_id);

	reg = vgic_bitmap_get_reg(&kvm->arch.vgic.irq_enabled, vcpu_id, offset);
	vgic_reg_access(mmio, reg, offset, mode);
	if (mmio->is_write) {
		if (access & ACCESS_WRITE_CLEARBIT) {
			if (offset < 4) /* Force SGI enabled */
				*reg |= 0xffff;
			vgic_retire_disabled_irqs(target_vcpu);
		}
		vgic_update_state(kvm);
		return true;
	}

	return false;
}
bool vgic_handle_set_pending_reg(struct kvm *kvm,
				 struct kvm_exit_mmio *mmio,
				 phys_addr_t offset, int vcpu_id)
{
	u32 *reg, orig;
	u32 level_mask;
	int mode = ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT;
	struct vgic_dist *dist = &kvm->arch.vgic;

	reg = vgic_bitmap_get_reg(&dist->irq_cfg, vcpu_id, offset);
	level_mask = (~(*reg));

	/* Mark both level and edge triggered irqs as pending */
	reg = vgic_bitmap_get_reg(&dist->irq_pending, vcpu_id, offset);
	orig = *reg;
	vgic_reg_access(mmio, reg, offset, mode);

	if (mmio->is_write) {
		/* Set the soft-pending flag only for level-triggered irqs */
		reg = vgic_bitmap_get_reg(&dist->irq_soft_pend,
					  vcpu_id, offset);
		vgic_reg_access(mmio, reg, offset, mode);
		*reg &= level_mask;

		/* Ignore writes to SGIs */
		if (offset < 2) {
			*reg &= ~0xffff;
			*reg |= orig & 0xffff;
		}

		vgic_update_state(kvm);
		return true;
	}

	return false;
}

bool vgic_handle_clear_pending_reg(struct kvm *kvm,
				   struct kvm_exit_mmio *mmio,
				   phys_addr_t offset, int vcpu_id)
{
	u32 *level_active;
	u32 *reg, orig;
	int mode = ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT;
	struct vgic_dist *dist = &kvm->arch.vgic;

	reg = vgic_bitmap_get_reg(&dist->irq_pending, vcpu_id, offset);
	orig = *reg;
	vgic_reg_access(mmio, reg, offset, mode);
	if (mmio->is_write) {
		/* Re-set level triggered level-active interrupts */
		level_active = vgic_bitmap_get_reg(&dist->irq_level,
						   vcpu_id, offset);
		reg = vgic_bitmap_get_reg(&dist->irq_pending, vcpu_id, offset);
		*reg |= *level_active;

		/* Ignore writes to SGIs */
		if (offset < 2) {
			*reg &= ~0xffff;
			*reg |= orig & 0xffff;
		}

		/* Clear soft-pending flags */
		reg = vgic_bitmap_get_reg(&dist->irq_soft_pend,
					  vcpu_id, offset);
		vgic_reg_access(mmio, reg, offset, mode);

		vgic_update_state(kvm);
		return true;
	}
	return false;
}
static u32 vgic_cfg_expand(u16 val)
{
	u32 res = 0;
	int i;

	/*
	 * Turn a 16bit value like abcd...mnop into a 32bit word
	 * a0b0c0d0...m0n0o0p0, which is what the HW cfg register is.
	 */
	for (i = 0; i < 16; i++)
		res |= ((val >> i) & VGIC_CFG_EDGE) << (2 * i + 1);

	return res;
}

static u16 vgic_cfg_compress(u32 val)
{
	u16 res = 0;
	int i;

	/*
	 * Turn a 32bit word a0b0c0d0...m0n0o0p0 into 16bit value like
	 * abcd...mnop which is what we really care about.
	 */
	for (i = 0; i < 16; i++)
		res |= ((val >> (i * 2 + 1)) & VGIC_CFG_EDGE) << i;

	return res;
}
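/*
 * Worked example (illustrative, not part of the original code): with the
 * two lowest interrupts configured as edge-triggered,
 *
 *	vgic_cfg_expand(0x0003)   == 0x0000000a	 (bits 1 and 3 set)
 *	vgic_cfg_compress(0x000a) == 0x0003
 *
 * i.e. each kept configuration bit becomes the upper bit of the matching
 * 2-bit GICD_ICFGRn field, and compress reverses the transformation.
 */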
/*
 * The distributor uses 2 bits per IRQ for the CFG register, but the
 * LSB is always 0. As such, we only keep the upper bit, and use the
 * two above functions to compress/expand the bits
 */
bool vgic_handle_cfg_reg(u32 *reg, struct kvm_exit_mmio *mmio,
			 phys_addr_t offset)
{
	u32 val;

	if (offset & 4)
		val = *reg >> 16;
	else
		val = *reg & 0xffff;

	val = vgic_cfg_expand(val);
	vgic_reg_access(mmio, &val, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
	if (mmio->is_write) {
		if (offset < 8) {
			*reg = ~0U; /* Force PPIs/SGIs to 1 */
			return false;
		}

		val = vgic_cfg_compress(val);
		if (offset & 4) {
			*reg &= 0xffff;
			*reg |= val << 16;
		} else {
			*reg &= 0xffff << 16;
			*reg |= val;
		}
	}

	return false;
}
/**
 * vgic_unqueue_irqs - move pending IRQs from LRs to the distributor
 * @vgic_cpu: Pointer to the vgic_cpu struct holding the LRs
 *
 * Move any pending IRQs that have already been assigned to LRs back to the
 * emulated distributor state so that the complete emulated state can be read
 * from the main emulation structures without investigating the LRs.
 *
 * Note that IRQs in the active state in the LRs get their pending state moved
 * to the distributor but the active state stays in the LRs, because we don't
 * track the active state on the distributor side.
 */
void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	int i;

	for_each_set_bit(i, vgic_cpu->lr_used, vgic_cpu->nr_lr) {
		struct vgic_lr lr = vgic_get_lr(vcpu, i);

		/*
		 * There are three options for the state bits:
		 *
		 * 01: pending
		 * 10: active
		 * 11: pending and active
		 *
		 * If the LR holds only an active interrupt (not pending) then
		 * just leave it alone.
		 */
		if ((lr.state & LR_STATE_MASK) == LR_STATE_ACTIVE)
			continue;

		/*
		 * Reestablish the pending state on the distributor and the
		 * CPU interface.  It may have already been pending, but that
		 * is fine, then we are only setting a few bits that were
		 * already set.
		 */
		vgic_dist_irq_set_pending(vcpu, lr.irq);
		if (lr.irq < VGIC_NR_SGIS)
			add_sgi_source(vcpu, lr.irq, lr.source);
		lr.state &= ~LR_STATE_PENDING;
		vgic_set_lr(vcpu, i, lr);

		/*
		 * If there's no state left on the LR (it could still be
		 * active), then the LR does not hold any useful info and can
		 * be marked as free for other use.
		 */
		if (!(lr.state & LR_STATE_MASK)) {
			vgic_retire_lr(i, lr.irq, vcpu);
			vgic_irq_clear_queued(vcpu, lr.irq);
		}

		/* Finally update the VGIC state. */
		vgic_update_state(vcpu->kvm);
	}
}
const
struct kvm_mmio_range *vgic_find_range(const struct kvm_mmio_range *ranges,
				       struct kvm_exit_mmio *mmio,
				       phys_addr_t offset)
{
	const struct kvm_mmio_range *r = ranges;

	while (r->len) {
		if (offset >= r->base &&
		    (offset + mmio->len) <= (r->base + r->len))
			return r;
		r++;
	}

	return NULL;
}

static bool vgic_validate_access(const struct vgic_dist *dist,
				 const struct kvm_mmio_range *range,
				 unsigned long offset)
{
	int irq;

	if (!range->bits_per_irq)
		return true;	/* Not an irq-based access */

	irq = offset * 8 / range->bits_per_irq;
	if (irq >= dist->nr_irqs)
		return false;

	return true;
}
/*
 * Call the respective handler function for the given range.
 * We split up any 64 bit accesses into two consecutive 32 bit
 * handler calls and merge the result afterwards.
 * We do this in a little endian fashion regardless of the host's
 * or guest's endianness, because the GIC is always LE and the rest of
 * the code (vgic_reg_access) also puts it in a LE fashion already.
 * At this point we have already identified the handle function, so
 * range points to that one entry and offset is relative to this.
 */
static bool call_range_handler(struct kvm_vcpu *vcpu,
			       struct kvm_exit_mmio *mmio,
			       unsigned long offset,
			       const struct kvm_mmio_range *range)
{
	u32 *data32 = (void *)mmio->data;
	struct kvm_exit_mmio mmio32;
	bool ret;

	if (likely(mmio->len <= 4))
		return range->handle_mmio(vcpu, mmio, offset);

	/*
	 * Any access bigger than 4 bytes (that we currently handle in KVM)
	 * is actually 8 bytes long, caused by a 64-bit access
	 */

	mmio32.len = 4;
	mmio32.is_write = mmio->is_write;
	mmio32.private = mmio->private;

	mmio32.phys_addr = mmio->phys_addr + 4;
	if (mmio->is_write)
		*(u32 *)mmio32.data = data32[1];
	ret = range->handle_mmio(vcpu, &mmio32, offset + 4);
	if (!mmio->is_write)
		data32[1] = *(u32 *)mmio32.data;

	mmio32.phys_addr = mmio->phys_addr;
	if (mmio->is_write)
		*(u32 *)mmio32.data = data32[0];
	ret |= range->handle_mmio(vcpu, &mmio32, offset);
	if (!mmio->is_write)
		data32[0] = *(u32 *)mmio32.data;

	return ret;
}
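/*
 * Illustrative example (assumption, not from the original file): a 64-bit
 * guest write to a GICv3 GICD_IROUTERn register with mmio->len == 8 ends
 * up as two 32-bit handler invocations, upper word first:
 *
 *	range->handle_mmio(vcpu, &mmio32, offset + 4);	// bits [63:32]
 *	range->handle_mmio(vcpu, &mmio32, offset);	// bits [31:0]
 *
 * and the two boolean results are OR-ed into a single return value.
 */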
/**
 * vgic_handle_mmio_range - handle an in-kernel MMIO access
 * @vcpu:	pointer to the vcpu performing the access
 * @run:	pointer to the kvm_run structure
 * @mmio:	pointer to the data describing the access
 * @ranges:	array of MMIO ranges in a given region
 * @mmio_base:	base address of that region
 *
 * returns true if the MMIO access could be performed
 */
bool vgic_handle_mmio_range(struct kvm_vcpu *vcpu, struct kvm_run *run,
			    struct kvm_exit_mmio *mmio,
			    const struct kvm_mmio_range *ranges,
			    unsigned long mmio_base)
{
	const struct kvm_mmio_range *range;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	bool updated_state;
	unsigned long offset;

	offset = mmio->phys_addr - mmio_base;
	range = vgic_find_range(ranges, mmio, offset);
	if (unlikely(!range || !range->handle_mmio)) {
		pr_warn("Unhandled access %d %08llx %d\n",
			mmio->is_write, mmio->phys_addr, mmio->len);
		return false;
	}

	spin_lock(&vcpu->kvm->arch.vgic.lock);
	offset -= range->base;
	if (vgic_validate_access(dist, range, offset)) {
		updated_state = call_range_handler(vcpu, mmio, offset, range);
	} else {
		if (!mmio->is_write)
			memset(mmio->data, 0, mmio->len);
		updated_state = false;
	}
	spin_unlock(&vcpu->kvm->arch.vgic.lock);
	kvm_prepare_mmio(run, mmio);
	kvm_handle_mmio_return(vcpu, run);

	if (updated_state)
		vgic_kick_vcpus(vcpu->kvm);

	return true;
}
/**
 * vgic_handle_mmio - handle an in-kernel MMIO access for the GIC emulation
 * @vcpu:      pointer to the vcpu performing the access
 * @run:       pointer to the kvm_run structure
 * @mmio:      pointer to the data describing the access
 *
 * returns true if the MMIO access has been performed in kernel space,
 * and false if it needs to be emulated in user space.
 * Calls the actual handling routine for the selected VGIC model.
 */
bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
		      struct kvm_exit_mmio *mmio)
{
	if (!irqchip_in_kernel(vcpu->kvm))
		return false;

	/*
	 * This will currently call either vgic_v2_handle_mmio() or
	 * vgic_v3_handle_mmio(), which in turn will call
	 * vgic_handle_mmio_range() defined above.
	 */
	return vcpu->kvm->arch.vgic.vm_ops.handle_mmio(vcpu, run, mmio);
}

static int vgic_nr_shared_irqs(struct vgic_dist *dist)
{
	return dist->nr_irqs - VGIC_NR_PRIVATE_IRQS;
}
static int compute_pending_for_cpu(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	unsigned long *pending, *enabled, *pend_percpu, *pend_shared;
	unsigned long pending_private, pending_shared;
	int nr_shared = vgic_nr_shared_irqs(dist);
	int vcpu_id;

	vcpu_id = vcpu->vcpu_id;
	pend_percpu = vcpu->arch.vgic_cpu.pending_percpu;
	pend_shared = vcpu->arch.vgic_cpu.pending_shared;

	pending = vgic_bitmap_get_cpu_map(&dist->irq_pending, vcpu_id);
	enabled = vgic_bitmap_get_cpu_map(&dist->irq_enabled, vcpu_id);
	bitmap_and(pend_percpu, pending, enabled, VGIC_NR_PRIVATE_IRQS);

	pending = vgic_bitmap_get_shared_map(&dist->irq_pending);
	enabled = vgic_bitmap_get_shared_map(&dist->irq_enabled);
	bitmap_and(pend_shared, pending, enabled, nr_shared);
	bitmap_and(pend_shared, pend_shared,
		   vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]),
		   nr_shared);

	pending_private = find_first_bit(pend_percpu, VGIC_NR_PRIVATE_IRQS);
	pending_shared = find_first_bit(pend_shared, nr_shared);
	return (pending_private < VGIC_NR_PRIVATE_IRQS ||
		pending_shared < vgic_nr_shared_irqs(dist));
}
/*
 * Update the interrupt state and determine which CPUs have pending
 * interrupts. Must be called with distributor lock held.
 */
void vgic_update_state(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int c;

	if (!dist->enabled) {
		set_bit(0, dist->irq_pending_on_cpu);
		return;
	}

	kvm_for_each_vcpu(c, vcpu, kvm) {
		if (compute_pending_for_cpu(vcpu)) {
			pr_debug("CPU%d has pending interrupts\n", c);
			set_bit(c, dist->irq_pending_on_cpu);
		}
	}
}
static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr)
{
	return vgic_ops->get_lr(vcpu, lr);
}

static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr,
			struct vgic_lr vlr)
{
	vgic_ops->set_lr(vcpu, lr, vlr);
}

static void vgic_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr,
			       struct vgic_lr vlr)
{
	vgic_ops->sync_lr_elrsr(vcpu, lr, vlr);
}

static inline u64 vgic_get_elrsr(struct kvm_vcpu *vcpu)
{
	return vgic_ops->get_elrsr(vcpu);
}

static inline u64 vgic_get_eisr(struct kvm_vcpu *vcpu)
{
	return vgic_ops->get_eisr(vcpu);
}

static inline void vgic_clear_eisr(struct kvm_vcpu *vcpu)
{
	vgic_ops->clear_eisr(vcpu);
}

static inline u32 vgic_get_interrupt_status(struct kvm_vcpu *vcpu)
{
	return vgic_ops->get_interrupt_status(vcpu);
}

static inline void vgic_enable_underflow(struct kvm_vcpu *vcpu)
{
	vgic_ops->enable_underflow(vcpu);
}

static inline void vgic_disable_underflow(struct kvm_vcpu *vcpu)
{
	vgic_ops->disable_underflow(vcpu);
}

void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
	vgic_ops->get_vmcr(vcpu, vmcr);
}

void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
	vgic_ops->set_vmcr(vcpu, vmcr);
}

static inline void vgic_enable(struct kvm_vcpu *vcpu)
{
	vgic_ops->enable(vcpu);
}
static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_lr vlr = vgic_get_lr(vcpu, lr_nr);

	vlr.state = 0;
	vgic_set_lr(vcpu, lr_nr, vlr);
	clear_bit(lr_nr, vgic_cpu->lr_used);
	vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY;
	vgic_sync_lr_elrsr(vcpu, lr_nr, vlr);
}

/*
 * An interrupt may have been disabled after being made pending on the
 * CPU interface (the classic case is a timer running while we're
 * rebooting the guest - the interrupt would kick as soon as the CPU
 * interface gets enabled, with deadly consequences).
 *
 * The solution is to examine already active LRs, and check the
 * interrupt is still enabled. If not, just retire it.
 */
static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	int lr;

	for_each_set_bit(lr, vgic_cpu->lr_used, vgic->nr_lr) {
		struct vgic_lr vlr = vgic_get_lr(vcpu, lr);

		if (!vgic_irq_is_enabled(vcpu, vlr.irq)) {
			vgic_retire_lr(lr, vlr.irq, vcpu);
			if (vgic_irq_is_queued(vcpu, vlr.irq))
				vgic_irq_clear_queued(vcpu, vlr.irq);
		}
	}
}
/*
 * Queue an interrupt to a CPU virtual interface. Return true on success,
 * or false if it wasn't possible to queue it.
 * sgi_source must be zero for any non-SGI interrupts.
 */
bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	struct vgic_lr vlr;
	int lr;

	/* Sanitize the input... */
	BUG_ON(sgi_source_id & ~7);
	BUG_ON(sgi_source_id && irq >= VGIC_NR_SGIS);
	BUG_ON(irq >= dist->nr_irqs);

	kvm_debug("Queue IRQ%d\n", irq);

	lr = vgic_cpu->vgic_irq_lr_map[irq];

	/* Do we have an active interrupt for the same CPUID? */
	if (lr != LR_EMPTY) {
		vlr = vgic_get_lr(vcpu, lr);
		if (vlr.source == sgi_source_id) {
			kvm_debug("LR%d piggyback for IRQ%d\n", lr, vlr.irq);
			BUG_ON(!test_bit(lr, vgic_cpu->lr_used));
			vlr.state |= LR_STATE_PENDING;
			vgic_set_lr(vcpu, lr, vlr);
			vgic_sync_lr_elrsr(vcpu, lr, vlr);
			return true;
		}
	}

	/* Try to use another LR for this interrupt */
	lr = find_first_zero_bit((unsigned long *)vgic_cpu->lr_used,
				 vgic->nr_lr);
	if (lr >= vgic->nr_lr)
		return false;

	kvm_debug("LR%d allocated for IRQ%d %x\n", lr, irq, sgi_source_id);
	vgic_cpu->vgic_irq_lr_map[irq] = lr;
	set_bit(lr, vgic_cpu->lr_used);

	vlr.irq = irq;
	vlr.source = sgi_source_id;
	vlr.state = LR_STATE_PENDING;
	if (!vgic_irq_is_edge(vcpu, irq))
		vlr.state |= LR_EOI_INT;

	vgic_set_lr(vcpu, lr, vlr);
	vgic_sync_lr_elrsr(vcpu, lr, vlr);

	return true;
}
static bool vgic_queue_hwirq(struct kvm_vcpu *vcpu, int irq)
{
	if (!vgic_can_sample_irq(vcpu, irq))
		return true; /* level interrupt, already queued */

	if (vgic_queue_irq(vcpu, 0, irq)) {
		if (vgic_irq_is_edge(vcpu, irq)) {
			vgic_dist_irq_clear_pending(vcpu, irq);
			vgic_cpu_irq_clear(vcpu, irq);
		} else {
			vgic_irq_set_queued(vcpu, irq);
		}

		return true;
	}

	return false;
}
/*
 * Fill the list registers with pending interrupts before running the
 * guest.
 */
static void __kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int i, vcpu_id;
	int overflow = 0;

	vcpu_id = vcpu->vcpu_id;

	/*
	 * We may not have any pending interrupt, or the interrupts
	 * may have been serviced from another vcpu. In all cases,
	 * move along.
	 */
	if (!kvm_vgic_vcpu_pending_irq(vcpu)) {
		pr_debug("CPU%d has no pending interrupt\n", vcpu_id);
		goto epilog;
	}

	/* SGIs */
	for_each_set_bit(i, vgic_cpu->pending_percpu, VGIC_NR_SGIS) {
		if (!queue_sgi(vcpu, i))
			overflow = 1;
	}

	/* PPIs */
	for_each_set_bit_from(i, vgic_cpu->pending_percpu, VGIC_NR_PRIVATE_IRQS) {
		if (!vgic_queue_hwirq(vcpu, i))
			overflow = 1;
	}

	/* SPIs */
	for_each_set_bit(i, vgic_cpu->pending_shared, vgic_nr_shared_irqs(dist)) {
		if (!vgic_queue_hwirq(vcpu, i + VGIC_NR_PRIVATE_IRQS))
			overflow = 1;
	}

epilog:
	if (overflow) {
		vgic_enable_underflow(vcpu);
	} else {
		vgic_disable_underflow(vcpu);
		/*
		 * We're about to run this VCPU, and we've consumed
		 * everything the distributor had in store for
		 * us. Claim we don't have anything pending. We'll
		 * adjust that if needed while exiting.
		 */
		clear_bit(vcpu_id, dist->irq_pending_on_cpu);
	}
}
static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
{
	u32 status = vgic_get_interrupt_status(vcpu);
	bool level_pending = false;

	kvm_debug("STATUS = %08x\n", status);

	if (status & INT_STATUS_EOI) {
		/*
		 * Some level interrupts have been EOIed. Clear their
		 * active bit.
		 */
		u64 eisr = vgic_get_eisr(vcpu);
		unsigned long *eisr_ptr = u64_to_bitmask(&eisr);
		int lr;

		for_each_set_bit(lr, eisr_ptr, vgic->nr_lr) {
			struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
			WARN_ON(vgic_irq_is_edge(vcpu, vlr.irq));

			vgic_irq_clear_queued(vcpu, vlr.irq);
			WARN_ON(vlr.state & LR_STATE_MASK);
			vlr.state = 0;
			vgic_set_lr(vcpu, lr, vlr);

			/*
			 * If the IRQ was EOIed it was also ACKed and we
			 * therefore assume we can clear the soft pending
			 * state (should it have been set) for this interrupt.
			 *
			 * Note: if the IRQ soft pending state was set after
			 * the IRQ was acked, it actually shouldn't be
			 * cleared, but we have no way of knowing that unless
			 * we start trapping ACKs when the soft-pending state
			 * is set.
			 */
			vgic_dist_irq_clear_soft_pend(vcpu, vlr.irq);

			/* Any additional pending interrupt? */
			if (vgic_dist_irq_get_level(vcpu, vlr.irq)) {
				vgic_cpu_irq_set(vcpu, vlr.irq);
				level_pending = true;
			} else {
				vgic_dist_irq_clear_pending(vcpu, vlr.irq);
				vgic_cpu_irq_clear(vcpu, vlr.irq);
			}

			/*
			 * Despite being EOIed, the LR may not have
			 * been marked as empty.
			 */
			vgic_sync_lr_elrsr(vcpu, lr, vlr);
		}
	}

	if (status & INT_STATUS_UNDERFLOW)
		vgic_disable_underflow(vcpu);

	/*
	 * In the next iterations of the vcpu loop, if we sync the vgic state
	 * after flushing it, but before entering the guest (this happens for
	 * pending signals and vmid rollovers), then make sure we don't pick
	 * up any old maintenance interrupts here.
	 */
	vgic_clear_eisr(vcpu);

	return level_pending;
}
/*
 * Sync back the VGIC state after a guest run. The distributor lock is
 * needed so we don't get preempted in the middle of the state processing.
 */
static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	u64 elrsr;
	unsigned long *elrsr_ptr;
	int lr, pending;
	bool level_pending;

	level_pending = vgic_process_maintenance(vcpu);
	elrsr = vgic_get_elrsr(vcpu);
	elrsr_ptr = u64_to_bitmask(&elrsr);

	/* Clear mappings for empty LRs */
	for_each_set_bit(lr, elrsr_ptr, vgic->nr_lr) {
		struct vgic_lr vlr;

		if (!test_and_clear_bit(lr, vgic_cpu->lr_used))
			continue;

		vlr = vgic_get_lr(vcpu, lr);

		BUG_ON(vlr.irq >= dist->nr_irqs);
		vgic_cpu->vgic_irq_lr_map[vlr.irq] = LR_EMPTY;
	}

	/* Check if we still have something up our sleeve... */
	pending = find_first_zero_bit(elrsr_ptr, vgic->nr_lr);
	if (level_pending || pending < vgic->nr_lr)
		set_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu);
}
void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	if (!irqchip_in_kernel(vcpu->kvm))
		return;

	spin_lock(&dist->lock);
	__kvm_vgic_flush_hwstate(vcpu);
	spin_unlock(&dist->lock);
}

void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	if (!irqchip_in_kernel(vcpu->kvm))
		return;

	spin_lock(&dist->lock);
	__kvm_vgic_sync_hwstate(vcpu);
	spin_unlock(&dist->lock);
}

int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	if (!irqchip_in_kernel(vcpu->kvm))
		return 0;

	return test_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu);
}
void vgic_kick_vcpus(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int c;

	/*
	 * We've injected an interrupt, time to find out who deserves
	 * a good kick...
	 */
	kvm_for_each_vcpu(c, vcpu, kvm) {
		if (kvm_vgic_vcpu_pending_irq(vcpu))
			kvm_vcpu_kick(vcpu);
	}
}
static int vgic_validate_injection(struct kvm_vcpu *vcpu, int irq, int level)
{
	int edge_triggered = vgic_irq_is_edge(vcpu, irq);

	/*
	 * Only inject an interrupt if:
	 * - edge triggered and we have a rising edge
	 * - level triggered and we change level
	 */
	if (edge_triggered) {
		int state = vgic_dist_irq_is_pending(vcpu, irq);
		return level > state;
	} else {
		int state = vgic_dist_irq_get_level(vcpu, irq);
		return level != state;
	}
}
static int vgic_update_irq_pending(struct kvm *kvm, int cpuid,
				   unsigned int irq_num, bool level)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int edge_triggered, level_triggered;
	int enabled;
	bool ret = true, can_inject = true;

	spin_lock(&dist->lock);

	vcpu = kvm_get_vcpu(kvm, cpuid);
	edge_triggered = vgic_irq_is_edge(vcpu, irq_num);
	level_triggered = !edge_triggered;

	if (!vgic_validate_injection(vcpu, irq_num, level)) {
		ret = false;
		goto out;
	}

	if (irq_num >= VGIC_NR_PRIVATE_IRQS) {
		cpuid = dist->irq_spi_cpu[irq_num - VGIC_NR_PRIVATE_IRQS];
		if (cpuid == VCPU_NOT_ALLOCATED) {
			/* Pretend we use CPU0, and prevent injection */
			cpuid = 0;
			can_inject = false;
		}
		vcpu = kvm_get_vcpu(kvm, cpuid);
	}

	kvm_debug("Inject IRQ%d level %d CPU%d\n", irq_num, level, cpuid);

	if (level) {
		if (level_triggered)
			vgic_dist_irq_set_level(vcpu, irq_num);
		vgic_dist_irq_set_pending(vcpu, irq_num);
	} else {
		if (level_triggered) {
			vgic_dist_irq_clear_level(vcpu, irq_num);
			if (!vgic_dist_irq_soft_pend(vcpu, irq_num))
				vgic_dist_irq_clear_pending(vcpu, irq_num);
		}

		ret = false;
		goto out;
	}

	enabled = vgic_irq_is_enabled(vcpu, irq_num);

	if (!enabled || !can_inject) {
		ret = false;
		goto out;
	}

	if (!vgic_can_sample_irq(vcpu, irq_num)) {
		/*
		 * Level interrupt in progress, will be picked up
		 * when EOId.
		 */
		ret = false;
		goto out;
	}

	if (level) {
		vgic_cpu_irq_set(vcpu, irq_num);
		set_bit(cpuid, dist->irq_pending_on_cpu);
	}

out:
	spin_unlock(&dist->lock);

	return ret ? cpuid : -EINVAL;
}
/**
 * kvm_vgic_inject_irq - Inject an IRQ from a device to the vgic
 * @kvm:     The VM structure pointer
 * @cpuid:   The CPU for PPIs
 * @irq_num: The IRQ number that is assigned to the device
 * @level:   Edge-triggered:  true:  to trigger the interrupt
 *			      false: to ignore the call
 *	     Level-sensitive  true:  activates an interrupt
 *			      false: deactivates an interrupt
 *
 * The GIC is not concerned with devices being active-LOW or active-HIGH for
 * level-sensitive interrupts.  You can think of the level parameter as 1
 * being HIGH and 0 being LOW and all devices being active-HIGH.
 */
int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
			bool level)
{
	int ret = 0;
	int vcpu_id;

	if (unlikely(!vgic_initialized(kvm))) {
		/*
		 * We only provide the automatic initialization of the VGIC
		 * for the legacy case of a GICv2. Any other type must
		 * be explicitly initialized once setup with the respective
		 * KVM device call.
		 */
		if (kvm->arch.vgic.vgic_model != KVM_DEV_TYPE_ARM_VGIC_V2) {
			ret = -EBUSY;
			goto out;
		}
		mutex_lock(&kvm->lock);
		ret = vgic_init(kvm);
		mutex_unlock(&kvm->lock);

		if (ret)
			goto out;
	}

	vcpu_id = vgic_update_irq_pending(kvm, cpuid, irq_num, level);
	if (vcpu_id >= 0) {
		/* kick the specified vcpu */
		kvm_vcpu_kick(kvm_get_vcpu(kvm, vcpu_id));
	}

out:
	return ret;
}
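/*
 * Usage sketch (illustrative, not part of the original file): an in-kernel
 * device model raising SPI 40 as a level-sensitive interrupt would do
 * something like
 *
 *	kvm_vgic_inject_irq(kvm, 0, 32 + 8, true);	// assert the line
 *	...
 *	kvm_vgic_inject_irq(kvm, 0, 32 + 8, false);	// deassert it
 *
 * For an edge-triggered interrupt only the 'true' call matters; a 'false'
 * call is filtered out by vgic_validate_injection().
 */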
static irqreturn_t vgic_maintenance_handler(int irq, void *data)
{
	/*
	 * We cannot rely on the vgic maintenance interrupt to be
	 * delivered synchronously. This means we can only use it to
	 * exit the VM, and we perform the handling of EOIed
	 * interrupts on the exit path (see vgic_process_maintenance).
	 */
	return IRQ_HANDLED;
}
void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

	kfree(vgic_cpu->pending_shared);
	kfree(vgic_cpu->vgic_irq_lr_map);
	vgic_cpu->pending_shared = NULL;
	vgic_cpu->vgic_irq_lr_map = NULL;
}
static int vgic_vcpu_init_maps(struct kvm_vcpu *vcpu, int nr_irqs)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

	int sz = (nr_irqs - VGIC_NR_PRIVATE_IRQS) / 8;
	vgic_cpu->pending_shared = kzalloc(sz, GFP_KERNEL);
	vgic_cpu->vgic_irq_lr_map = kmalloc(nr_irqs, GFP_KERNEL);

	if (!vgic_cpu->pending_shared || !vgic_cpu->vgic_irq_lr_map) {
		kvm_vgic_vcpu_destroy(vcpu);
		return -ENOMEM;
	}

	memset(vgic_cpu->vgic_irq_lr_map, LR_EMPTY, nr_irqs);

	/*
	 * Store the number of LRs per vcpu, so we don't have to go
	 * all the way to the distributor structure to find out. Only
	 * assembly code should use this one.
	 */
	vgic_cpu->nr_lr = vgic->nr_lr;

	return 0;
}

/**
 * kvm_vgic_get_max_vcpus - Get the maximum number of VCPUs allowed by HW
 *
 * The host's GIC naturally limits the maximum amount of VCPUs a guest
 * can use.
 */
int kvm_vgic_get_max_vcpus(void)
{
	return vgic->max_gic_vcpus;
}
void kvm_vgic_destroy(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int i;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_vgic_vcpu_destroy(vcpu);

	vgic_free_bitmap(&dist->irq_enabled);
	vgic_free_bitmap(&dist->irq_level);
	vgic_free_bitmap(&dist->irq_pending);
	vgic_free_bitmap(&dist->irq_soft_pend);
	vgic_free_bitmap(&dist->irq_queued);
	vgic_free_bitmap(&dist->irq_cfg);
	vgic_free_bytemap(&dist->irq_priority);
	if (dist->irq_spi_target) {
		for (i = 0; i < dist->nr_cpus; i++)
			vgic_free_bitmap(&dist->irq_spi_target[i]);
	}
	kfree(dist->irq_sgi_sources);
	kfree(dist->irq_spi_cpu);
	kfree(dist->irq_spi_mpidr);
	kfree(dist->irq_spi_target);
	kfree(dist->irq_pending_on_cpu);
	dist->irq_sgi_sources = NULL;
	dist->irq_spi_cpu = NULL;
	dist->irq_spi_target = NULL;
	dist->irq_pending_on_cpu = NULL;
}
/*
 * Allocate and initialize the various data structures. Must be called
 * with kvm->lock held!
 */
int vgic_init(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int nr_cpus, nr_irqs;
	int ret, i, vcpu_id;

	if (vgic_initialized(kvm))
		return 0;

	nr_cpus = dist->nr_cpus = atomic_read(&kvm->online_vcpus);
	if (!nr_cpus)		/* No vcpus? Can't be good... */
		return -ENODEV;

	/*
	 * If nobody configured the number of interrupts, use the
	 * legacy one.
	 */
	if (!dist->nr_irqs)
		dist->nr_irqs = VGIC_NR_IRQS_LEGACY;

	nr_irqs = dist->nr_irqs;

	ret  = vgic_init_bitmap(&dist->irq_enabled, nr_cpus, nr_irqs);
	ret |= vgic_init_bitmap(&dist->irq_level, nr_cpus, nr_irqs);
	ret |= vgic_init_bitmap(&dist->irq_pending, nr_cpus, nr_irqs);
	ret |= vgic_init_bitmap(&dist->irq_soft_pend, nr_cpus, nr_irqs);
	ret |= vgic_init_bitmap(&dist->irq_queued, nr_cpus, nr_irqs);
	ret |= vgic_init_bitmap(&dist->irq_cfg, nr_cpus, nr_irqs);
	ret |= vgic_init_bytemap(&dist->irq_priority, nr_cpus, nr_irqs);

	if (ret)
		goto out;

	dist->irq_sgi_sources = kzalloc(nr_cpus * VGIC_NR_SGIS, GFP_KERNEL);
	dist->irq_spi_cpu = kzalloc(nr_irqs - VGIC_NR_PRIVATE_IRQS, GFP_KERNEL);
	dist->irq_spi_target = kzalloc(sizeof(*dist->irq_spi_target) * nr_cpus,
				       GFP_KERNEL);
	dist->irq_pending_on_cpu = kzalloc(BITS_TO_LONGS(nr_cpus) * sizeof(long),
					   GFP_KERNEL);
	if (!dist->irq_sgi_sources ||
	    !dist->irq_spi_cpu ||
	    !dist->irq_spi_target ||
	    !dist->irq_pending_on_cpu) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < nr_cpus; i++)
		ret |= vgic_init_bitmap(&dist->irq_spi_target[i],
					nr_cpus, nr_irqs);

	if (ret)
		goto out;

	ret = kvm->arch.vgic.vm_ops.init_model(kvm);
	if (ret)
		goto out;

	kvm_for_each_vcpu(vcpu_id, vcpu, kvm) {
		ret = vgic_vcpu_init_maps(vcpu, nr_irqs);
		if (ret) {
			kvm_err("VGIC: Failed to allocate vcpu memory\n");
			break;
		}

		for (i = 0; i < dist->nr_irqs; i++) {
			if (i < VGIC_NR_PPIS)
				vgic_bitmap_set_irq_val(&dist->irq_enabled,
							vcpu->vcpu_id, i, 1);
			if (i < VGIC_NR_PRIVATE_IRQS)
				vgic_bitmap_set_irq_val(&dist->irq_cfg,
							vcpu->vcpu_id, i,
							VGIC_CFG_EDGE);
		}

		vgic_enable(vcpu);
	}

out:
	if (ret)
		kvm_vgic_destroy(kvm);

	return ret;
}
static int init_vgic_model(struct kvm *kvm, int type)
{
	switch (type) {
	case KVM_DEV_TYPE_ARM_VGIC_V2:
		vgic_v2_init_emulation(kvm);
		break;
#ifdef CONFIG_ARM_GIC_V3
	case KVM_DEV_TYPE_ARM_VGIC_V3:
		vgic_v3_init_emulation(kvm);
		break;
#endif
	default:
		return -ENODEV;
	}

	if (atomic_read(&kvm->online_vcpus) > kvm->arch.max_vcpus)
		return -E2BIG;

	return 0;
}
int kvm_vgic_create(struct kvm *kvm, u32 type)
{
	int i, vcpu_lock_idx = -1, ret;
	struct kvm_vcpu *vcpu;

	mutex_lock(&kvm->lock);

	if (irqchip_in_kernel(kvm)) {
		ret = -EEXIST;
		goto out;
	}

	/*
	 * This function is also called by the KVM_CREATE_IRQCHIP handler,
	 * which had no chance yet to check the availability of the GICv2
	 * emulation. So check this here again. KVM_CREATE_DEVICE does
	 * the proper checks already.
	 */
	if (type == KVM_DEV_TYPE_ARM_VGIC_V2 && !vgic->can_emulate_gicv2) {
		ret = -ENODEV;
		goto out;
	}

	/*
	 * Any time a vcpu is run, vcpu_load is called which tries to grab the
	 * vcpu->mutex.  By grabbing the vcpu->mutex of all VCPUs we ensure
	 * that no other VCPUs are run while we create the vgic.
	 */
	ret = -EBUSY;
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!mutex_trylock(&vcpu->mutex))
			goto out_unlock;
		vcpu_lock_idx = i;
	}

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (vcpu->arch.has_run_once)
			goto out_unlock;
	}
	ret = 0;

	ret = init_vgic_model(kvm, type);
	if (ret)
		goto out_unlock;

	spin_lock_init(&kvm->arch.vgic.lock);
	kvm->arch.vgic.in_kernel = true;
	kvm->arch.vgic.vgic_model = type;
	kvm->arch.vgic.vctrl_base = vgic->vctrl_base;
	kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF;
	kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF;
	kvm->arch.vgic.vgic_redist_base = VGIC_ADDR_UNDEF;

out_unlock:
	for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
		vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
		mutex_unlock(&vcpu->mutex);
	}

out:
	mutex_unlock(&kvm->lock);
	return ret;
}
static int vgic_ioaddr_overlap(struct kvm *kvm)
{
	phys_addr_t dist = kvm->arch.vgic.vgic_dist_base;
	phys_addr_t cpu = kvm->arch.vgic.vgic_cpu_base;

	if (IS_VGIC_ADDR_UNDEF(dist) || IS_VGIC_ADDR_UNDEF(cpu))
		return 0;
	if ((dist <= cpu && dist + KVM_VGIC_V2_DIST_SIZE > cpu) ||
	    (cpu <= dist && cpu + KVM_VGIC_V2_CPU_SIZE > dist))
		return -EBUSY;
	return 0;
}

static int vgic_ioaddr_assign(struct kvm *kvm, phys_addr_t *ioaddr,
			      phys_addr_t addr, phys_addr_t size)
{
	int ret;

	if (addr & ~KVM_PHYS_MASK)
		return -E2BIG;

	if (addr & (SZ_4K - 1))
		return -EINVAL;

	if (!IS_VGIC_ADDR_UNDEF(*ioaddr))
		return -EEXIST;
	if (addr + size < addr)
		return -EINVAL;

	*ioaddr = addr;
	ret = vgic_ioaddr_overlap(kvm);
	if (ret)
		*ioaddr = VGIC_ADDR_UNDEF;

	return ret;
}
/**
 * kvm_vgic_addr - set or get vgic VM base addresses
 * @kvm:   pointer to the vm struct
 * @type:  the VGIC addr type, one of KVM_VGIC_V[23]_ADDR_TYPE_XXX
 * @addr:  pointer to address value
 * @write: if true set the address in the VM address space, if false read the
 *         address
 *
 * Set or get the vgic base addresses for the distributor and the virtual CPU
 * interface in the VM physical address space.  These addresses are properties
 * of the emulated core/SoC and therefore user space initially knows this
 * information.
 */
int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
{
	int r = 0;
	struct vgic_dist *vgic = &kvm->arch.vgic;
	int type_needed;
	phys_addr_t *addr_ptr, block_size;
	phys_addr_t alignment;

	mutex_lock(&kvm->lock);
	switch (type) {
	case KVM_VGIC_V2_ADDR_TYPE_DIST:
		type_needed = KVM_DEV_TYPE_ARM_VGIC_V2;
		addr_ptr = &vgic->vgic_dist_base;
		block_size = KVM_VGIC_V2_DIST_SIZE;
		alignment = SZ_4K;
		break;
	case KVM_VGIC_V2_ADDR_TYPE_CPU:
		type_needed = KVM_DEV_TYPE_ARM_VGIC_V2;
		addr_ptr = &vgic->vgic_cpu_base;
		block_size = KVM_VGIC_V2_CPU_SIZE;
		alignment = SZ_4K;
		break;
#ifdef CONFIG_ARM_GIC_V3
	case KVM_VGIC_V3_ADDR_TYPE_DIST:
		type_needed = KVM_DEV_TYPE_ARM_VGIC_V3;
		addr_ptr = &vgic->vgic_dist_base;
		block_size = KVM_VGIC_V3_DIST_SIZE;
		alignment = SZ_64K;
		break;
	case KVM_VGIC_V3_ADDR_TYPE_REDIST:
		type_needed = KVM_DEV_TYPE_ARM_VGIC_V3;
		addr_ptr = &vgic->vgic_redist_base;
		block_size = KVM_VGIC_V3_REDIST_SIZE;
		alignment = SZ_64K;
		break;
#endif
	default:
		r = -ENODEV;
		goto out;
	}

	if (vgic->vgic_model != type_needed) {
		r = -ENODEV;
		goto out;
	}

	if (write) {
		if (!IS_ALIGNED(*addr, alignment))
			r = -EINVAL;
		else
			r = vgic_ioaddr_assign(kvm, addr_ptr, *addr,
					       block_size);
	} else {
		*addr = *addr_ptr;
	}

out:
	mutex_unlock(&kvm->lock);
	return r;
}
int vgic_set_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int r;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		u64 addr;
		unsigned long type = (unsigned long)attr->attr;

		if (copy_from_user(&addr, uaddr, sizeof(addr)))
			return -EFAULT;

		r = kvm_vgic_addr(dev->kvm, type, &addr, true);
		return (r == -ENODEV) ? -ENXIO : r;
	}
	case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
		u32 val;
		int ret = 0;

		if (get_user(val, uaddr))
			return -EFAULT;

		/*
		 * We require:
		 * - at least 32 SPIs on top of the 16 SGIs and 16 PPIs
		 * - at most 1024 interrupts
		 * - a multiple of 32 interrupts
		 */
		if (val < (VGIC_NR_PRIVATE_IRQS + 32) ||
		    val > VGIC_MAX_IRQS ||
		    (val & 31))
			return -EINVAL;

		mutex_lock(&dev->kvm->lock);

		if (vgic_ready(dev->kvm) || dev->kvm->arch.vgic.nr_irqs)
			ret = -EBUSY;
		else
			dev->kvm->arch.vgic.nr_irqs = val;

		mutex_unlock(&dev->kvm->lock);

		return ret;
	}
	case KVM_DEV_ARM_VGIC_GRP_CTRL: {
		switch (attr->attr) {
		case KVM_DEV_ARM_VGIC_CTRL_INIT:
			r = vgic_init(dev->kvm);
			return r;
		}
		break;
	}
	}

	return -ENXIO;
}
int vgic_get_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int r = -ENXIO;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		u64 addr;
		unsigned long type = (unsigned long)attr->attr;

		r = kvm_vgic_addr(dev->kvm, type, &addr, false);
		if (r)
			return (r == -ENODEV) ? -ENXIO : r;

		if (copy_to_user(uaddr, &addr, sizeof(addr)))
			return -EFAULT;
		break;
	}
	case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;

		r = put_user(dev->kvm->arch.vgic.nr_irqs, uaddr);
		break;
	}
	}

	return r;
}
int vgic_has_attr_regs(const struct kvm_mmio_range *ranges, phys_addr_t offset)
{
	struct kvm_exit_mmio dev_attr_mmio;

	dev_attr_mmio.len = 4;
	if (vgic_find_range(ranges, &dev_attr_mmio, offset))
		return 0;
	else
		return -ENXIO;
}
static void vgic_init_maintenance_interrupt(void *info)
{
	enable_percpu_irq(vgic->maint_irq, 0);
}

static int vgic_cpu_notify(struct notifier_block *self,
			   unsigned long action, void *cpu)
{
	switch (action) {
	case CPU_STARTING:
	case CPU_STARTING_FROZEN:
		vgic_init_maintenance_interrupt(NULL);
		break;
	case CPU_DYING:
	case CPU_DYING_FROZEN:
		disable_percpu_irq(vgic->maint_irq);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block vgic_cpu_nb = {
	.notifier_call = vgic_cpu_notify,
};
static const struct of_device_id vgic_ids[] = {
	{ .compatible = "arm,cortex-a15-gic",	.data = vgic_v2_probe, },
	{ .compatible = "arm,gic-v3",		.data = vgic_v3_probe, },
	{},
};
int kvm_vgic_hyp_init(void)
{
	const struct of_device_id *matched_id;
	const int (*vgic_probe)(struct device_node *,const struct vgic_ops **,
				const struct vgic_params **);
	struct device_node *vgic_node;
	int ret;

	vgic_node = of_find_matching_node_and_match(NULL,
						    vgic_ids, &matched_id);
	if (!vgic_node) {
		kvm_err("error: no compatible GIC node found\n");
		return -ENODEV;
	}

	vgic_probe = matched_id->data;
	ret = vgic_probe(vgic_node, &vgic_ops, &vgic);
	if (ret)
		return ret;

	ret = request_percpu_irq(vgic->maint_irq, vgic_maintenance_handler,
				 "vgic", kvm_get_running_vcpus());
	if (ret) {
		kvm_err("Cannot register interrupt %d\n", vgic->maint_irq);
		return ret;
	}

	ret = __register_cpu_notifier(&vgic_cpu_nb);
	if (ret) {
		kvm_err("Cannot register vgic CPU notifier\n");
		goto out_free_irq;
	}

	/* Callback into arch code for setup */
	vgic_arch_setup(vgic);

	on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);

	return 0;

out_free_irq:
	free_percpu_irq(vgic->maint_irq, kvm_get_running_vcpus());
	return ret;
}