// SPDX-License-Identifier: GPL-2.0
/*
 * vgic_irq.c - Test userspace injection of IRQs
 *
 * This test validates the injection of IRQs from userspace using various
 * methods (e.g., KVM_IRQ_LINE) and modes (e.g., EOI). The guest "asks" the
 * host to inject a specific intid via a GUEST_SYNC call, and then checks that
 * it received it.
 */
#include <asm/kvm_para.h>
#include <sys/eventfd.h>
#include <linux/sizes.h>

#include "processor.h"
#include "test_util.h"
#include "gic.h"
#include "gic_v3.h"
#include "vgic.h"
23 * Stores the user specified args; it's passed to the guest and to every test
27 uint32_t nr_irqs
; /* number of KVM supported IRQs. */
28 bool eoi_split
; /* 1 is eoir+dir, 0 is eoir only */
29 bool level_sensitive
; /* 1 is level, 0 is edge */
30 int kvm_max_routes
; /* output of KVM_CAP_IRQ_ROUTING */
31 bool kvm_supports_irqfd
; /* output of KVM_CAP_IRQFD */
35 * KVM implements 32 priority levels:
36 * 0x00 (highest priority) - 0xF8 (lowest priority), in steps of 8
38 * Note that these macros will still be correct in the case that KVM implements
39 * more priority levels. Also note that 32 is the minimum for GICv3 and GICv2.
41 #define KVM_NUM_PRIOS 32
42 #define KVM_PRIO_SHIFT 3 /* steps of 8 = 1 << 3 */
43 #define KVM_PRIO_STEPS (1 << KVM_PRIO_SHIFT) /* 8 */
44 #define LOWEST_PRIO (KVM_NUM_PRIOS - 1)
45 #define CPU_PRIO_MASK (LOWEST_PRIO << KVM_PRIO_SHIFT) /* 0xf8 */
46 #define IRQ_DEFAULT_PRIO (LOWEST_PRIO - 1)
47 #define IRQ_DEFAULT_PRIO_REG (IRQ_DEFAULT_PRIO << KVM_PRIO_SHIFT) /* 0xf0 */
50 * The kvm_inject_* utilities are used by the guest to ask the host to inject
51 * interrupts (e.g., using the KVM_IRQ_LINE ioctl).
55 KVM_INJECT_EDGE_IRQ_LINE
= 1,
57 KVM_SET_IRQ_LINE_HIGH
,
58 KVM_SET_LEVEL_INFO_HIGH
,
64 struct kvm_inject_args
{
72 /* Used on the guest side to perform the hypercall. */
73 static void kvm_inject_call(kvm_inject_cmd cmd
, uint32_t first_intid
,
74 uint32_t num
, int level
, bool expect_failure
);
76 /* Used on the host side to get the hypercall info. */
77 static void kvm_inject_get_call(struct kvm_vm
*vm
, struct ucall
*uc
,
78 struct kvm_inject_args
*args
);
80 #define _KVM_INJECT_MULTI(cmd, intid, num, expect_failure) \
81 kvm_inject_call(cmd, intid, num, -1 /* not used */, expect_failure)
83 #define KVM_INJECT_MULTI(cmd, intid, num) \
84 _KVM_INJECT_MULTI(cmd, intid, num, false)
86 #define _KVM_INJECT(cmd, intid, expect_failure) \
87 _KVM_INJECT_MULTI(cmd, intid, 1, expect_failure)
89 #define KVM_INJECT(cmd, intid) \
90 _KVM_INJECT_MULTI(cmd, intid, 1, false)
92 #define KVM_ACTIVATE(cmd, intid) \
93 kvm_inject_call(cmd, intid, 1, 1, false);
95 struct kvm_inject_desc
{
97 /* can inject PPIs, PPIs, and/or SPIs. */
101 static struct kvm_inject_desc inject_edge_fns
[] = {
103 { KVM_INJECT_EDGE_IRQ_LINE
, false, false, true },
104 { KVM_INJECT_IRQFD
, false, false, true },
105 { KVM_WRITE_ISPENDR
, true, false, true },
109 static struct kvm_inject_desc inject_level_fns
[] = {
111 { KVM_SET_IRQ_LINE_HIGH
, false, true, true },
112 { KVM_SET_LEVEL_INFO_HIGH
, false, true, true },
113 { KVM_INJECT_IRQFD
, false, false, true },
114 { KVM_WRITE_ISPENDR
, false, true, true },
118 static struct kvm_inject_desc set_active_fns
[] = {
120 { KVM_WRITE_ISACTIVER
, true, true, true },
124 #define for_each_inject_fn(t, f) \
125 for ((f) = (t); (f)->cmd; (f)++)
127 #define for_each_supported_inject_fn(args, t, f) \
128 for_each_inject_fn(t, f) \
129 if ((args)->kvm_supports_irqfd || (f)->cmd != KVM_INJECT_IRQFD)
131 #define for_each_supported_activate_fn(args, t, f) \
132 for_each_supported_inject_fn((args), (t), (f))
134 /* Shared between the guest main thread and the IRQ handlers. */
135 volatile uint64_t irq_handled
;
136 volatile uint32_t irqnr_received
[MAX_SPI
+ 1];
138 static void reset_stats(void)
143 for (i
= 0; i
<= MAX_SPI
; i
++)
144 irqnr_received
[i
] = 0;
147 static uint64_t gic_read_ap1r0(void)
149 uint64_t reg
= read_sysreg_s(SYS_ICC_AP1R0_EL1
);
155 static void gic_write_ap1r0(uint64_t val
)
157 write_sysreg_s(val
, SYS_ICC_AP1R0_EL1
);
161 static void guest_set_irq_line(uint32_t intid
, uint32_t level
);
163 static void guest_irq_generic_handler(bool eoi_split
, bool level_sensitive
)
165 uint32_t intid
= gic_get_and_ack_irq();
167 if (intid
== IAR_SPURIOUS
)
170 GUEST_ASSERT(gic_irq_get_active(intid
));
172 if (!level_sensitive
)
173 GUEST_ASSERT(!gic_irq_get_pending(intid
));
176 guest_set_irq_line(intid
, 0);
178 GUEST_ASSERT(intid
< MAX_SPI
);
179 irqnr_received
[intid
] += 1;
183 GUEST_ASSERT_EQ(gic_read_ap1r0(), 0);
187 GUEST_ASSERT(!gic_irq_get_active(intid
));
188 GUEST_ASSERT(!gic_irq_get_pending(intid
));
191 static void kvm_inject_call(kvm_inject_cmd cmd
, uint32_t first_intid
,
192 uint32_t num
, int level
, bool expect_failure
)
194 struct kvm_inject_args args
= {
196 .first_intid
= first_intid
,
199 .expect_failure
= expect_failure
,
204 #define GUEST_ASSERT_IAR_EMPTY() \
207 _intid = gic_get_and_ack_irq(); \
208 GUEST_ASSERT(_intid == 0 || _intid == IAR_SPURIOUS); \
211 #define CAT_HELPER(a, b) a ## b
212 #define CAT(a, b) CAT_HELPER(a, b)
213 #define PREFIX guest_irq_handler_
214 #define GUEST_IRQ_HANDLER_NAME(split, lev) CAT(PREFIX, CAT(split, lev))
215 #define GENERATE_GUEST_IRQ_HANDLER(split, lev) \
216 static void CAT(PREFIX, CAT(split, lev))(struct ex_regs *regs) \
218 guest_irq_generic_handler(split, lev); \
221 GENERATE_GUEST_IRQ_HANDLER(0, 0);
222 GENERATE_GUEST_IRQ_HANDLER(0, 1);
223 GENERATE_GUEST_IRQ_HANDLER(1, 0);
224 GENERATE_GUEST_IRQ_HANDLER(1, 1);
226 static void (*guest_irq_handlers
[2][2])(struct ex_regs
*) = {
227 {GUEST_IRQ_HANDLER_NAME(0, 0), GUEST_IRQ_HANDLER_NAME(0, 1),},
228 {GUEST_IRQ_HANDLER_NAME(1, 0), GUEST_IRQ_HANDLER_NAME(1, 1),},
231 static void reset_priorities(struct test_args
*args
)
235 for (i
= 0; i
< args
->nr_irqs
; i
++)
236 gic_set_priority(i
, IRQ_DEFAULT_PRIO_REG
);
239 static void guest_set_irq_line(uint32_t intid
, uint32_t level
)
241 kvm_inject_call(KVM_SET_IRQ_LINE
, intid
, 1, level
, false);
244 static void test_inject_fail(struct test_args
*args
,
245 uint32_t intid
, kvm_inject_cmd cmd
)
249 _KVM_INJECT(cmd
, intid
, true);
250 /* no IRQ to handle on entry */
252 GUEST_ASSERT_EQ(irq_handled
, 0);
253 GUEST_ASSERT_IAR_EMPTY();
256 static void guest_inject(struct test_args
*args
,
257 uint32_t first_intid
, uint32_t num
,
264 /* Cycle over all priorities to make things more interesting. */
265 for (i
= first_intid
; i
< num
+ first_intid
; i
++)
266 gic_set_priority(i
, (i
% (KVM_NUM_PRIOS
- 1)) << 3);
268 asm volatile("msr daifset, #2" : : : "memory");
269 KVM_INJECT_MULTI(cmd
, first_intid
, num
);
271 while (irq_handled
< num
) {
274 isb(); /* handle IRQ */
279 GUEST_ASSERT_EQ(irq_handled
, num
);
280 for (i
= first_intid
; i
< num
+ first_intid
; i
++)
281 GUEST_ASSERT_EQ(irqnr_received
[i
], 1);
282 GUEST_ASSERT_IAR_EMPTY();
284 reset_priorities(args
);
288 * Restore the active state of multiple concurrent IRQs (given by
289 * concurrent_irqs). This does what a live-migration would do on the
290 * destination side assuming there are some active IRQs that were not
293 static void guest_restore_active(struct test_args
*args
,
294 uint32_t first_intid
, uint32_t num
,
297 uint32_t prio
, intid
, ap1r
;
301 * Set the priorities of the first (KVM_NUM_PRIOS - 1) IRQs
302 * in descending order, so intid+1 can preempt intid.
304 for (i
= 0, prio
= (num
- 1) * 8; i
< num
; i
++, prio
-= 8) {
305 GUEST_ASSERT(prio
>= 0);
306 intid
= i
+ first_intid
;
307 gic_set_priority(intid
, prio
);
311 * In a real migration, KVM would restore all GIC state before running
314 for (i
= 0; i
< num
; i
++) {
315 intid
= i
+ first_intid
;
316 KVM_ACTIVATE(cmd
, intid
);
317 ap1r
= gic_read_ap1r0();
319 gic_write_ap1r0(ap1r
);
322 /* This is where the "migration" would occur. */
324 /* finish handling the IRQs starting with the highest priority one. */
325 for (i
= 0; i
< num
; i
++) {
326 intid
= num
- i
- 1 + first_intid
;
332 for (i
= 0; i
< num
; i
++)
333 GUEST_ASSERT(!gic_irq_get_active(i
+ first_intid
));
334 GUEST_ASSERT_EQ(gic_read_ap1r0(), 0);
335 GUEST_ASSERT_IAR_EMPTY();
339 * Polls the IAR until it's not a spurious interrupt.
341 * This function should only be used in test_inject_preemption (with IRQs
344 static uint32_t wait_for_and_activate_irq(void)
349 asm volatile("wfi" : : : "memory");
350 intid
= gic_get_and_ack_irq();
351 } while (intid
== IAR_SPURIOUS
);
357 * Inject multiple concurrent IRQs (num IRQs starting at first_intid) and
358 * handle them without handling the actual exceptions. This is done by masking
359 * interrupts for the whole test.
361 static void test_inject_preemption(struct test_args
*args
,
362 uint32_t first_intid
, int num
,
365 uint32_t intid
, prio
, step
= KVM_PRIO_STEPS
;
368 /* Set the priorities of the first (KVM_NUM_PRIOS - 1) IRQs
369 * in descending order, so intid+1 can preempt intid.
371 for (i
= 0, prio
= (num
- 1) * step
; i
< num
; i
++, prio
-= step
) {
372 GUEST_ASSERT(prio
>= 0);
373 intid
= i
+ first_intid
;
374 gic_set_priority(intid
, prio
);
379 for (i
= 0; i
< num
; i
++) {
381 intid
= i
+ first_intid
;
382 KVM_INJECT(cmd
, intid
);
383 /* Each successive IRQ will preempt the previous one. */
384 tmp
= wait_for_and_activate_irq();
385 GUEST_ASSERT_EQ(tmp
, intid
);
386 if (args
->level_sensitive
)
387 guest_set_irq_line(intid
, 0);
390 /* finish handling the IRQs starting with the highest priority one. */
391 for (i
= 0; i
< num
; i
++) {
392 intid
= num
- i
- 1 + first_intid
;
400 for (i
= 0; i
< num
; i
++)
401 GUEST_ASSERT(!gic_irq_get_active(i
+ first_intid
));
402 GUEST_ASSERT_EQ(gic_read_ap1r0(), 0);
403 GUEST_ASSERT_IAR_EMPTY();
405 reset_priorities(args
);
408 static void test_injection(struct test_args
*args
, struct kvm_inject_desc
*f
)
410 uint32_t nr_irqs
= args
->nr_irqs
;
413 guest_inject(args
, MIN_SGI
, 1, f
->cmd
);
414 guest_inject(args
, 0, 16, f
->cmd
);
418 guest_inject(args
, MIN_PPI
, 1, f
->cmd
);
421 guest_inject(args
, MIN_SPI
, 1, f
->cmd
);
422 guest_inject(args
, nr_irqs
- 1, 1, f
->cmd
);
423 guest_inject(args
, MIN_SPI
, nr_irqs
- MIN_SPI
, f
->cmd
);
427 static void test_injection_failure(struct test_args
*args
,
428 struct kvm_inject_desc
*f
)
430 uint32_t bad_intid
[] = { args
->nr_irqs
, 1020, 1024, 1120, 5120, ~0U, };
433 for (i
= 0; i
< ARRAY_SIZE(bad_intid
); i
++)
434 test_inject_fail(args
, bad_intid
[i
], f
->cmd
);
437 static void test_preemption(struct test_args
*args
, struct kvm_inject_desc
*f
)
440 * Test up to 4 levels of preemption. The reason is that KVM doesn't
441 * currently implement the ability to have more than the number-of-LRs
442 * number of concurrently active IRQs. The number of LRs implemented is
443 * IMPLEMENTATION DEFINED, however, it seems that most implement 4.
446 test_inject_preemption(args
, MIN_SGI
, 4, f
->cmd
);
449 test_inject_preemption(args
, MIN_PPI
, 4, f
->cmd
);
452 test_inject_preemption(args
, MIN_SPI
, 4, f
->cmd
);
455 static void test_restore_active(struct test_args
*args
, struct kvm_inject_desc
*f
)
457 /* Test up to 4 active IRQs. Same reason as in test_preemption. */
459 guest_restore_active(args
, MIN_SGI
, 4, f
->cmd
);
462 guest_restore_active(args
, MIN_PPI
, 4, f
->cmd
);
465 guest_restore_active(args
, MIN_SPI
, 4, f
->cmd
);
468 static void guest_code(struct test_args
*args
)
470 uint32_t i
, nr_irqs
= args
->nr_irqs
;
471 bool level_sensitive
= args
->level_sensitive
;
472 struct kvm_inject_desc
*f
, *inject_fns
;
476 for (i
= 0; i
< nr_irqs
; i
++)
479 for (i
= MIN_SPI
; i
< nr_irqs
; i
++)
480 gic_irq_set_config(i
, !level_sensitive
);
482 gic_set_eoi_split(args
->eoi_split
);
484 reset_priorities(args
);
485 gic_set_priority_mask(CPU_PRIO_MASK
);
487 inject_fns
= level_sensitive
? inject_level_fns
492 /* Start the tests. */
493 for_each_supported_inject_fn(args
, inject_fns
, f
) {
494 test_injection(args
, f
);
495 test_preemption(args
, f
);
496 test_injection_failure(args
, f
);
500 * Restore the active state of IRQs. This would happen when live
501 * migrating IRQs in the middle of being handled.
503 for_each_supported_activate_fn(args
, set_active_fns
, f
)
504 test_restore_active(args
, f
);
509 static void kvm_irq_line_check(struct kvm_vm
*vm
, uint32_t intid
, int level
,
510 struct test_args
*test_args
, bool expect_failure
)
514 if (!expect_failure
) {
515 kvm_arm_irq_line(vm
, intid
, level
);
517 /* The interface doesn't allow larger intid's. */
518 if (intid
> KVM_ARM_IRQ_NUM_MASK
)
521 ret
= _kvm_arm_irq_line(vm
, intid
, level
);
522 TEST_ASSERT(ret
!= 0 && errno
== EINVAL
,
523 "Bad intid %i did not cause KVM_IRQ_LINE "
524 "error: rc: %i errno: %i", intid
, ret
, errno
);
528 void kvm_irq_set_level_info_check(int gic_fd
, uint32_t intid
, int level
,
531 if (!expect_failure
) {
532 kvm_irq_set_level_info(gic_fd
, intid
, level
);
534 int ret
= _kvm_irq_set_level_info(gic_fd
, intid
, level
);
536 * The kernel silently fails for invalid SPIs and SGIs (which
537 * are not level-sensitive). It only checks for intid to not
538 * spill over 1U << 10 (the max reserved SPI). Also, callers
539 * are supposed to mask the intid with 0x3ff (1023).
541 if (intid
> VGIC_MAX_RESERVED
)
542 TEST_ASSERT(ret
!= 0 && errno
== EINVAL
,
543 "Bad intid %i did not cause VGIC_GRP_LEVEL_INFO "
544 "error: rc: %i errno: %i", intid
, ret
, errno
);
546 TEST_ASSERT(!ret
, "KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO "
547 "for intid %i failed, rc: %i errno: %i",
552 static void kvm_set_gsi_routing_irqchip_check(struct kvm_vm
*vm
,
553 uint32_t intid
, uint32_t num
, uint32_t kvm_max_routes
,
556 struct kvm_irq_routing
*routing
;
560 assert(num
<= kvm_max_routes
&& kvm_max_routes
<= KVM_MAX_IRQ_ROUTES
);
562 routing
= kvm_gsi_routing_create();
563 for (i
= intid
; i
< (uint64_t)intid
+ num
; i
++)
564 kvm_gsi_routing_irqchip_add(routing
, i
- MIN_SPI
, i
- MIN_SPI
);
566 if (!expect_failure
) {
567 kvm_gsi_routing_write(vm
, routing
);
569 ret
= _kvm_gsi_routing_write(vm
, routing
);
570 /* The kernel only checks e->irqchip.pin >= KVM_IRQCHIP_NUM_PINS */
571 if (((uint64_t)intid
+ num
- 1 - MIN_SPI
) >= KVM_IRQCHIP_NUM_PINS
)
572 TEST_ASSERT(ret
!= 0 && errno
== EINVAL
,
573 "Bad intid %u did not cause KVM_SET_GSI_ROUTING "
574 "error: rc: %i errno: %i", intid
, ret
, errno
);
576 TEST_ASSERT(ret
== 0, "KVM_SET_GSI_ROUTING "
577 "for intid %i failed, rc: %i errno: %i",
582 static void kvm_irq_write_ispendr_check(int gic_fd
, uint32_t intid
,
583 struct kvm_vcpu
*vcpu
,
587 * Ignore this when expecting failure as invalid intids will lead to
588 * either trying to inject SGIs when we configured the test to be
589 * level_sensitive (or the reverse), or inject large intids which
590 * will lead to writing above the ISPENDR register space (and we
591 * don't want to do that either).
594 kvm_irq_write_ispendr(gic_fd
, intid
, vcpu
);
597 static void kvm_routing_and_irqfd_check(struct kvm_vm
*vm
,
598 uint32_t intid
, uint32_t num
, uint32_t kvm_max_routes
,
607 * There is no way to try injecting an SGI or PPI as the interface
608 * starts counting from the first SPI (above the private ones), so just
611 if (INTID_IS_SGI(intid
) || INTID_IS_PPI(intid
))
614 kvm_set_gsi_routing_irqchip_check(vm
, intid
, num
,
615 kvm_max_routes
, expect_failure
);
618 * If expect_failure, then just to inject anyway. These
619 * will silently fail. And in any case, the guest will check
620 * that no actual interrupt was injected for those cases.
623 for (f
= 0, i
= intid
; i
< (uint64_t)intid
+ num
; i
++, f
++) {
624 fd
[f
] = eventfd(0, 0);
625 TEST_ASSERT(fd
[f
] != -1, __KVM_SYSCALL_ERROR("eventfd()", fd
[f
]));
628 for (f
= 0, i
= intid
; i
< (uint64_t)intid
+ num
; i
++, f
++) {
629 struct kvm_irqfd irqfd
= {
633 assert(i
<= (uint64_t)UINT_MAX
);
634 vm_ioctl(vm
, KVM_IRQFD
, &irqfd
);
637 for (f
= 0, i
= intid
; i
< (uint64_t)intid
+ num
; i
++, f
++) {
639 ret
= write(fd
[f
], &val
, sizeof(uint64_t));
640 TEST_ASSERT(ret
== sizeof(uint64_t),
641 __KVM_SYSCALL_ERROR("write()", ret
));
644 for (f
= 0, i
= intid
; i
< (uint64_t)intid
+ num
; i
++, f
++)
648 /* handles the valid case: intid=0xffffffff num=1 */
649 #define for_each_intid(first, num, tmp, i) \
650 for ((tmp) = (i) = (first); \
651 (tmp) < (uint64_t)(first) + (uint64_t)(num); \
654 static void run_guest_cmd(struct kvm_vcpu
*vcpu
, int gic_fd
,
655 struct kvm_inject_args
*inject_args
,
656 struct test_args
*test_args
)
658 kvm_inject_cmd cmd
= inject_args
->cmd
;
659 uint32_t intid
= inject_args
->first_intid
;
660 uint32_t num
= inject_args
->num
;
661 int level
= inject_args
->level
;
662 bool expect_failure
= inject_args
->expect_failure
;
663 struct kvm_vm
*vm
= vcpu
->vm
;
667 /* handles the valid case: intid=0xffffffff num=1 */
668 assert(intid
< UINT_MAX
- num
|| num
== 1);
671 case KVM_INJECT_EDGE_IRQ_LINE
:
672 for_each_intid(intid
, num
, tmp
, i
)
673 kvm_irq_line_check(vm
, i
, 1, test_args
,
675 for_each_intid(intid
, num
, tmp
, i
)
676 kvm_irq_line_check(vm
, i
, 0, test_args
,
679 case KVM_SET_IRQ_LINE
:
680 for_each_intid(intid
, num
, tmp
, i
)
681 kvm_irq_line_check(vm
, i
, level
, test_args
,
684 case KVM_SET_IRQ_LINE_HIGH
:
685 for_each_intid(intid
, num
, tmp
, i
)
686 kvm_irq_line_check(vm
, i
, 1, test_args
,
689 case KVM_SET_LEVEL_INFO_HIGH
:
690 for_each_intid(intid
, num
, tmp
, i
)
691 kvm_irq_set_level_info_check(gic_fd
, i
, 1,
694 case KVM_INJECT_IRQFD
:
695 kvm_routing_and_irqfd_check(vm
, intid
, num
,
696 test_args
->kvm_max_routes
,
699 case KVM_WRITE_ISPENDR
:
700 for (i
= intid
; i
< intid
+ num
; i
++)
701 kvm_irq_write_ispendr_check(gic_fd
, i
, vcpu
,
704 case KVM_WRITE_ISACTIVER
:
705 for (i
= intid
; i
< intid
+ num
; i
++)
706 kvm_irq_write_isactiver(gic_fd
, i
, vcpu
);
713 static void kvm_inject_get_call(struct kvm_vm
*vm
, struct ucall
*uc
,
714 struct kvm_inject_args
*args
)
716 struct kvm_inject_args
*kvm_args_hva
;
717 vm_vaddr_t kvm_args_gva
;
719 kvm_args_gva
= uc
->args
[1];
720 kvm_args_hva
= (struct kvm_inject_args
*)addr_gva2hva(vm
, kvm_args_gva
);
721 memcpy(args
, kvm_args_hva
, sizeof(struct kvm_inject_args
));
724 static void print_args(struct test_args
*args
)
726 printf("nr-irqs=%d level-sensitive=%d eoi-split=%d\n",
727 args
->nr_irqs
, args
->level_sensitive
,
731 static void test_vgic(uint32_t nr_irqs
, bool level_sensitive
, bool eoi_split
)
735 struct kvm_vcpu
*vcpu
;
737 struct kvm_inject_args inject_args
;
740 struct test_args args
= {
742 .level_sensitive
= level_sensitive
,
743 .eoi_split
= eoi_split
,
744 .kvm_max_routes
= kvm_check_cap(KVM_CAP_IRQ_ROUTING
),
745 .kvm_supports_irqfd
= kvm_check_cap(KVM_CAP_IRQFD
),
750 vm
= vm_create_with_one_vcpu(&vcpu
, guest_code
);
752 vm_init_descriptor_tables(vm
);
753 vcpu_init_descriptor_tables(vcpu
);
755 /* Setup the guest args page (so it gets the args). */
756 args_gva
= vm_vaddr_alloc_page(vm
);
757 memcpy(addr_gva2hva(vm
, args_gva
), &args
, sizeof(args
));
758 vcpu_args_set(vcpu
, 1, args_gva
);
760 gic_fd
= vgic_v3_setup(vm
, 1, nr_irqs
);
761 __TEST_REQUIRE(gic_fd
>= 0, "Failed to create vgic-v3, skipping");
763 vm_install_exception_handler(vm
, VECTOR_IRQ_CURRENT
,
764 guest_irq_handlers
[args
.eoi_split
][args
.level_sensitive
]);
769 switch (get_ucall(vcpu
, &uc
)) {
771 kvm_inject_get_call(vm
, &uc
, &inject_args
);
772 run_guest_cmd(vcpu
, gic_fd
, &inject_args
, &args
);
775 REPORT_GUEST_ASSERT(uc
);
780 TEST_FAIL("Unknown ucall %lu", uc
.cmd
);
789 static void help(const char *name
)
793 "usage: %s [-n num_irqs] [-e eoi_split] [-l level_sensitive]\n", name
);
794 printf(" -n: specify number of IRQs to setup the vgic with. "
795 "It has to be a multiple of 32 and between 64 and 1024.\n");
796 printf(" -e: if 1 then EOI is split into a write to DIR on top "
797 "of writing EOI.\n");
798 printf(" -l: specify whether the IRQs are level-sensitive (1) or not (0).");
803 int main(int argc
, char **argv
)
805 uint32_t nr_irqs
= 64;
806 bool default_args
= true;
807 bool level_sensitive
= false;
809 bool eoi_split
= false;
811 while ((opt
= getopt(argc
, argv
, "hn:e:l:")) != -1) {
814 nr_irqs
= atoi_non_negative("Number of IRQs", optarg
);
815 if (nr_irqs
> 1024 || nr_irqs
% 32)
819 eoi_split
= (bool)atoi_paranoid(optarg
);
820 default_args
= false;
823 level_sensitive
= (bool)atoi_paranoid(optarg
);
824 default_args
= false;
834 * If the user just specified nr_irqs and/or gic_version, then run all
838 test_vgic(nr_irqs
, false /* level */, false /* eoi_split */);
839 test_vgic(nr_irqs
, false /* level */, true /* eoi_split */);
840 test_vgic(nr_irqs
, true /* level */, false /* eoi_split */);
841 test_vgic(nr_irqs
, true /* level */, true /* eoi_split */);
843 test_vgic(nr_irqs
, level_sensitive
, eoi_split
);