// SPDX-License-Identifier: GPL-2.0-only
/*
 * VFIO PCI interrupt handling
 *
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc.  All rights reserved.
 * Author: Tom Lyon, pugs@cisco.com
 */

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/eventfd.h>
#include <linux/msi.h>
#include <linux/pci.h>
#include <linux/file.h>
#include <linux/vfio.h>
#include <linux/wait.h>
#include <linux/slab.h>

#include "vfio_pci_priv.h"

struct vfio_pci_irq_ctx {
	struct vfio_pci_core_device	*vdev;
	struct eventfd_ctx		*trigger;
	struct virqfd			*unmask;
	struct virqfd			*mask;
	char				*name;
	bool				masked;
	struct irq_bypass_producer	producer;
};

static bool irq_is(struct vfio_pci_core_device *vdev, int type)
{
	return vdev->irq_type == type;
}

static bool is_intx(struct vfio_pci_core_device *vdev)
{
	return vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX;
}

static bool is_irq_none(struct vfio_pci_core_device *vdev)
{
	return !(vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX ||
		 vdev->irq_type == VFIO_PCI_MSI_IRQ_INDEX ||
		 vdev->irq_type == VFIO_PCI_MSIX_IRQ_INDEX);
}

static
struct vfio_pci_irq_ctx *vfio_irq_ctx_get(struct vfio_pci_core_device *vdev,
					  unsigned long index)
{
	return xa_load(&vdev->ctx, index);
}

static void vfio_irq_ctx_free(struct vfio_pci_core_device *vdev,
			      struct vfio_pci_irq_ctx *ctx, unsigned long index)
{
	xa_erase(&vdev->ctx, index);
	kfree(ctx);
}

static struct vfio_pci_irq_ctx *
vfio_irq_ctx_alloc(struct vfio_pci_core_device *vdev, unsigned long index)
{
	struct vfio_pci_irq_ctx *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL_ACCOUNT);
	if (!ctx)
		return NULL;

	ret = xa_insert(&vdev->ctx, index, ctx, GFP_KERNEL_ACCOUNT);
	if (ret) {
		kfree(ctx);
		return NULL;
	}

	return ctx;
}
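
/*
 * Interrupt contexts are stored in vdev->ctx, an xarray indexed by vector
 * number (INTx always uses index 0).  Each context carries the eventfd the
 * user registered for that vector plus the state needed to tear the vector
 * back down.
 */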

/*
 * INTx
 */
static void vfio_send_intx_eventfd(void *opaque, void *data)
{
	struct vfio_pci_core_device *vdev = opaque;

	if (likely(is_intx(vdev) && !vdev->virq_disabled)) {
		struct vfio_pci_irq_ctx *ctx = data;
		struct eventfd_ctx *trigger = READ_ONCE(ctx->trigger);

		if (likely(trigger))
			eventfd_signal(trigger);
	}
}
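
/*
 * INTx masking model: ctx->masked mirrors the guest-visible DisINTx state
 * and is protected by vdev->irqlock since it is updated from the hard IRQ
 * handler as well as from ioctl and config-write paths.  PCI 2.3 devices
 * are masked at the device via the command register DisINTx bit; devices
 * without DisINTx support are masked at the IRQ chip by disabling the IRQ.
 */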

/* Returns true if the INTx vfio_pci_irq_ctx.masked value is changed. */
static bool __vfio_pci_intx_mask(struct vfio_pci_core_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	struct vfio_pci_irq_ctx *ctx;
	unsigned long flags;
	bool masked_changed = false;

	lockdep_assert_held(&vdev->igate);

	spin_lock_irqsave(&vdev->irqlock, flags);

	/*
	 * Masking can come from interrupt, ioctl, or config space
	 * via INTx disable.  The latter means this can get called
	 * even when not using intx delivery.  In this case, just
	 * try to have the physical bit follow the virtual bit.
	 */
	if (unlikely(!is_intx(vdev))) {
		if (vdev->pci_2_3)
			pci_intx(pdev, 0);
		goto out_unlock;
	}

	ctx = vfio_irq_ctx_get(vdev, 0);
	if (WARN_ON_ONCE(!ctx))
		goto out_unlock;

	if (!ctx->masked) {
		/*
		 * Can't use check_and_mask here because we always want to
		 * mask, not just when something is pending.
		 */
		if (vdev->pci_2_3)
			pci_intx(pdev, 0);
		else
			disable_irq_nosync(pdev->irq);

		ctx->masked = true;
		masked_changed = true;
	}

out_unlock:
	spin_unlock_irqrestore(&vdev->irqlock, flags);
	return masked_changed;
}

bool vfio_pci_intx_mask(struct vfio_pci_core_device *vdev)
{
	bool mask_changed;

	mutex_lock(&vdev->igate);
	mask_changed = __vfio_pci_intx_mask(vdev);
	mutex_unlock(&vdev->igate);

	return mask_changed;
}

/*
 * If this is triggered by an eventfd, we can't call eventfd_signal
 * or else we'll deadlock on the eventfd wait queue.  Return >0 when
 * a signal is necessary, which can then be handled via a work queue
 * or directly depending on the caller.
 */
static int vfio_pci_intx_unmask_handler(void *opaque, void *data)
{
	struct vfio_pci_core_device *vdev = opaque;
	struct pci_dev *pdev = vdev->pdev;
	struct vfio_pci_irq_ctx *ctx = data;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&vdev->irqlock, flags);

	/*
	 * Unmasking comes from ioctl or config, so again, have the
	 * physical bit follow the virtual even when not using INTx.
	 */
	if (unlikely(!is_intx(vdev))) {
		if (vdev->pci_2_3)
			pci_intx(pdev, 1);
		goto out_unlock;
	}

	if (ctx->masked && !vdev->virq_disabled) {
		/*
		 * A pending interrupt here would immediately trigger,
		 * but we can avoid that overhead by just re-sending
		 * the interrupt to the user.
		 */
		if (vdev->pci_2_3) {
			if (!pci_check_and_unmask_intx(pdev))
				ret = 1;
		} else
			enable_irq(pdev->irq);

		ctx->masked = (ret > 0);
	}

out_unlock:
	spin_unlock_irqrestore(&vdev->irqlock, flags);

	return ret;
}

static void __vfio_pci_intx_unmask(struct vfio_pci_core_device *vdev)
{
	struct vfio_pci_irq_ctx *ctx = vfio_irq_ctx_get(vdev, 0);

	lockdep_assert_held(&vdev->igate);

	if (vfio_pci_intx_unmask_handler(vdev, ctx) > 0)
		vfio_send_intx_eventfd(vdev, ctx);
}

void vfio_pci_intx_unmask(struct vfio_pci_core_device *vdev)
{
	mutex_lock(&vdev->igate);
	__vfio_pci_intx_unmask(vdev);
	mutex_unlock(&vdev->igate);
}
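
/*
 * Hard IRQ handler for INTx.  For non-PCI-2.3 devices the interrupt is
 * exclusive, so it is always ours: mask at the IRQ chip and signal the
 * user.  For PCI 2.3 devices the line may be shared, so only claim the
 * interrupt (IRQ_HANDLED) if the device was actually asserting INTx and
 * we masked it via DisINTx; otherwise return IRQ_NONE so other handlers
 * sharing the line get a chance to run.
 */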

static irqreturn_t vfio_intx_handler(int irq, void *dev_id)
{
	struct vfio_pci_irq_ctx *ctx = dev_id;
	struct vfio_pci_core_device *vdev = ctx->vdev;
	unsigned long flags;
	int ret = IRQ_NONE;

	spin_lock_irqsave(&vdev->irqlock, flags);

	if (!vdev->pci_2_3) {
		disable_irq_nosync(vdev->pdev->irq);
		ctx->masked = true;
		ret = IRQ_HANDLED;
	} else if (!ctx->masked &&  /* may be shared */
		   pci_check_and_mask_intx(vdev->pdev)) {
		ctx->masked = true;
		ret = IRQ_HANDLED;
	}

	spin_unlock_irqrestore(&vdev->irqlock, flags);

	if (ret == IRQ_HANDLED)
		vfio_send_intx_eventfd(vdev, ctx);

	return ret;
}

static int vfio_intx_enable(struct vfio_pci_core_device *vdev,
			    struct eventfd_ctx *trigger)
{
	struct pci_dev *pdev = vdev->pdev;
	struct vfio_pci_irq_ctx *ctx;
	unsigned long irqflags;
	char *name;
	int ret;

	if (!is_irq_none(vdev))
		return -EINVAL;

	if (!pdev->irq)
		return -ENODEV;

	name = kasprintf(GFP_KERNEL_ACCOUNT, "vfio-intx(%s)", pci_name(pdev));
	if (!name)
		return -ENOMEM;

	ctx = vfio_irq_ctx_alloc(vdev, 0);
	if (!ctx) {
		kfree(name);
		return -ENOMEM;
	}

	ctx->name = name;
	ctx->trigger = trigger;
	ctx->vdev = vdev;

	/*
	 * Fill the initial masked state based on virq_disabled.  After
	 * enable, changing the DisINTx bit in vconfig directly changes INTx
	 * masking.  igate prevents races during setup, once running masked
	 * is protected via irqlock.
	 *
	 * Devices supporting DisINTx also reflect the current mask state in
	 * the physical DisINTx bit, which is not affected during IRQ setup.
	 *
	 * Devices without DisINTx support require an exclusive interrupt.
	 * IRQ masking is performed at the IRQ chip.  Again, igate protects
	 * against races during setup and IRQ handlers and irqfds are not
	 * yet active, therefore masked is stable and can be used to
	 * conditionally auto-enable the IRQ.
	 *
	 * irq_type must be stable while the IRQ handler is registered,
	 * therefore it must be set before request_irq().
	 */
	ctx->masked = vdev->virq_disabled;
	if (vdev->pci_2_3) {
		pci_intx(pdev, !ctx->masked);
		irqflags = IRQF_SHARED;
	} else {
		irqflags = ctx->masked ? IRQF_NO_AUTOEN : 0;
	}

	vdev->irq_type = VFIO_PCI_INTX_IRQ_INDEX;

	ret = request_irq(pdev->irq, vfio_intx_handler,
			  irqflags, ctx->name, ctx);
	if (ret) {
		vdev->irq_type = VFIO_PCI_NUM_IRQS;
		kfree(name);
		vfio_irq_ctx_free(vdev, ctx, 0);
		return ret;
	}

	return 0;
}

static int vfio_intx_set_signal(struct vfio_pci_core_device *vdev,
				struct eventfd_ctx *trigger)
{
	struct pci_dev *pdev = vdev->pdev;
	struct vfio_pci_irq_ctx *ctx;
	struct eventfd_ctx *old;

	ctx = vfio_irq_ctx_get(vdev, 0);
	if (WARN_ON_ONCE(!ctx))
		return -EINVAL;

	old = ctx->trigger;

	WRITE_ONCE(ctx->trigger, trigger);

	/* Releasing an old ctx requires synchronizing in-flight users */
	if (old) {
		synchronize_irq(pdev->irq);
		vfio_virqfd_flush_thread(&ctx->unmask);
		eventfd_ctx_put(old);
	}

	return 0;
}

static void vfio_intx_disable(struct vfio_pci_core_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	struct vfio_pci_irq_ctx *ctx;

	ctx = vfio_irq_ctx_get(vdev, 0);
	WARN_ON_ONCE(!ctx);
	if (ctx) {
		vfio_virqfd_disable(&ctx->unmask);
		vfio_virqfd_disable(&ctx->mask);
		free_irq(pdev->irq, ctx);
		if (ctx->trigger)
			eventfd_ctx_put(ctx->trigger);
		kfree(ctx->name);
		vfio_irq_ctx_free(vdev, ctx, 0);
	}
	vdev->irq_type = VFIO_PCI_NUM_IRQS;
}
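
/*
 * MSI/MSI-X
 *
 * Unlike INTx there is no masking state to track here: each vector gets its
 * own Linux IRQ whose handler simply signals the eventfd registered for
 * that vector.  The eventfd is also registered as an irq_bypass producer so
 * that a hypervisor consumer (e.g. KVM's posted-interrupt path) can bypass
 * the eventfd entirely when available.
 */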

static irqreturn_t vfio_msihandler(int irq, void *arg)
{
	struct eventfd_ctx *trigger = arg;

	eventfd_signal(trigger);
	return IRQ_HANDLED;
}

static int vfio_msi_enable(struct vfio_pci_core_device *vdev, int nvec, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	unsigned int flag = msix ? PCI_IRQ_MSIX : PCI_IRQ_MSI;
	int ret;
	u16 cmd;

	if (!is_irq_none(vdev))
		return -EINVAL;

	/* return the number of supported vectors if we can't get all: */
	cmd = vfio_pci_memory_lock_and_enable(vdev);
	ret = pci_alloc_irq_vectors(pdev, 1, nvec, flag);
	if (ret < nvec) {
		if (ret > 0)
			pci_free_irq_vectors(pdev);
		vfio_pci_memory_unlock_and_restore(vdev, cmd);
		return ret;
	}
	vfio_pci_memory_unlock_and_restore(vdev, cmd);

	vdev->irq_type = msix ? VFIO_PCI_MSIX_IRQ_INDEX :
				VFIO_PCI_MSI_IRQ_INDEX;

	if (!msix) {
		/*
		 * Compute the virtual hardware field for max msi vectors -
		 * it is the log base 2 of the number of vectors.
		 */
		vdev->msi_qmax = fls(nvec * 2 - 1) - 1;
	}

	return 0;
}
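
/*
 * Example of the msi_qmax computation above: MSI encodes the vector count
 * as a power of two, so qmax must round nvec up.  fls(nvec * 2 - 1) - 1 is
 * ceil(log2(nvec)) for nvec >= 1, e.g. nvec = 1 -> 0, nvec = 3 -> 2 (i.e.
 * four vectors), nvec = 8 -> 3.
 */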

/*
 * vfio_msi_alloc_irq() returns the Linux IRQ number of an MSI or MSI-X device
 * interrupt vector. If a Linux IRQ number is not available then a new
 * interrupt is allocated if dynamic MSI-X is supported.
 *
 * Where is vfio_msi_free_irq()? Allocated interrupts are maintained,
 * essentially forming a cache that subsequent allocations can draw from.
 * Interrupts are freed using pci_free_irq_vectors() when MSI/MSI-X is
 * disabled.
 */
static int vfio_msi_alloc_irq(struct vfio_pci_core_device *vdev,
			      unsigned int vector, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	struct msi_map map;
	int irq;
	u16 cmd;

	irq = pci_irq_vector(pdev, vector);
	if (WARN_ON_ONCE(irq == 0))
		return -EINVAL;
	if (irq > 0 || !msix || !vdev->has_dyn_msix)
		return irq;

	cmd = vfio_pci_memory_lock_and_enable(vdev);
	map = pci_msix_alloc_irq_at(pdev, vector, NULL);
	vfio_pci_memory_unlock_and_restore(vdev, cmd);

	return map.index < 0 ? map.index : map.virq;
}
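
/*
 * (Re)wire a single MSI/MSI-X vector to an eventfd.  Any existing context
 * for the vector is torn down first (bypass producer unregistered, IRQ
 * freed, old eventfd released); fd < 0 stops there and leaves the vector
 * unconfigured.  Otherwise a fresh context is built: the Linux IRQ is
 * looked up or dynamically allocated, the cached MSI message is rewritten
 * for MSI-X in case the device lost it, and the handler is requested with
 * the new eventfd as its cookie.
 */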

static int vfio_msi_set_vector_signal(struct vfio_pci_core_device *vdev,
				      unsigned int vector, int fd, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	struct vfio_pci_irq_ctx *ctx;
	struct eventfd_ctx *trigger;
	int irq = -EINVAL, ret;
	u16 cmd;

	ctx = vfio_irq_ctx_get(vdev, vector);

	if (ctx) {
		irq_bypass_unregister_producer(&ctx->producer);
		irq = pci_irq_vector(pdev, vector);
		cmd = vfio_pci_memory_lock_and_enable(vdev);
		free_irq(irq, ctx->trigger);
		vfio_pci_memory_unlock_and_restore(vdev, cmd);
		/* Interrupt stays allocated, will be freed at MSI-X disable. */
		kfree(ctx->name);
		eventfd_ctx_put(ctx->trigger);
		vfio_irq_ctx_free(vdev, ctx, vector);
	}

	if (fd < 0)
		return 0;

	if (irq == -EINVAL) {
		/* Interrupt stays allocated, will be freed at MSI-X disable. */
		irq = vfio_msi_alloc_irq(vdev, vector, msix);
		if (irq < 0)
			return irq;
	}

	ctx = vfio_irq_ctx_alloc(vdev, vector);
	if (!ctx)
		return -ENOMEM;

	ctx->name = kasprintf(GFP_KERNEL_ACCOUNT, "vfio-msi%s[%d](%s)",
			      msix ? "x" : "", vector, pci_name(pdev));
	if (!ctx->name) {
		ret = -ENOMEM;
		goto out_free_ctx;
	}

	trigger = eventfd_ctx_fdget(fd);
	if (IS_ERR(trigger)) {
		ret = PTR_ERR(trigger);
		goto out_free_name;
	}

	/*
	 * If the vector was previously allocated, refresh the on-device
	 * message data before enabling in case it had been cleared or
	 * corrupted (e.g. due to backdoor resets) since writing.
	 */
	cmd = vfio_pci_memory_lock_and_enable(vdev);
	if (msix) {
		struct msi_msg msg;

		get_cached_msi_msg(irq, &msg);
		pci_write_msi_msg(irq, &msg);
	}

	ret = request_irq(irq, vfio_msihandler, 0, ctx->name, trigger);
	vfio_pci_memory_unlock_and_restore(vdev, cmd);
	if (ret)
		goto out_put_eventfd_ctx;

	ctx->producer.token = trigger;
	ctx->producer.irq = irq;
	ret = irq_bypass_register_producer(&ctx->producer);
	if (unlikely(ret)) {
		dev_info(&pdev->dev,
		"irq bypass producer (token %p) registration fails: %d\n",
		ctx->producer.token, ret);

		ctx->producer.token = NULL;
	}
	ctx->trigger = trigger;

	return 0;

out_put_eventfd_ctx:
	eventfd_ctx_put(trigger);
out_free_name:
	kfree(ctx->name);
out_free_ctx:
	vfio_irq_ctx_free(vdev, ctx, vector);
	return ret;
}
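
/*
 * Apply an array of eventfds to a contiguous range of vectors.  On the
 * first per-vector failure every vector already configured by this call
 * is rolled back with fd = -1, leaving no partially configured vectors
 * behind.
 */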

static int vfio_msi_set_block(struct vfio_pci_core_device *vdev, unsigned start,
			      unsigned count, int32_t *fds, bool msix)
{
	unsigned int i, j;
	int ret = 0;

	for (i = 0, j = start; i < count && !ret; i++, j++) {
		int fd = fds ? fds[i] : -1;
		ret = vfio_msi_set_vector_signal(vdev, j, fd, msix);
	}

	if (ret) {
		for (i = start; i < j; i++)
			vfio_msi_set_vector_signal(vdev, i, -1, msix);
	}

	return ret;
}

static void vfio_msi_disable(struct vfio_pci_core_device *vdev, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	struct vfio_pci_irq_ctx *ctx;
	unsigned long i;
	u16 cmd;

	xa_for_each(&vdev->ctx, i, ctx) {
		vfio_virqfd_disable(&ctx->unmask);
		vfio_virqfd_disable(&ctx->mask);
		vfio_msi_set_vector_signal(vdev, i, -1, msix);
	}

	cmd = vfio_pci_memory_lock_and_enable(vdev);
	pci_free_irq_vectors(pdev);
	vfio_pci_memory_unlock_and_restore(vdev, cmd);

	/*
	 * Both disable paths above use pci_intx_for_msi() to clear DisINTx
	 * via their shutdown paths.  Restore for NoINTx devices.
	 */
	if (vdev->nointx)
		pci_intx(pdev, 0);

	vdev->irq_type = VFIO_PCI_NUM_IRQS;
}
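
/*
 * IOCTL support
 *
 * Backends for VFIO_DEVICE_SET_IRQS.  Each handler receives the index,
 * start/count subrange and flags from the ioctl; the data pointer is
 * interpreted per VFIO_IRQ_SET_DATA_{NONE,BOOL,EVENTFD} as no payload, an
 * array of u8 booleans, or an array of signed 32-bit eventfds respectively.
 */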

static int vfio_pci_set_intx_unmask(struct vfio_pci_core_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	if (!is_intx(vdev) || start != 0 || count != 1)
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		__vfio_pci_intx_unmask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t unmask = *(uint8_t *)data;

		if (unmask)
			__vfio_pci_intx_unmask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		struct vfio_pci_irq_ctx *ctx = vfio_irq_ctx_get(vdev, 0);
		int32_t fd = *(int32_t *)data;

		if (WARN_ON_ONCE(!ctx))
			return -EINVAL;
		if (fd >= 0)
			return vfio_virqfd_enable((void *) vdev,
						  vfio_pci_intx_unmask_handler,
						  vfio_send_intx_eventfd, ctx,
						  &ctx->unmask, fd);

		vfio_virqfd_disable(&ctx->unmask);
	}

	return 0;
}

static int vfio_pci_set_intx_mask(struct vfio_pci_core_device *vdev,
				  unsigned index, unsigned start,
				  unsigned count, uint32_t flags, void *data)
{
	if (!is_intx(vdev) || start != 0 || count != 1)
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		__vfio_pci_intx_mask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t mask = *(uint8_t *)data;

		if (mask)
			__vfio_pci_intx_mask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		return -ENOTTY; /* XXX implement me */
	}

	return 0;
}

static int vfio_pci_set_intx_trigger(struct vfio_pci_core_device *vdev,
				     unsigned index, unsigned start,
				     unsigned count, uint32_t flags, void *data)
{
	if (is_intx(vdev) && !count && (flags & VFIO_IRQ_SET_DATA_NONE)) {
		vfio_intx_disable(vdev);
		return 0;
	}

	if (!(is_intx(vdev) || is_irq_none(vdev)) || start != 0 || count != 1)
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		struct eventfd_ctx *trigger = NULL;
		int32_t fd = *(int32_t *)data;
		int ret;

		if (fd >= 0) {
			trigger = eventfd_ctx_fdget(fd);
			if (IS_ERR(trigger))
				return PTR_ERR(trigger);
		}

		if (is_intx(vdev))
			ret = vfio_intx_set_signal(vdev, trigger);
		else
			ret = vfio_intx_enable(vdev, trigger);

		if (ret && trigger)
			eventfd_ctx_put(trigger);

		return ret;
	}

	if (!is_intx(vdev))
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		vfio_send_intx_eventfd(vdev, vfio_irq_ctx_get(vdev, 0));
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t trigger = *(uint8_t *)data;

		if (trigger)
			vfio_send_intx_eventfd(vdev, vfio_irq_ctx_get(vdev, 0));
	}
	return 0;
}

static int vfio_pci_set_msi_trigger(struct vfio_pci_core_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	struct vfio_pci_irq_ctx *ctx;
	unsigned int i;
	bool msix = (index == VFIO_PCI_MSIX_IRQ_INDEX) ? true : false;

	if (irq_is(vdev, index) && !count && (flags & VFIO_IRQ_SET_DATA_NONE)) {
		vfio_msi_disable(vdev, msix);
		return 0;
	}

	if (!(irq_is(vdev, index) || is_irq_none(vdev)))
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t *fds = data;
		int ret;

		if (vdev->irq_type == index)
			return vfio_msi_set_block(vdev, start, count,
						  fds, msix);

		ret = vfio_msi_enable(vdev, start + count, msix);
		if (ret)
			return ret;

		ret = vfio_msi_set_block(vdev, start, count, fds, msix);
		if (ret)
			vfio_msi_disable(vdev, msix);

		return ret;
	}

	if (!irq_is(vdev, index))
		return -EINVAL;

	for (i = start; i < start + count; i++) {
		ctx = vfio_irq_ctx_get(vdev, i);
		if (!ctx)
			continue;
		if (flags & VFIO_IRQ_SET_DATA_NONE) {
			eventfd_signal(ctx->trigger);
		} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
			uint8_t *bools = data;

			if (bools[i - start])
				eventfd_signal(ctx->trigger);
		}
	}
	return 0;
}
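
/*
 * The error and request indexes carry a single eventfd each, stored
 * directly in the core device rather than in the vector xarray.  They are
 * signaled from the driver itself (AER error detection and device release
 * requests respectively), so DATA_NONE/DATA_BOOL here only provide a
 * loopback test of that eventfd.
 */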

static int vfio_pci_set_ctx_trigger_single(struct eventfd_ctx **ctx,
					   unsigned int count, uint32_t flags,
					   void *data)
{
	/* DATA_NONE/DATA_BOOL enables loopback testing */
	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		if (*ctx) {
			if (count) {
				eventfd_signal(*ctx);
			} else {
				eventfd_ctx_put(*ctx);
				*ctx = NULL;
			}
			return 0;
		}
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t trigger;

		if (!count)
			return -EINVAL;

		trigger = *(uint8_t *)data;
		if (trigger && *ctx)
			eventfd_signal(*ctx);

		return 0;
	} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t fd;

		if (!count)
			return -EINVAL;

		fd = *(int32_t *)data;
		if (fd == -1) {
			if (*ctx)
				eventfd_ctx_put(*ctx);
			*ctx = NULL;
		} else if (fd >= 0) {
			struct eventfd_ctx *efdctx;

			efdctx = eventfd_ctx_fdget(fd);
			if (IS_ERR(efdctx))
				return PTR_ERR(efdctx);

			if (*ctx)
				eventfd_ctx_put(*ctx);

			*ctx = efdctx;
		}
		return 0;
	}

	return -EINVAL;
}

static int vfio_pci_set_err_trigger(struct vfio_pci_core_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	if (index != VFIO_PCI_ERR_IRQ_INDEX || start != 0 || count > 1)
		return -EINVAL;

	return vfio_pci_set_ctx_trigger_single(&vdev->err_trigger,
					       count, flags, data);
}

static int vfio_pci_set_req_trigger(struct vfio_pci_core_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	if (index != VFIO_PCI_REQ_IRQ_INDEX || start != 0 || count > 1)
		return -EINVAL;

	return vfio_pci_set_ctx_trigger_single(&vdev->req_trigger,
					       count, flags, data);
}
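
/*
 * Entry point from the VFIO_DEVICE_SET_IRQS ioctl: pick the backend for the
 * requested index/action pair and invoke it with the already-validated
 * arguments.
 *
 * Userspace usage sketch (illustrative only, not part of this file; error
 * handling omitted, "device_fd" is assumed to be an open vfio device fd):
 *
 *	struct vfio_irq_set *set;
 *	int32_t *fds;
 *	size_t argsz = sizeof(*set) + sizeof(int32_t) * 2;
 *
 *	set = malloc(argsz);
 *	set->argsz = argsz;
 *	set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
 *	set->index = VFIO_PCI_MSIX_IRQ_INDEX;
 *	set->start = 0;
 *	set->count = 2;
 *	fds = (int32_t *)&set->data;
 *	fds[0] = eventfd(0, EFD_CLOEXEC);	// vector 0
 *	fds[1] = eventfd(0, EFD_CLOEXEC);	// vector 1
 *	ioctl(device_fd, VFIO_DEVICE_SET_IRQS, set);
 *
 * That request lands in vfio_pci_set_msi_trigger(), which enables MSI-X
 * with two vectors and wires each to its eventfd via
 * vfio_msi_set_vector_signal().
 */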

int vfio_pci_set_irqs_ioctl(struct vfio_pci_core_device *vdev, uint32_t flags,
			    unsigned index, unsigned start, unsigned count,
			    void *data)
{
	int (*func)(struct vfio_pci_core_device *vdev, unsigned index,
		    unsigned start, unsigned count, uint32_t flags,
		    void *data) = NULL;

	switch (index) {
	case VFIO_PCI_INTX_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
			func = vfio_pci_set_intx_mask;
			break;
		case VFIO_IRQ_SET_ACTION_UNMASK:
			func = vfio_pci_set_intx_unmask;
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = vfio_pci_set_intx_trigger;
			break;
		}
		break;
	case VFIO_PCI_MSI_IRQ_INDEX:
	case VFIO_PCI_MSIX_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
		case VFIO_IRQ_SET_ACTION_UNMASK:
			/* XXX Need masking support exported */
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = vfio_pci_set_msi_trigger;
			break;
		}
		break;
	case VFIO_PCI_ERR_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			if (pci_is_pcie(vdev->pdev))
				func = vfio_pci_set_err_trigger;
			break;
		}
		break;
	case VFIO_PCI_REQ_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = vfio_pci_set_req_trigger;
			break;
		}
		break;
	}

	if (!func)
		return -EINVAL;

	return func(vdev, index, start, count, flags, data);
}