// SPDX-License-Identifier: GPL-2.0-only
/*
 * VFIO PCI interrupt handling
 *
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc.  All rights reserved.
 * Author: Tom Lyon, pugs@cisco.com
 */

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/eventfd.h>
#include <linux/msi.h>
#include <linux/pci.h>
#include <linux/file.h>
#include <linux/vfio.h>
#include <linux/wait.h>
#include <linux/slab.h>

#include "vfio_pci_private.h"

/*
 * INTx
 */
static void vfio_send_intx_eventfd(void *opaque, void *unused)
{
        struct vfio_pci_device *vdev = opaque;

        if (likely(is_intx(vdev) && !vdev->virq_disabled))
                eventfd_signal(vdev->ctx[0].trigger, 1);
}

void vfio_pci_intx_mask(struct vfio_pci_device *vdev)
{
        struct pci_dev *pdev = vdev->pdev;
        unsigned long flags;

        spin_lock_irqsave(&vdev->irqlock, flags);

        /*
         * Masking can come from interrupt, ioctl, or config space
         * via INTx disable.  The latter means this can get called
         * even when not using intx delivery.  In this case, just
         * try to have the physical bit follow the virtual bit.
         */
        if (unlikely(!is_intx(vdev))) {
                if (vdev->pci_2_3)
                        pci_intx(pdev, 0);
        } else if (!vdev->ctx[0].masked) {
                /*
                 * Can't use check_and_mask here because we always want to
                 * mask, not just when something is pending.
                 */
                if (vdev->pci_2_3)
                        pci_intx(pdev, 0);
                else
                        disable_irq_nosync(pdev->irq);

                vdev->ctx[0].masked = true;
        }

        spin_unlock_irqrestore(&vdev->irqlock, flags);
}
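
/*
 * Note: masking is implemented two ways depending on the device.  PCI 2.3
 * compliant devices expose DisINTx in the command register, so the interrupt
 * can be masked at the device itself and the (possibly shared) host IRQ line
 * stays usable for other devices.  Devices without DisINTx fall back to
 * disable_irq_nosync() on the host IRQ, which is also why they are given an
 * exclusive (non-IRQF_SHARED) interrupt in vfio_intx_set_signal().
 */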

/*
 * If this is triggered by an eventfd, we can't call eventfd_signal
 * or else we'll deadlock on the eventfd wait queue.  Return >0 when
 * a signal is necessary, which can then be handled via a work queue
 * or directly depending on the caller.
 */
static int vfio_pci_intx_unmask_handler(void *opaque, void *unused)
{
        struct vfio_pci_device *vdev = opaque;
        struct pci_dev *pdev = vdev->pdev;
        unsigned long flags;
        int ret = 0;

        spin_lock_irqsave(&vdev->irqlock, flags);

        /*
         * Unmasking comes from ioctl or config, so again, have the
         * physical bit follow the virtual even when not using INTx.
         */
        if (unlikely(!is_intx(vdev))) {
                if (vdev->pci_2_3)
                        pci_intx(pdev, 1);
        } else if (vdev->ctx[0].masked && !vdev->virq_disabled) {
                /*
                 * A pending interrupt here would immediately trigger,
                 * but we can avoid that overhead by just re-sending
                 * the interrupt to the user.
                 */
                if (vdev->pci_2_3) {
                        if (!pci_check_and_unmask_intx(pdev))
                                ret = 1;
                } else
                        enable_irq(pdev->irq);

                vdev->ctx[0].masked = (ret > 0);
        }

        spin_unlock_irqrestore(&vdev->irqlock, flags);

        return ret;
}

void vfio_pci_intx_unmask(struct vfio_pci_device *vdev)
{
        if (vfio_pci_intx_unmask_handler(vdev, NULL) > 0)
                vfio_send_intx_eventfd(vdev, NULL);
}
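
/*
 * This is the synchronous unmask path used from ioctl and config space
 * emulation.  The same handler above is also wired to an optional unmask
 * eventfd through vfio_virqfd_enable() in vfio_pci_set_intx_unmask(); in
 * that case the >0 return defers the re-signal to vfio_send_intx_eventfd()
 * rather than signalling from within the eventfd wakeup path.
 */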

static irqreturn_t vfio_intx_handler(int irq, void *dev_id)
{
        struct vfio_pci_device *vdev = dev_id;
        unsigned long flags;
        int ret = IRQ_NONE;

        spin_lock_irqsave(&vdev->irqlock, flags);

        if (!vdev->pci_2_3) {
                disable_irq_nosync(vdev->pdev->irq);
                vdev->ctx[0].masked = true;
                ret = IRQ_HANDLED;
        } else if (!vdev->ctx[0].masked &&  /* may be shared */
                   pci_check_and_mask_intx(vdev->pdev)) {
                vdev->ctx[0].masked = true;
                ret = IRQ_HANDLED;
        }

        spin_unlock_irqrestore(&vdev->irqlock, flags);

        if (ret == IRQ_HANDLED)
                vfio_send_intx_eventfd(vdev, NULL);

        return ret;
}
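
/*
 * On a shared line, pci_check_and_mask_intx() peeks at the interrupt status
 * bit so the line is only masked and IRQ_HANDLED returned when this device
 * is actually asserting INTx; otherwise IRQ_NONE lets the other handlers on
 * the line run and keeps the spurious interrupt accounting honest.
 */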

static int vfio_intx_enable(struct vfio_pci_device *vdev)
{
        if (!is_irq_none(vdev))
                return -EINVAL;

        if (!vdev->pdev->irq)
                return -ENODEV;

        vdev->ctx = kzalloc(sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
        if (!vdev->ctx)
                return -ENOMEM;

        vdev->num_ctx = 1;

        /*
         * If the virtual interrupt is masked, restore it.  Devices
         * supporting DisINTx can be masked at the hardware level
         * here, non-PCI-2.3 devices will have to wait until the
         * interrupt is enabled.
         */
        vdev->ctx[0].masked = vdev->virq_disabled;
        if (vdev->pci_2_3)
                pci_intx(vdev->pdev, !vdev->ctx[0].masked);

        vdev->irq_type = VFIO_PCI_INTX_IRQ_INDEX;

        return 0;
}
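
/*
 * vfio_intx_enable() only allocates the per-interrupt context and records
 * the IRQ type; the host interrupt is not requested until userspace supplies
 * an eventfd via vfio_intx_set_signal() below.
 */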

static int vfio_intx_set_signal(struct vfio_pci_device *vdev, int fd)
{
        struct pci_dev *pdev = vdev->pdev;
        unsigned long irqflags = IRQF_SHARED;
        struct eventfd_ctx *trigger;
        unsigned long flags;
        int ret;

        if (vdev->ctx[0].trigger) {
                free_irq(pdev->irq, vdev);
                kfree(vdev->ctx[0].name);
                eventfd_ctx_put(vdev->ctx[0].trigger);
                vdev->ctx[0].trigger = NULL;
        }

        if (fd < 0) /* Disable only */
                return 0;

        vdev->ctx[0].name = kasprintf(GFP_KERNEL, "vfio-intx(%s)",
                                      pci_name(pdev));
        if (!vdev->ctx[0].name)
                return -ENOMEM;

        trigger = eventfd_ctx_fdget(fd);
        if (IS_ERR(trigger)) {
                kfree(vdev->ctx[0].name);
                return PTR_ERR(trigger);
        }

        vdev->ctx[0].trigger = trigger;

        /* Devices without DisINTx cannot share the interrupt line. */
        if (!vdev->pci_2_3)
                irqflags = 0;

        ret = request_irq(pdev->irq, vfio_intx_handler,
                          irqflags, vdev->ctx[0].name, vdev);
        if (ret) {
                vdev->ctx[0].trigger = NULL;
                kfree(vdev->ctx[0].name);
                eventfd_ctx_put(trigger);
                return ret;
        }

        /*
         * INTx disable will stick across the new irq setup,
         * disable_irq won't.
         */
        spin_lock_irqsave(&vdev->irqlock, flags);
        if (!vdev->pci_2_3 && vdev->ctx[0].masked)
                disable_irq_nosync(pdev->irq);
        spin_unlock_irqrestore(&vdev->irqlock, flags);

        return 0;
}
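
/*
 * Calling this with fd < 0 is the teardown path used by vfio_intx_disable().
 * When re-arming with a new eventfd, the old irqaction is freed before the
 * old trigger is released, so the handler can never signal a stale context.
 */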

static void vfio_intx_disable(struct vfio_pci_device *vdev)
{
        vfio_virqfd_disable(&vdev->ctx[0].unmask);
        vfio_virqfd_disable(&vdev->ctx[0].mask);
        vfio_intx_set_signal(vdev, -1);
        vdev->irq_type = VFIO_PCI_NUM_IRQS;
        vdev->num_ctx = 0;
        kfree(vdev->ctx);
}

/*
 * MSI/MSI-X
 */
static irqreturn_t vfio_msihandler(int irq, void *arg)
{
        struct eventfd_ctx *trigger = arg;

        eventfd_signal(trigger, 1);
        return IRQ_HANDLED;
}
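
/*
 * The eventfd context is passed as the dev_id cookie to request_irq(), so
 * the hard-irq handler above does nothing but signal the eventfd; no device
 * lookup or locking is needed in interrupt context.  When an irq bypass
 * consumer (e.g. KVM posted interrupts) is matched to the producer
 * registered below, this handler may be bypassed entirely.
 */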

static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix)
{
        struct pci_dev *pdev = vdev->pdev;
        unsigned int flag = msix ? PCI_IRQ_MSIX : PCI_IRQ_MSI;
        int ret;
        u16 cmd;

        if (!is_irq_none(vdev))
                return -EINVAL;

        vdev->ctx = kcalloc(nvec, sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
        if (!vdev->ctx)
                return -ENOMEM;

        /* return the number of supported vectors if we can't get all: */
        cmd = vfio_pci_memory_lock_and_enable(vdev);
        ret = pci_alloc_irq_vectors(pdev, 1, nvec, flag);
        if (ret < nvec) {
                if (ret > 0)
                        pci_free_irq_vectors(pdev);
                vfio_pci_memory_unlock_and_restore(vdev, cmd);
                kfree(vdev->ctx);
                return ret;
        }
        vfio_pci_memory_unlock_and_restore(vdev, cmd);

        vdev->num_ctx = nvec;
        vdev->irq_type = msix ? VFIO_PCI_MSIX_IRQ_INDEX :
                                VFIO_PCI_MSI_IRQ_INDEX;

        if (!msix) {
                /*
                 * Compute the virtual hardware field for max msi vectors -
                 * it is the log base 2 of the number of vectors.
                 */
                vdev->msi_qmax = fls(nvec * 2 - 1) - 1;
        }

        return 0;
}
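
/*
 * Example of the msi_qmax computation above: nvec = 3 gives
 * fls(3 * 2 - 1) - 1 = fls(5) - 1 = 3 - 1 = 2, i.e. a reported MSI queue
 * size of 2^2 = 4 vectors, the smallest power of two >= nvec; nvec = 1
 * yields 0 (a single vector).
 */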

static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev,
                                      int vector, int fd, bool msix)
{
        struct pci_dev *pdev = vdev->pdev;
        struct eventfd_ctx *trigger;
        int irq, ret;
        u16 cmd;

        if (vector < 0 || vector >= vdev->num_ctx)
                return -EINVAL;

        irq = pci_irq_vector(pdev, vector);

        if (vdev->ctx[vector].trigger) {
                irq_bypass_unregister_producer(&vdev->ctx[vector].producer);

                cmd = vfio_pci_memory_lock_and_enable(vdev);
                free_irq(irq, vdev->ctx[vector].trigger);
                vfio_pci_memory_unlock_and_restore(vdev, cmd);

                kfree(vdev->ctx[vector].name);
                eventfd_ctx_put(vdev->ctx[vector].trigger);
                vdev->ctx[vector].trigger = NULL;
        }

        if (fd < 0)
                return 0;

        vdev->ctx[vector].name = kasprintf(GFP_KERNEL, "vfio-msi%s[%d](%s)",
                                           msix ? "x" : "", vector,
                                           pci_name(pdev));
        if (!vdev->ctx[vector].name)
                return -ENOMEM;

        trigger = eventfd_ctx_fdget(fd);
        if (IS_ERR(trigger)) {
                kfree(vdev->ctx[vector].name);
                return PTR_ERR(trigger);
        }

        /*
         * The MSIx vector table resides in device memory which may be cleared
         * via backdoor resets. We don't allow direct access to the vector
         * table so even if a userspace driver attempts to save/restore around
         * such a reset it would be unsuccessful. To avoid this, restore the
         * cached value of the message prior to enabling.
         */
        cmd = vfio_pci_memory_lock_and_enable(vdev);
        if (msix) {
                struct msi_msg msg;

                get_cached_msi_msg(irq, &msg);
                pci_write_msi_msg(irq, &msg);
        }

        ret = request_irq(irq, vfio_msihandler, 0,
                          vdev->ctx[vector].name, trigger);
        vfio_pci_memory_unlock_and_restore(vdev, cmd);
        if (ret) {
                kfree(vdev->ctx[vector].name);
                eventfd_ctx_put(trigger);
                return ret;
        }

        vdev->ctx[vector].producer.token = trigger;
        vdev->ctx[vector].producer.irq = irq;
        ret = irq_bypass_register_producer(&vdev->ctx[vector].producer);
        if (unlikely(ret)) {
                dev_info(&pdev->dev,
                "irq bypass producer (token %p) registration fails: %d\n",
                vdev->ctx[vector].producer.token, ret);

                vdev->ctx[vector].producer.token = NULL;
        }
        vdev->ctx[vector].trigger = trigger;

        return 0;
}
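
/*
 * request_irq()/free_irq() on an MSI-X vector touches the vector table in
 * device MMIO, so the vfio_pci_memory_lock_and_enable() /
 * vfio_pci_memory_unlock_and_restore() pairs above guarantee memory decode
 * is enabled around those calls even if userspace has cleared it in the
 * command register.
 */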

static int vfio_msi_set_block(struct vfio_pci_device *vdev, unsigned start,
                              unsigned count, int32_t *fds, bool msix)
{
        int i, j, ret = 0;

        if (start >= vdev->num_ctx || start + count > vdev->num_ctx)
                return -EINVAL;

        for (i = 0, j = start; i < count && !ret; i++, j++) {
                int fd = fds ? fds[i] : -1;
                ret = vfio_msi_set_vector_signal(vdev, j, fd, msix);
        }

        if (ret) {
                for (--j; j >= (int)start; j--)
                        vfio_msi_set_vector_signal(vdev, j, -1, msix);
        }

        return ret;
}
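
/*
 * On failure the loop above stops at the offending vector and the vectors
 * already set up by this call are released again with fd = -1 before the
 * error is returned.
 */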

static void vfio_msi_disable(struct vfio_pci_device *vdev, bool msix)
{
        struct pci_dev *pdev = vdev->pdev;
        int i;
        u16 cmd;

        for (i = 0; i < vdev->num_ctx; i++) {
                vfio_virqfd_disable(&vdev->ctx[i].unmask);
                vfio_virqfd_disable(&vdev->ctx[i].mask);
        }

        vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix);

        cmd = vfio_pci_memory_lock_and_enable(vdev);
        pci_free_irq_vectors(pdev);
        vfio_pci_memory_unlock_and_restore(vdev, cmd);

        /*
         * Both disable paths above use pci_intx_for_msi() to clear DisINTx
         * via their shutdown paths.  Restore for NoINTx devices.
         */
        if (vdev->nointx)
                pci_intx(pdev, 0);

        vdev->irq_type = VFIO_PCI_NUM_IRQS;
        vdev->num_ctx = 0;
        kfree(vdev->ctx);
}

/*
 * IOCTL support
 */
static int vfio_pci_set_intx_unmask(struct vfio_pci_device *vdev,
                                    unsigned index, unsigned start,
                                    unsigned count, uint32_t flags, void *data)
{
        if (!is_intx(vdev) || start != 0 || count != 1)
                return -EINVAL;

        if (flags & VFIO_IRQ_SET_DATA_NONE) {
                vfio_pci_intx_unmask(vdev);
        } else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
                uint8_t unmask = *(uint8_t *)data;
                if (unmask)
                        vfio_pci_intx_unmask(vdev);
        } else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
                int32_t fd = *(int32_t *)data;
                if (fd >= 0)
                        return vfio_virqfd_enable((void *) vdev,
                                                  vfio_pci_intx_unmask_handler,
                                                  vfio_send_intx_eventfd, NULL,
                                                  &vdev->ctx[0].unmask, fd);

                vfio_virqfd_disable(&vdev->ctx[0].unmask);
        }

        return 0;
}

static int vfio_pci_set_intx_mask(struct vfio_pci_device *vdev,
                                  unsigned index, unsigned start,
                                  unsigned count, uint32_t flags, void *data)
{
        if (!is_intx(vdev) || start != 0 || count != 1)
                return -EINVAL;

        if (flags & VFIO_IRQ_SET_DATA_NONE) {
                vfio_pci_intx_mask(vdev);
        } else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
                uint8_t mask = *(uint8_t *)data;
                if (mask)
                        vfio_pci_intx_mask(vdev);
        } else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
                return -ENOTTY; /* XXX implement me */
        }

        return 0;
}

static int vfio_pci_set_intx_trigger(struct vfio_pci_device *vdev,
                                     unsigned index, unsigned start,
                                     unsigned count, uint32_t flags, void *data)
{
        if (is_intx(vdev) && !count && (flags & VFIO_IRQ_SET_DATA_NONE)) {
                vfio_intx_disable(vdev);
                return 0;
        }

        if (!(is_intx(vdev) || is_irq_none(vdev)) || start != 0 || count != 1)
                return -EINVAL;

        if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
                int32_t fd = *(int32_t *)data;
                int ret;

                if (is_intx(vdev))
                        return vfio_intx_set_signal(vdev, fd);

                ret = vfio_intx_enable(vdev);
                if (ret)
                        return ret;

                ret = vfio_intx_set_signal(vdev, fd);
                if (ret)
                        vfio_intx_disable(vdev);

                return ret;
        }

        if (!is_intx(vdev))
                return -EINVAL;

        if (flags & VFIO_IRQ_SET_DATA_NONE) {
                vfio_send_intx_eventfd(vdev, NULL);
        } else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
                uint8_t trigger = *(uint8_t *)data;
                if (trigger)
                        vfio_send_intx_eventfd(vdev, NULL);
        }
        return 0;
}

static int vfio_pci_set_msi_trigger(struct vfio_pci_device *vdev,
                                    unsigned index, unsigned start,
                                    unsigned count, uint32_t flags, void *data)
{
        int i;
        bool msix = (index == VFIO_PCI_MSIX_IRQ_INDEX) ? true : false;

        if (irq_is(vdev, index) && !count && (flags & VFIO_IRQ_SET_DATA_NONE)) {
                vfio_msi_disable(vdev, msix);
                return 0;
        }

        if (!(irq_is(vdev, index) || is_irq_none(vdev)))
                return -EINVAL;

        if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
                int32_t *fds = data;
                int ret;

                if (vdev->irq_type == index)
                        return vfio_msi_set_block(vdev, start, count,
                                                  fds, msix);

                ret = vfio_msi_enable(vdev, start + count, msix);
                if (ret)
                        return ret;

                ret = vfio_msi_set_block(vdev, start, count, fds, msix);
                if (ret)
                        vfio_msi_disable(vdev, msix);

                return ret;
        }

        if (!irq_is(vdev, index) || start + count > vdev->num_ctx)
                return -EINVAL;

        for (i = start; i < start + count; i++) {
                if (!vdev->ctx[i].trigger)
                        continue;
                if (flags & VFIO_IRQ_SET_DATA_NONE) {
                        eventfd_signal(vdev->ctx[i].trigger, 1);
                } else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
                        uint8_t *bools = data;
                        if (bools[i - start])
                                eventfd_signal(vdev->ctx[i].trigger, 1);
                }
        }
        return 0;
}
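
/*
 * Summary of the DATA flavors handled above: DATA_EVENTFD (re)wires the
 * vectors [start, start + count) to the supplied eventfds, enabling MSI or
 * MSI-X with start + count vectors on first use; DATA_NONE and DATA_BOOL
 * merely signal already-configured triggers from the kernel, which is
 * mainly useful for testing the eventfd plumbing.
 */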

static int vfio_pci_set_ctx_trigger_single(struct eventfd_ctx **ctx,
                                           unsigned int count, uint32_t flags,
                                           void *data)
{
        /* DATA_NONE/DATA_BOOL enables loopback testing */
        if (flags & VFIO_IRQ_SET_DATA_NONE) {
                if (*ctx) {
                        if (count) {
                                eventfd_signal(*ctx, 1);
                        } else {
                                eventfd_ctx_put(*ctx);
                                *ctx = NULL;
                        }
                        return 0;
                }
        } else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
                uint8_t trigger;

                if (!count)
                        return -EINVAL;

                trigger = *(uint8_t *)data;
                if (trigger && *ctx)
                        eventfd_signal(*ctx, 1);

                return 0;
        } else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
                int32_t fd;

                if (!count)
                        return -EINVAL;

                fd = *(int32_t *)data;
                if (fd == -1) {
                        if (*ctx)
                                eventfd_ctx_put(*ctx);
                        *ctx = NULL;
                } else if (fd >= 0) {
                        struct eventfd_ctx *efdctx;

                        efdctx = eventfd_ctx_fdget(fd);
                        if (IS_ERR(efdctx))
                                return PTR_ERR(efdctx);

                        if (*ctx)
                                eventfd_ctx_put(*ctx);

                        *ctx = efdctx;
                }
                return 0;
        }

        return -EINVAL;
}
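
/*
 * This single-context helper backs both vdev->err_trigger and
 * vdev->req_trigger below.  DATA_EVENTFD with fd == -1, or DATA_NONE with
 * count == 0, releases the current eventfd; DATA_NONE/DATA_BOOL with a
 * non-zero count signal it, providing the loopback test path noted above.
 */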

static int vfio_pci_set_err_trigger(struct vfio_pci_device *vdev,
                                    unsigned index, unsigned start,
                                    unsigned count, uint32_t flags, void *data)
{
        if (index != VFIO_PCI_ERR_IRQ_INDEX || start != 0 || count > 1)
                return -EINVAL;

        return vfio_pci_set_ctx_trigger_single(&vdev->err_trigger,
                                               count, flags, data);
}

static int vfio_pci_set_req_trigger(struct vfio_pci_device *vdev,
                                    unsigned index, unsigned start,
                                    unsigned count, uint32_t flags, void *data)
{
        if (index != VFIO_PCI_REQ_IRQ_INDEX || start != 0 || count > 1)
                return -EINVAL;

        return vfio_pci_set_ctx_trigger_single(&vdev->req_trigger,
                                               count, flags, data);
}

int vfio_pci_set_irqs_ioctl(struct vfio_pci_device *vdev, uint32_t flags,
                            unsigned index, unsigned start, unsigned count,
                            void *data)
{
        int (*func)(struct vfio_pci_device *vdev, unsigned index,
                    unsigned start, unsigned count, uint32_t flags,
                    void *data) = NULL;

        switch (index) {
        case VFIO_PCI_INTX_IRQ_INDEX:
                switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
                case VFIO_IRQ_SET_ACTION_MASK:
                        func = vfio_pci_set_intx_mask;
                        break;
                case VFIO_IRQ_SET_ACTION_UNMASK:
                        func = vfio_pci_set_intx_unmask;
                        break;
                case VFIO_IRQ_SET_ACTION_TRIGGER:
                        func = vfio_pci_set_intx_trigger;
                        break;
                }
                break;
        case VFIO_PCI_MSI_IRQ_INDEX:
        case VFIO_PCI_MSIX_IRQ_INDEX:
                switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
                case VFIO_IRQ_SET_ACTION_MASK:
                case VFIO_IRQ_SET_ACTION_UNMASK:
                        /* XXX Need masking support exported */
                        break;
                case VFIO_IRQ_SET_ACTION_TRIGGER:
                        func = vfio_pci_set_msi_trigger;
                        break;
                }
                break;
        case VFIO_PCI_ERR_IRQ_INDEX:
                switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
                case VFIO_IRQ_SET_ACTION_TRIGGER:
                        if (pci_is_pcie(vdev->pdev))
                                func = vfio_pci_set_err_trigger;
                        break;
                }
                break;
        case VFIO_PCI_REQ_IRQ_INDEX:
                switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
                case VFIO_IRQ_SET_ACTION_TRIGGER:
                        func = vfio_pci_set_req_trigger;
                        break;
                }
                break;
        }

        if (!func)
                return -EINVAL;

        return func(vdev, index, start, count, flags, data);
}
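
/*
 * Illustrative userspace sketch (not part of this driver): how a user might
 * reach vfio_pci_set_msi_trigger() above by attaching two MSI-X vectors to
 * eventfds with VFIO_DEVICE_SET_IRQS.  Error handling is omitted and
 * device_fd is assumed to be an open VFIO device file descriptor.
 *
 *      struct vfio_irq_set *set;
 *      size_t sz = sizeof(*set) + 2 * sizeof(int32_t);
 *      int32_t *fds;
 *
 *      set = calloc(1, sz);
 *      set->argsz = sz;
 *      set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
 *      set->index = VFIO_PCI_MSIX_IRQ_INDEX;
 *      set->start = 0;
 *      set->count = 2;
 *      fds = (int32_t *)set->data;
 *      fds[0] = eventfd(0, EFD_CLOEXEC);
 *      fds[1] = eventfd(0, EFD_CLOEXEC);
 *      ioctl(device_fd, VFIO_DEVICE_SET_IRQS, set);
 *      free(set);
 */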