/*
 * VFIO PCI interrupt handling
 *
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc.  All rights reserved.
 * Author: Tom Lyon, pugs@cisco.com
 */
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/eventfd.h>
#include <linux/msi.h>
#include <linux/pci.h>
#include <linux/file.h>
#include <linux/vfio.h>
#include <linux/wait.h>
#include <linux/slab.h>

#include "vfio_pci_private.h"
/*
 * INTx
 */
static void vfio_send_intx_eventfd(void *opaque, void *unused)
{
	struct vfio_pci_device *vdev = opaque;

	if (likely(is_intx(vdev) && !vdev->virq_disabled))
		eventfd_signal(vdev->ctx[0].trigger, 1);
}
void vfio_pci_intx_mask(struct vfio_pci_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	unsigned long flags;

	spin_lock_irqsave(&vdev->irqlock, flags);

	/*
	 * Masking can come from interrupt, ioctl, or config space
	 * via INTx disable.  The latter means this can get called
	 * even when not using intx delivery.  In this case, just
	 * try to have the physical bit follow the virtual bit.
	 */
	if (unlikely(!is_intx(vdev))) {
		if (vdev->pci_2_3)
			pci_intx(pdev, 0);
	} else if (!vdev->ctx[0].masked) {
		/*
		 * Can't use check_and_mask here because we always want to
		 * mask, not just when something is pending.
		 */
		if (vdev->pci_2_3)
			pci_intx(pdev, 0);
		else
			disable_irq_nosync(pdev->irq);

		vdev->ctx[0].masked = true;
	}

	spin_unlock_irqrestore(&vdev->irqlock, flags);
}
/*
 * If this is triggered by an eventfd, we can't call eventfd_signal
 * or else we'll deadlock on the eventfd wait queue.  Return >0 when
 * a signal is necessary, which can then be handled via a work queue
 * or directly depending on the caller.
 */
static int vfio_pci_intx_unmask_handler(void *opaque, void *unused)
{
	struct vfio_pci_device *vdev = opaque;
	struct pci_dev *pdev = vdev->pdev;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&vdev->irqlock, flags);

	/*
	 * Unmasking comes from ioctl or config, so again, have the
	 * physical bit follow the virtual even when not using INTx.
	 */
	if (unlikely(!is_intx(vdev))) {
		if (vdev->pci_2_3)
			pci_intx(pdev, 1);
	} else if (vdev->ctx[0].masked && !vdev->virq_disabled) {
		/*
		 * A pending interrupt here would immediately trigger,
		 * but we can avoid that overhead by just re-sending
		 * the interrupt to the user.
		 */
		if (vdev->pci_2_3) {
			if (!pci_check_and_unmask_intx(pdev))
				ret = 1;
		} else
			enable_irq(pdev->irq);

		vdev->ctx[0].masked = (ret > 0);
	}

	spin_unlock_irqrestore(&vdev->irqlock, flags);

	return ret;
}
void vfio_pci_intx_unmask(struct vfio_pci_device *vdev)
{
	if (vfio_pci_intx_unmask_handler(vdev, NULL) > 0)
		vfio_send_intx_eventfd(vdev, NULL);
}
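/*
 * When the device lacks DisINTx support (!pci_2_3) the IRQ line is
 * requested exclusively, so the handler below can simply disable the host
 * IRQ and claim the interrupt.  With DisINTx the line may be shared with
 * other devices, so the interrupt is only claimed when
 * pci_check_and_mask_intx() confirms this device is asserting it; the
 * eventfd is signaled only when the interrupt was actually ours.
 */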
static irqreturn_t vfio_intx_handler(int irq, void *dev_id)
{
	struct vfio_pci_device *vdev = dev_id;
	unsigned long flags;
	int ret = IRQ_NONE;

	spin_lock_irqsave(&vdev->irqlock, flags);

	if (!vdev->pci_2_3) {
		disable_irq_nosync(vdev->pdev->irq);
		vdev->ctx[0].masked = true;
		ret = IRQ_HANDLED;
	} else if (!vdev->ctx[0].masked &&  /* may be shared */
		   pci_check_and_mask_intx(vdev->pdev)) {
		vdev->ctx[0].masked = true;
		ret = IRQ_HANDLED;
	}

	spin_unlock_irqrestore(&vdev->irqlock, flags);

	if (ret == IRQ_HANDLED)
		vfio_send_intx_eventfd(vdev, NULL);

	return ret;
}
static int vfio_intx_enable(struct vfio_pci_device *vdev)
{
	if (!is_irq_none(vdev))
		return -EINVAL;

	if (!vdev->pdev->irq)
		return -ENODEV;

	vdev->ctx = kzalloc(sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
	if (!vdev->ctx)
		return -ENOMEM;

	vdev->num_ctx = 1;

	/*
	 * If the virtual interrupt is masked, restore it.  Devices
	 * supporting DisINTx can be masked at the hardware level
	 * here, non-PCI-2.3 devices will have to wait until the
	 * interrupt is enabled.
	 */
	vdev->ctx[0].masked = vdev->virq_disabled;
	if (vdev->pci_2_3)
		pci_intx(vdev->pdev, !vdev->ctx[0].masked);

	vdev->irq_type = VFIO_PCI_INTX_IRQ_INDEX;

	return 0;
}
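/*
 * IRQF_SHARED is only usable when the device supports DisINTx, since that
 * is the only way the handler can quiesce a level-triggered interrupt
 * without affecting other devices on the line; without DisINTx the IRQ is
 * requested exclusively and masked with disable_irq_nosync() instead.
 */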
static int vfio_intx_set_signal(struct vfio_pci_device *vdev, int fd)
{
	struct pci_dev *pdev = vdev->pdev;
	unsigned long irqflags = IRQF_SHARED;
	struct eventfd_ctx *trigger;
	unsigned long flags;
	int ret;

	if (vdev->ctx[0].trigger) {
		free_irq(pdev->irq, vdev);
		kfree(vdev->ctx[0].name);
		eventfd_ctx_put(vdev->ctx[0].trigger);
		vdev->ctx[0].trigger = NULL;
	}

	if (fd < 0) /* Disable only */
		return 0;

	vdev->ctx[0].name = kasprintf(GFP_KERNEL, "vfio-intx(%s)",
				      pci_name(pdev));
	if (!vdev->ctx[0].name)
		return -ENOMEM;

	trigger = eventfd_ctx_fdget(fd);
	if (IS_ERR(trigger)) {
		kfree(vdev->ctx[0].name);
		return PTR_ERR(trigger);
	}

	vdev->ctx[0].trigger = trigger;

	if (!vdev->pci_2_3)
		irqflags = 0;

	ret = request_irq(pdev->irq, vfio_intx_handler,
			  irqflags, vdev->ctx[0].name, vdev);
	if (ret) {
		vdev->ctx[0].trigger = NULL;
		kfree(vdev->ctx[0].name);
		eventfd_ctx_put(trigger);
		return ret;
	}

	/*
	 * INTx disable will stick across the new irq setup,
	 * disable_irq won't.
	 */
	spin_lock_irqsave(&vdev->irqlock, flags);
	if (!vdev->pci_2_3 && vdev->ctx[0].masked)
		disable_irq_nosync(pdev->irq);
	spin_unlock_irqrestore(&vdev->irqlock, flags);

	return 0;
}
static void vfio_intx_disable(struct vfio_pci_device *vdev)
{
	vfio_intx_set_signal(vdev, -1);
	vfio_virqfd_disable(&vdev->ctx[0].unmask);
	vfio_virqfd_disable(&vdev->ctx[0].mask);
	vdev->irq_type = VFIO_PCI_NUM_IRQS;
	vdev->num_ctx = 0;
	kfree(vdev->ctx);
}
/*
 * MSI/MSI-X
 */
static irqreturn_t vfio_msihandler(int irq, void *arg)
{
	struct eventfd_ctx *trigger = arg;

	eventfd_signal(trigger, 1);
	return IRQ_HANDLED;
}
static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	int ret;

	if (!is_irq_none(vdev))
		return -EINVAL;

	vdev->ctx = kzalloc(nvec * sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
	if (!vdev->ctx)
		return -ENOMEM;

	if (msix) {
		int i;

		vdev->msix = kzalloc(nvec * sizeof(struct msix_entry),
				     GFP_KERNEL);
		if (!vdev->msix) {
			kfree(vdev->ctx);
			return -ENOMEM;
		}

		for (i = 0; i < nvec; i++)
			vdev->msix[i].entry = i;

		ret = pci_enable_msix_range(pdev, vdev->msix, 1, nvec);
		if (ret < nvec) {
			if (ret > 0)
				pci_disable_msix(pdev);
			kfree(vdev->msix);
			kfree(vdev->ctx);
			return ret;
		}
	} else {
		ret = pci_enable_msi_range(pdev, 1, nvec);
		if (ret < nvec) {
			if (ret > 0)
				pci_disable_msi(pdev);
			kfree(vdev->ctx);
			return ret;
		}
	}

	vdev->num_ctx = nvec;
	vdev->irq_type = msix ? VFIO_PCI_MSIX_IRQ_INDEX :
				VFIO_PCI_MSI_IRQ_INDEX;

	if (!msix) {
		/*
		 * Compute the virtual hardware field for max msi vectors -
		 * it is the log base 2 of the number of vectors.
		 */
		vdev->msi_qmax = fls(nvec * 2 - 1) - 1;
	}

	return 0;
}
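/*
 * The msi_qmax computation in vfio_msi_enable() above yields
 * ceil(log2(nvec)), matching the log2 encoding used by the MSI Multiple
 * Message fields in the virtualized capability.  For example, nvec = 5
 * gives fls(5 * 2 - 1) - 1 = fls(9) - 1 = 4 - 1 = 3, i.e. a reported
 * maximum of 2^3 = 8 vectors.
 */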
static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev,
				      int vector, int fd, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	int irq = msix ? vdev->msix[vector].vector : pdev->irq + vector;
	char *name = msix ? "vfio-msix" : "vfio-msi";
	struct eventfd_ctx *trigger;
	int ret;

	if (vector >= vdev->num_ctx)
		return -EINVAL;

	if (vdev->ctx[vector].trigger) {
		free_irq(irq, vdev->ctx[vector].trigger);
		irq_bypass_unregister_producer(&vdev->ctx[vector].producer);
		kfree(vdev->ctx[vector].name);
		eventfd_ctx_put(vdev->ctx[vector].trigger);
		vdev->ctx[vector].trigger = NULL;
	}

	if (fd < 0)
		return 0;

	vdev->ctx[vector].name = kasprintf(GFP_KERNEL, "%s[%d](%s)",
					   name, vector, pci_name(pdev));
	if (!vdev->ctx[vector].name)
		return -ENOMEM;

	trigger = eventfd_ctx_fdget(fd);
	if (IS_ERR(trigger)) {
		kfree(vdev->ctx[vector].name);
		return PTR_ERR(trigger);
	}

	/*
	 * The MSIx vector table resides in device memory which may be cleared
	 * via backdoor resets. We don't allow direct access to the vector
	 * table so even if a userspace driver attempts to save/restore around
	 * such a reset it would be unsuccessful. To avoid this, restore the
	 * cached value of the message prior to enabling.
	 */
	if (msix) {
		struct msi_msg msg;

		get_cached_msi_msg(irq, &msg);
		pci_write_msi_msg(irq, &msg);
	}

	ret = request_irq(irq, vfio_msihandler, 0,
			  vdev->ctx[vector].name, trigger);
	if (ret) {
		kfree(vdev->ctx[vector].name);
		eventfd_ctx_put(trigger);
		return ret;
	}

	vdev->ctx[vector].producer.token = trigger;
	vdev->ctx[vector].producer.irq = irq;
	ret = irq_bypass_register_producer(&vdev->ctx[vector].producer);
	if (unlikely(ret))
		dev_info(&pdev->dev,
			 "irq bypass producer (token %p) registration fails: %d\n",
			 vdev->ctx[vector].producer.token, ret);

	vdev->ctx[vector].trigger = trigger;

	return 0;
}
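/*
 * Failure of irq_bypass_register_producer() above is deliberately
 * non-fatal: it only loses the direct-injection (bypass) optimization for
 * consumers such as KVM, and interrupts still reach userspace through the
 * eventfd signaled by vfio_msihandler().
 */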
static int vfio_msi_set_block(struct vfio_pci_device *vdev, unsigned start,
			      unsigned count, int32_t *fds, bool msix)
{
	int i, j, ret = 0;

	if (start + count > vdev->num_ctx)
		return -EINVAL;

	for (i = 0, j = start; i < count && !ret; i++, j++) {
		int fd = fds ? fds[i] : -1;
		ret = vfio_msi_set_vector_signal(vdev, j, fd, msix);
	}

	if (ret) {
		for (--j; j >= start; j--)
			vfio_msi_set_vector_signal(vdev, j, -1, msix);
	}

	return ret;
}
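/*
 * Tearing down MSI/MSI-X reuses vfio_msi_set_block() with a NULL fds
 * array, which releases every vector's host IRQ and eventfd before the
 * capability itself is disabled below.
 */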
static void vfio_msi_disable(struct vfio_pci_device *vdev, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	int i;

	vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix);

	for (i = 0; i < vdev->num_ctx; i++) {
		vfio_virqfd_disable(&vdev->ctx[i].unmask);
		vfio_virqfd_disable(&vdev->ctx[i].mask);
	}

	if (msix) {
		pci_disable_msix(vdev->pdev);
		kfree(vdev->msix);
	} else
		pci_disable_msi(pdev);

	vdev->irq_type = VFIO_PCI_NUM_IRQS;
	vdev->num_ctx = 0;
	kfree(vdev->ctx);
}
/*
 * IOCTL support
 */
static int vfio_pci_set_intx_unmask(struct vfio_pci_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	if (!is_intx(vdev) || start != 0 || count != 1)
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		vfio_pci_intx_unmask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t unmask = *(uint8_t *)data;
		if (unmask)
			vfio_pci_intx_unmask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t fd = *(int32_t *)data;
		if (fd >= 0)
			return vfio_virqfd_enable((void *) vdev,
						  vfio_pci_intx_unmask_handler,
						  vfio_send_intx_eventfd, NULL,
						  &vdev->ctx[0].unmask, fd);

		vfio_virqfd_disable(&vdev->ctx[0].unmask);
	}

	return 0;
}
static int vfio_pci_set_intx_mask(struct vfio_pci_device *vdev,
				  unsigned index, unsigned start,
				  unsigned count, uint32_t flags, void *data)
{
	if (!is_intx(vdev) || start != 0 || count != 1)
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		vfio_pci_intx_mask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t mask = *(uint8_t *)data;
		if (mask)
			vfio_pci_intx_mask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		return -ENOTTY; /* XXX implement me */
	}

	return 0;
}
static int vfio_pci_set_intx_trigger(struct vfio_pci_device *vdev,
				     unsigned index, unsigned start,
				     unsigned count, uint32_t flags, void *data)
{
	if (is_intx(vdev) && !count && (flags & VFIO_IRQ_SET_DATA_NONE)) {
		vfio_intx_disable(vdev);
		return 0;
	}

	if (!(is_intx(vdev) || is_irq_none(vdev)) || start != 0 || count != 1)
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t fd = *(int32_t *)data;
		int ret;

		if (is_intx(vdev))
			return vfio_intx_set_signal(vdev, fd);

		ret = vfio_intx_enable(vdev);
		if (ret)
			return ret;

		ret = vfio_intx_set_signal(vdev, fd);
		if (ret)
			vfio_intx_disable(vdev);

		return ret;
	}

	if (!is_intx(vdev))
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		vfio_send_intx_eventfd(vdev, NULL);
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t trigger = *(uint8_t *)data;
		if (trigger)
			vfio_send_intx_eventfd(vdev, NULL);
	}

	return 0;
}
static int vfio_pci_set_msi_trigger(struct vfio_pci_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	int i;
	bool msix = (index == VFIO_PCI_MSIX_IRQ_INDEX) ? true : false;

	if (irq_is(vdev, index) && !count && (flags & VFIO_IRQ_SET_DATA_NONE)) {
		vfio_msi_disable(vdev, msix);
		return 0;
	}

	if (!(irq_is(vdev, index) || is_irq_none(vdev)))
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t *fds = data;
		int ret;

		if (vdev->irq_type == index)
			return vfio_msi_set_block(vdev, start, count,
						  fds, msix);

		ret = vfio_msi_enable(vdev, start + count, msix);
		if (ret)
			return ret;

		ret = vfio_msi_set_block(vdev, start, count, fds, msix);
		if (ret)
			vfio_msi_disable(vdev, msix);

		return ret;
	}

	if (!irq_is(vdev, index) || start + count > vdev->num_ctx)
		return -EINVAL;

	for (i = start; i < start + count; i++) {
		if (!vdev->ctx[i].trigger)
			continue;
		if (flags & VFIO_IRQ_SET_DATA_NONE) {
			eventfd_signal(vdev->ctx[i].trigger, 1);
		} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
			uint8_t *bools = data;
			if (bools[i - start])
				eventfd_signal(vdev->ctx[i].trigger, 1);
		}
	}

	return 0;
}
static int vfio_pci_set_ctx_trigger_single(struct eventfd_ctx **ctx,
					   uint32_t flags, void *data)
{
	int32_t fd = *(int32_t *)data;

	if (!(flags & VFIO_IRQ_SET_DATA_TYPE_MASK))
		return -EINVAL;

	/* DATA_NONE/DATA_BOOL enables loopback testing */
	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		if (*ctx)
			eventfd_signal(*ctx, 1);
		return 0;
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t trigger = *(uint8_t *)data;
		if (trigger && *ctx)
			eventfd_signal(*ctx, 1);
		return 0;
	}

	/* Handle SET_DATA_EVENTFD */
	if (fd == -1) {
		if (*ctx)
			eventfd_ctx_put(*ctx);
		*ctx = NULL;
		return 0;
	} else if (fd >= 0) {
		struct eventfd_ctx *efdctx;

		efdctx = eventfd_ctx_fdget(fd);
		if (IS_ERR(efdctx))
			return PTR_ERR(efdctx);
		if (*ctx)
			eventfd_ctx_put(*ctx);
		*ctx = efdctx;
		return 0;
	}

	return -EINVAL;
}
static int vfio_pci_set_err_trigger(struct vfio_pci_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	if (index != VFIO_PCI_ERR_IRQ_INDEX)
		return -EINVAL;

	/*
	 * We should sanitize start & count, but that wasn't caught
	 * originally, so this IRQ index must forever ignore them :-(
	 */
	return vfio_pci_set_ctx_trigger_single(&vdev->err_trigger, flags, data);
}
static int vfio_pci_set_req_trigger(struct vfio_pci_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	if (index != VFIO_PCI_REQ_IRQ_INDEX || start != 0 || count != 1)
		return -EINVAL;

	return vfio_pci_set_ctx_trigger_single(&vdev->req_trigger, flags, data);
}
int vfio_pci_set_irqs_ioctl(struct vfio_pci_device *vdev, uint32_t flags,
			    unsigned index, unsigned start, unsigned count,
			    void *data)
{
	int (*func)(struct vfio_pci_device *vdev, unsigned index,
		    unsigned start, unsigned count, uint32_t flags,
		    void *data) = NULL;

	switch (index) {
	case VFIO_PCI_INTX_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
			func = vfio_pci_set_intx_mask;
			break;
		case VFIO_IRQ_SET_ACTION_UNMASK:
			func = vfio_pci_set_intx_unmask;
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = vfio_pci_set_intx_trigger;
			break;
		}
		break;
	case VFIO_PCI_MSI_IRQ_INDEX:
	case VFIO_PCI_MSIX_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
		case VFIO_IRQ_SET_ACTION_UNMASK:
			/* XXX Need masking support exported */
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = vfio_pci_set_msi_trigger;
			break;
		}
		break;
	case VFIO_PCI_ERR_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			if (pci_is_pcie(vdev->pdev))
				func = vfio_pci_set_err_trigger;
			break;
		}
		break;
	case VFIO_PCI_REQ_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = vfio_pci_set_req_trigger;
			break;
		}
		break;
	}

	if (!func)
		return -EINVAL;

	return func(vdev, index, start, count, flags, data);
}
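/*
 * Userspace reaches vfio_pci_set_irqs_ioctl() through the
 * VFIO_DEVICE_SET_IRQS ioctl.  A minimal sketch of the userspace side
 * (illustration only; error handling omitted, device_fd is assumed to be
 * an already-open VFIO device file descriptor) that attaches an eventfd
 * to the INTx index:
 *
 *	char buf[sizeof(struct vfio_irq_set) + sizeof(int32_t)];
 *	struct vfio_irq_set *set = (struct vfio_irq_set *)buf;
 *	int32_t fd = eventfd(0, EFD_CLOEXEC);
 *
 *	set->argsz = sizeof(buf);
 *	set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
 *	set->index = VFIO_PCI_INTX_IRQ_INDEX;
 *	set->start = 0;
 *	set->count = 1;
 *	memcpy(set->data, &fd, sizeof(fd));
 *	ioctl(device_fd, VFIO_DEVICE_SET_IRQS, set);
 *
 * The same index with count = 0 and VFIO_IRQ_SET_DATA_NONE tears the
 * trigger back down (see vfio_pci_set_intx_trigger() above).
 */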