// SPDX-License-Identifier: GPL-2.0-only
/*
 * VFIO platform devices interrupt handling
 *
 * Copyright (C) 2013 - Virtual Open Systems
 * Author: Antonios Motakis <a.motakis@virtualopensystems.com>
 */

#include <linux/eventfd.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/vfio.h>
#include <linux/irq.h>

#include "vfio_platform_private.h"
static void vfio_platform_mask(struct vfio_platform_irq *irq_ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&irq_ctx->lock, flags);

	if (!irq_ctx->masked) {
		disable_irq_nosync(irq_ctx->hwirq);
		irq_ctx->masked = true;
	}

	spin_unlock_irqrestore(&irq_ctx->lock, flags);
}
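
/*
 * virqfd callback, run when userspace signals the eventfd registered
 * with VFIO_IRQ_SET_ACTION_MASK | VFIO_IRQ_SET_DATA_EVENTFD.
 */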
static int vfio_platform_mask_handler(void *opaque, void *unused)
{
	struct vfio_platform_irq *irq_ctx = opaque;

	vfio_platform_mask(irq_ctx);

	return 0;
}
static int vfio_platform_set_irq_mask(struct vfio_platform_device *vdev,
				      unsigned index, unsigned start,
				      unsigned count, uint32_t flags,
				      void *data)
{
	if (start != 0 || count != 1)
		return -EINVAL;

	if (!(vdev->irqs[index].flags & VFIO_IRQ_INFO_MASKABLE))
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t fd = *(int32_t *)data;

		if (fd >= 0)
			return vfio_virqfd_enable((void *) &vdev->irqs[index],
						  vfio_platform_mask_handler,
						  NULL, NULL,
						  &vdev->irqs[index].mask, fd);

		vfio_virqfd_disable(&vdev->irqs[index].mask);
		return 0;
	}

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		vfio_platform_mask(&vdev->irqs[index]);

	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t mask = *(uint8_t *)data;

		if (mask)
			vfio_platform_mask(&vdev->irqs[index]);
	}

	return 0;
}
static void vfio_platform_unmask(struct vfio_platform_irq *irq_ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&irq_ctx->lock, flags);

	if (irq_ctx->masked) {
		enable_irq(irq_ctx->hwirq);
		irq_ctx->masked = false;
	}

	spin_unlock_irqrestore(&irq_ctx->lock, flags);
}
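
/*
 * virqfd callback for the unmask eventfd, symmetric to the mask
 * handler above.
 */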
static int vfio_platform_unmask_handler(void *opaque, void *unused)
{
	struct vfio_platform_irq *irq_ctx = opaque;

	vfio_platform_unmask(irq_ctx);

	return 0;
}
static int vfio_platform_set_irq_unmask(struct vfio_platform_device *vdev,
					unsigned index, unsigned start,
					unsigned count, uint32_t flags,
					void *data)
{
	if (start != 0 || count != 1)
		return -EINVAL;

	if (!(vdev->irqs[index].flags & VFIO_IRQ_INFO_MASKABLE))
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t fd = *(int32_t *)data;

		if (fd >= 0)
			return vfio_virqfd_enable((void *) &vdev->irqs[index],
						  vfio_platform_unmask_handler,
						  NULL, NULL,
						  &vdev->irqs[index].unmask,
						  fd);

		vfio_virqfd_disable(&vdev->irqs[index].unmask);
		return 0;
	}

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		vfio_platform_unmask(&vdev->irqs[index]);

	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t unmask = *(uint8_t *)data;

		if (unmask)
			vfio_platform_unmask(&vdev->irqs[index]);
	}

	return 0;
}
/*
 * The trigger eventfd is guaranteed valid in the interrupt path
 * and protected by the igate mutex when triggered via ioctl.
 */
static void vfio_send_eventfd(struct vfio_platform_irq *irq_ctx)
{
	if (likely(irq_ctx->trigger))
		eventfd_signal(irq_ctx->trigger);
}
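
/*
 * A level-triggered interrupt stays asserted until userspace services
 * the device, so it is masked at the host as soon as it fires and left
 * masked until the user unmasks it; otherwise the host would
 * immediately re-take the same interrupt in a storm.
 */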
static irqreturn_t vfio_automasked_irq_handler(int irq, void *dev_id)
{
	struct vfio_platform_irq *irq_ctx = dev_id;
	unsigned long flags;
	int ret = IRQ_NONE;

	spin_lock_irqsave(&irq_ctx->lock, flags);

	if (!irq_ctx->masked) {
		ret = IRQ_HANDLED;

		/* automask maskable interrupts */
		disable_irq_nosync(irq_ctx->hwirq);
		irq_ctx->masked = true;
	}

	spin_unlock_irqrestore(&irq_ctx->lock, flags);

	if (ret == IRQ_HANDLED)
		vfio_send_eventfd(irq_ctx);

	return ret;
}
static irqreturn_t vfio_irq_handler(int irq, void *dev_id)
{
	struct vfio_platform_irq *irq_ctx = dev_id;

	vfio_send_eventfd(irq_ctx);

	return IRQ_HANDLED;
}
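
/*
 * Install or tear down the trigger eventfd of an IRQ.  A valid fd
 * installs a new eventfd context and enables the hwirq; a negative fd
 * only tears down the current trigger, leaving the hwirq disabled.
 */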
static int vfio_set_trigger(struct vfio_platform_device *vdev, int index,
			    int fd)
{
	struct vfio_platform_irq *irq = &vdev->irqs[index];
	struct eventfd_ctx *trigger;

	if (irq->trigger) {
		disable_irq(irq->hwirq);
		eventfd_ctx_put(irq->trigger);
		irq->trigger = NULL;
	}

	if (fd < 0) /* Disable only */
		return 0;

	trigger = eventfd_ctx_fdget(fd);
	if (IS_ERR(trigger))
		return PTR_ERR(trigger);

	irq->trigger = trigger;

	/*
	 * irq->masked effectively provides nested disables within the overall
	 * enable relative to trigger.  Specifically request_irq() is called
	 * with NO_AUTOEN, therefore the IRQ is initially disabled.  The user
	 * may only further disable the IRQ with a MASK operation because
	 * irq->masked is initially false.
	 */
	enable_irq(irq->hwirq);

	return 0;
}
static int vfio_platform_set_irq_trigger(struct vfio_platform_device *vdev,
					 unsigned index, unsigned start,
					 unsigned count, uint32_t flags,
					 void *data)
{
	struct vfio_platform_irq *irq = &vdev->irqs[index];
	irq_handler_t handler;

	if (vdev->irqs[index].flags & VFIO_IRQ_INFO_AUTOMASKED)
		handler = vfio_automasked_irq_handler;
	else
		handler = vfio_irq_handler;

	if (!count && (flags & VFIO_IRQ_SET_DATA_NONE))
		return vfio_set_trigger(vdev, index, -1);

	if (start != 0 || count != 1)
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t fd = *(int32_t *)data;

		return vfio_set_trigger(vdev, index, fd);
	}

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		handler(irq->hwirq, irq);

	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t trigger = *(uint8_t *)data;

		if (trigger)
			handler(irq->hwirq, irq);
	}

	return 0;
}
int vfio_platform_set_irqs_ioctl(struct vfio_platform_device *vdev,
				 uint32_t flags, unsigned index, unsigned start,
				 unsigned count, void *data)
{
	int (*func)(struct vfio_platform_device *vdev, unsigned index,
		    unsigned start, unsigned count, uint32_t flags,
		    void *data) = NULL;

	/*
	 * For compatibility, errors from request_irq() are local to the
	 * SET_IRQS path and reflected in the name pointer.  This allows,
	 * for example, polling mode fallback for an exclusive IRQ failure.
	 */
	if (IS_ERR(vdev->irqs[index].name))
		return PTR_ERR(vdev->irqs[index].name);

	switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
	case VFIO_IRQ_SET_ACTION_MASK:
		func = vfio_platform_set_irq_mask;
		break;
	case VFIO_IRQ_SET_ACTION_UNMASK:
		func = vfio_platform_set_irq_unmask;
		break;
	case VFIO_IRQ_SET_ACTION_TRIGGER:
		func = vfio_platform_set_irq_trigger;
		break;
	}

	if (!func)
		return -ENOTTY;

	return func(vdev, index, start, count, flags, data);
}
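
/*
 * Illustrative userspace sketch (an assumption for documentation, not
 * part of this driver): wiring IRQ index 0 to an eventfd through
 * VFIO_DEVICE_SET_IRQS.  "device_fd" stands for a VFIO device fd the
 * caller already obtained.
 *
 *	struct vfio_irq_set *irq_set;
 *	int32_t *pfd;
 *	int efd = eventfd(0, EFD_CLOEXEC);
 *
 *	irq_set = malloc(sizeof(*irq_set) + sizeof(int32_t));
 *	irq_set->argsz = sizeof(*irq_set) + sizeof(int32_t);
 *	irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
 *			 VFIO_IRQ_SET_ACTION_TRIGGER;
 *	irq_set->index = 0;
 *	irq_set->start = 0;
 *	irq_set->count = 1;
 *	pfd = (int32_t *)&irq_set->data;
 *	*pfd = efd;
 *	ioctl(device_fd, VFIO_DEVICE_SET_IRQS, irq_set);
 *
 * Interrupts then arrive as counter increments readable from efd; for
 * AUTOMASKED (level) IRQs the user re-enables delivery afterwards with
 * VFIO_IRQ_SET_ACTION_UNMASK.
 */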
int vfio_platform_irq_init(struct vfio_platform_device *vdev)
{
	int cnt = 0, i, ret = 0;

	while (vdev->get_irq(vdev, cnt) >= 0)
		cnt++;

	vdev->irqs = kcalloc(cnt, sizeof(struct vfio_platform_irq),
			     GFP_KERNEL_ACCOUNT);
	if (!vdev->irqs)
		return -ENOMEM;

	for (i = 0; i < cnt; i++) {
		int hwirq = vdev->get_irq(vdev, i);
		irq_handler_t handler = vfio_irq_handler;

		if (hwirq < 0) {
			ret = -EINVAL;
			goto err;
		}

		spin_lock_init(&vdev->irqs[i].lock);

		vdev->irqs[i].flags = VFIO_IRQ_INFO_EVENTFD;

		if (irq_get_trigger_type(hwirq) & IRQ_TYPE_LEVEL_MASK) {
			vdev->irqs[i].flags |= VFIO_IRQ_INFO_MASKABLE
						| VFIO_IRQ_INFO_AUTOMASKED;
			handler = vfio_automasked_irq_handler;
		}

		vdev->irqs[i].count = 1;
		vdev->irqs[i].hwirq = hwirq;
		vdev->irqs[i].masked = false;
		vdev->irqs[i].name = kasprintf(GFP_KERNEL_ACCOUNT,
					       "vfio-irq[%d](%s)", hwirq,
					       vdev->name);
		if (!vdev->irqs[i].name) {
			ret = -ENOMEM;
			goto err;
		}

		ret = request_irq(hwirq, handler, IRQF_NO_AUTOEN,
				  vdev->irqs[i].name, &vdev->irqs[i]);
		if (ret) {
			kfree(vdev->irqs[i].name);
			vdev->irqs[i].name = ERR_PTR(ret);
		}
	}

	vdev->num_irqs = cnt;

	return 0;
err:
	for (--i; i >= 0; i--) {
		if (!IS_ERR(vdev->irqs[i].name)) {
			free_irq(vdev->irqs[i].hwirq, &vdev->irqs[i]);
			kfree(vdev->irqs[i].name);
		}
	}
	kfree(vdev->irqs);
	return ret;
}
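
/*
 * Undo vfio_platform_irq_init() and any subsequent SET_IRQS state:
 * virqfds, requested hwirqs, trigger eventfds and the name strings.
 */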
void vfio_platform_irq_cleanup(struct vfio_platform_device *vdev)
{
	int i;

	for (i = 0; i < vdev->num_irqs; i++) {
		vfio_virqfd_disable(&vdev->irqs[i].mask);
		vfio_virqfd_disable(&vdev->irqs[i].unmask);
		if (!IS_ERR(vdev->irqs[i].name)) {
			free_irq(vdev->irqs[i].hwirq, &vdev->irqs[i]);
			if (vdev->irqs[i].trigger)
				eventfd_ctx_put(vdev->irqs[i].trigger);
			kfree(vdev->irqs[i].name);
		}
	}

	vdev->num_irqs = 0;

	kfree(vdev->irqs);
}