// SPDX-License-Identifier: GPL-2.0
/*
 * VFIO based Physical Subchannel device driver
 *
 * Copyright IBM Corp. 2017
 * Copyright Red Hat, Inc. 2019
 *
 * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
 *            Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
 *            Cornelia Huck <cohuck@redhat.com>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/uuid.h>
#include <linux/mdev.h>

#include <asm/isc.h>

#include "ioasm.h"
#include "css.h"
#include "vfio_ccw_private.h"
struct workqueue_struct *vfio_ccw_work_q;
static struct kmem_cache *vfio_ccw_io_region;
static struct kmem_cache *vfio_ccw_cmd_region;

debug_info_t *vfio_ccw_debug_msg_id;
debug_info_t *vfio_ccw_debug_trace_id;
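
/*
 * Note: the two s390 debug feature (dbf) areas above back the
 * VFIO_CCW_MSG_EVENT and VFIO_CCW_TRACE_EVENT macros (presumably defined
 * in vfio_ccw_private.h): "vfio_ccw_msg" holds sprintf-formatted messages,
 * "vfio_ccw_trace" holds hex/ascii trace records. Both are readable at
 * runtime under /sys/kernel/debug/s390dbf/.
 */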
/*
 * Helpers
 */
int vfio_ccw_sch_quiesce(struct subchannel *sch)
{
	struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
	DECLARE_COMPLETION_ONSTACK(completion);
	int iretry, ret = 0;

	spin_lock_irq(sch->lock);
	if (!sch->schib.pmcw.ena)
		goto out_unlock;
	ret = cio_disable_subchannel(sch);
	if (ret != -EBUSY)
		goto out_unlock;

	iretry = 255;
	do {
		ret = cio_cancel_halt_clear(sch, &iretry);

		if (ret == -EIO) {
			pr_err("vfio_ccw: could not quiesce subchannel 0.%x.%04x!\n",
			       sch->schid.ssid, sch->schid.sch_no);
			break;
		}

		/*
		 * Flush all I/O and wait for
		 * cancel/halt/clear completion.
		 */
		private->completion = &completion;
		spin_unlock_irq(sch->lock);

		if (ret == -EBUSY)
			wait_for_completion_timeout(&completion, 3*HZ);

		private->completion = NULL;
		flush_workqueue(vfio_ccw_work_q);
		spin_lock_irq(sch->lock);
		ret = cio_disable_subchannel(sch);
	} while (ret == -EBUSY);
out_unlock:
	private->state = VFIO_CCW_STATE_NOT_OPER;
	spin_unlock_irq(sch->lock);
	return ret;
}
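
/*
 * vfio_ccw_sch_io_todo() below is the bottom half of the interrupt
 * handling: vfio_ccw_sch_irq() feeds the interrupt into the FSM, which
 * queues this work item on vfio_ccw_work_q; the work item then copies the
 * IRB into the I/O region and signals the eventfd that userspace (e.g.
 * QEMU) polls for completion.
 */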
static void vfio_ccw_sch_io_todo(struct work_struct *work)
{
	struct vfio_ccw_private *private;
	struct irb *irb;
	bool is_final;

	private = container_of(work, struct vfio_ccw_private, io_work);
	irb = &private->irb;

	is_final = !(scsw_actl(&irb->scsw) &
		     (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT));
	if (scsw_is_solicited(&irb->scsw)) {
		cp_update_scsw(&private->cp, &irb->scsw);
		if (is_final && private->state == VFIO_CCW_STATE_CP_PENDING)
			cp_free(&private->cp);
	}
	mutex_lock(&private->io_mutex);
	memcpy(private->io_region->irb_area, irb, sizeof(*irb));
	mutex_unlock(&private->io_mutex);

	if (private->mdev && is_final)
		private->state = VFIO_CCW_STATE_IDLE;

	if (private->io_trigger)
		eventfd_signal(private->io_trigger, 1);
}
/*
 * Css driver callbacks
 */
static void vfio_ccw_sch_irq(struct subchannel *sch)
{
	struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);

	inc_irq_stat(IRQIO_CIO);
	vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_INTERRUPT);
}
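
/*
 * Note on the allocations in vfio_ccw_sch_probe() below: the private
 * struct and both regions are allocated with GFP_DMA because channel I/O
 * on s390 requires 31-bit addressable memory; the guest_cp array only
 * mirrors the guest's channel program, so plain GFP_KERNEL suffices there.
 */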
static int vfio_ccw_sch_probe(struct subchannel *sch)
{
	struct pmcw *pmcw = &sch->schib.pmcw;
	struct vfio_ccw_private *private;
	int ret = -ENOMEM;

	if (pmcw->qf) {
		dev_warn(&sch->dev, "vfio: ccw: does not support QDIO: %s\n",
			 dev_name(&sch->dev));
		return -ENODEV;
	}

	private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
	if (!private)
		return -ENOMEM;

	private->cp.guest_cp = kcalloc(CCWCHAIN_LEN_MAX, sizeof(struct ccw1),
				       GFP_KERNEL);
	if (!private->cp.guest_cp)
		goto out_free;

	private->io_region = kmem_cache_zalloc(vfio_ccw_io_region,
					       GFP_KERNEL | GFP_DMA);
	if (!private->io_region)
		goto out_free;

	private->cmd_region = kmem_cache_zalloc(vfio_ccw_cmd_region,
						GFP_KERNEL | GFP_DMA);
	if (!private->cmd_region)
		goto out_free;

	dev_set_drvdata(&sch->dev, private);
	mutex_init(&private->io_mutex);

	spin_lock_irq(sch->lock);
	private->state = VFIO_CCW_STATE_NOT_OPER;
	sch->isc = VFIO_CCW_ISC;
	ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
	spin_unlock_irq(sch->lock);
	if (ret)
		goto out_free;

	INIT_WORK(&private->io_work, vfio_ccw_sch_io_todo);
	atomic_set(&private->avail, 1);
	private->state = VFIO_CCW_STATE_STANDBY;

	ret = vfio_ccw_mdev_reg(sch);
	if (ret)
		goto out_disable;

	VFIO_CCW_MSG_EVENT(4, "bound to subchannel %x.%x.%04x\n",
			   sch->schid.cssid, sch->schid.ssid,
			   sch->schid.sch_no);
	return 0;

out_disable:
	cio_disable_subchannel(sch);
out_free:
	dev_set_drvdata(&sch->dev, NULL);
	if (private->cmd_region)
		kmem_cache_free(vfio_ccw_cmd_region, private->cmd_region);
	if (private->io_region)
		kmem_cache_free(vfio_ccw_io_region, private->io_region);
	kfree(private->cp.guest_cp);
	kfree(private);
	return ret;
}
static int vfio_ccw_sch_remove(struct subchannel *sch)
{
	struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);

	vfio_ccw_sch_quiesce(sch);

	vfio_ccw_mdev_unreg(sch);

	dev_set_drvdata(&sch->dev, NULL);

	kmem_cache_free(vfio_ccw_cmd_region, private->cmd_region);
	kmem_cache_free(vfio_ccw_io_region, private->io_region);
	kfree(private->cp.guest_cp);
	kfree(private);

	VFIO_CCW_MSG_EVENT(4, "unbound from subchannel %x.%x.%04x\n",
			   sch->schid.cssid, sch->schid.ssid,
			   sch->schid.sch_no);
	return 0;
}
static void vfio_ccw_sch_shutdown(struct subchannel *sch)
{
	vfio_ccw_sch_quiesce(sch);
}
/**
 * vfio_ccw_sch_event - process subchannel event
 * @sch: subchannel
 * @process: non-zero if function is called in process context
 *
 * An unspecified event occurred for this subchannel. Adjust data according
 * to the current operational state of the subchannel. Return zero when the
 * event has been handled sufficiently or -EAGAIN when this function should
 * be called again in process context.
 */
static int vfio_ccw_sch_event(struct subchannel *sch, int process)
{
	struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
	unsigned long flags;
	int rc = -EAGAIN;

	spin_lock_irqsave(sch->lock, flags);
	if (!device_is_registered(&sch->dev))
		goto out_unlock;

	if (work_pending(&sch->todo_work))
		goto out_unlock;

	if (cio_update_schib(sch)) {
		vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
		rc = 0;
		goto out_unlock;
	}

	private = dev_get_drvdata(&sch->dev);
	if (private->state == VFIO_CCW_STATE_NOT_OPER) {
		private->state = private->mdev ? VFIO_CCW_STATE_IDLE :
				 VFIO_CCW_STATE_STANDBY;
	}
	rc = 0;

out_unlock:
	spin_unlock_irqrestore(sch->lock, flags);

	return rc;
}
static struct css_device_id vfio_ccw_sch_ids[] = {
	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
	{ /* end of list */ },
};
MODULE_DEVICE_TABLE(css, vfio_ccw_sch_ids);
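
/*
 * The table above matches on the subchannel type only, so this driver can
 * drive any I/O subchannel. It is not the default driver for them; an
 * administrator binds individual subchannels to vfio_ccw explicitly (see
 * the sketch at the end of this file).
 */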
static struct css_driver vfio_ccw_sch_driver = {
	.drv = {
		.name = "vfio_ccw",
		.owner = THIS_MODULE,
	},
	.subchannel_type = vfio_ccw_sch_ids,
	.irq = vfio_ccw_sch_irq,
	.probe = vfio_ccw_sch_probe,
	.remove = vfio_ccw_sch_remove,
	.shutdown = vfio_ccw_sch_shutdown,
	.sch_event = vfio_ccw_sch_event,
};
static int __init vfio_ccw_debug_init(void)
{
	vfio_ccw_debug_msg_id = debug_register("vfio_ccw_msg", 16, 1,
					       11 * sizeof(long));
	if (!vfio_ccw_debug_msg_id)
		goto out_unregister;
	debug_register_view(vfio_ccw_debug_msg_id, &debug_sprintf_view);
	debug_set_level(vfio_ccw_debug_msg_id, 2);
	vfio_ccw_debug_trace_id = debug_register("vfio_ccw_trace", 16, 1, 16);
	if (!vfio_ccw_debug_trace_id)
		goto out_unregister;
	debug_register_view(vfio_ccw_debug_trace_id, &debug_hex_ascii_view);
	debug_set_level(vfio_ccw_debug_trace_id, 2);
	return 0;

out_unregister:
	debug_unregister(vfio_ccw_debug_msg_id);
	debug_unregister(vfio_ccw_debug_trace_id);
	return -1;
}
static void vfio_ccw_debug_exit(void)
{
	debug_unregister(vfio_ccw_debug_msg_id);
	debug_unregister(vfio_ccw_debug_trace_id);
}
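
/*
 * Module init brings the pieces up in dependency order: debug areas first
 * (so later steps can log), then the workqueue and the two usercopy slab
 * caches, and finally the ISC and the css driver registration that makes
 * subchannels bindable. The error path and vfio_ccw_sch_exit() tear
 * everything down in reverse.
 */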
static int __init vfio_ccw_sch_init(void)
{
	int ret;

	ret = vfio_ccw_debug_init();
	if (ret)
		return ret;

	vfio_ccw_work_q = create_singlethread_workqueue("vfio-ccw");
	if (!vfio_ccw_work_q) {
		ret = -ENOMEM;
		goto out_err;
	}

	vfio_ccw_io_region = kmem_cache_create_usercopy("vfio_ccw_io_region",
					sizeof(struct ccw_io_region), 0,
					SLAB_ACCOUNT, 0,
					sizeof(struct ccw_io_region), NULL);
	if (!vfio_ccw_io_region) {
		ret = -ENOMEM;
		goto out_err;
	}

	vfio_ccw_cmd_region = kmem_cache_create_usercopy("vfio_ccw_cmd_region",
					sizeof(struct ccw_cmd_region), 0,
					SLAB_ACCOUNT, 0,
					sizeof(struct ccw_cmd_region), NULL);
	if (!vfio_ccw_cmd_region) {
		ret = -ENOMEM;
		goto out_err;
	}

	isc_register(VFIO_CCW_ISC);
	ret = css_driver_register(&vfio_ccw_sch_driver);
	if (ret) {
		isc_unregister(VFIO_CCW_ISC);
		goto out_err;
	}

	return ret;

out_err:
	kmem_cache_destroy(vfio_ccw_cmd_region);
	kmem_cache_destroy(vfio_ccw_io_region);
	destroy_workqueue(vfio_ccw_work_q);
	vfio_ccw_debug_exit();
	return ret;
}
static void __exit vfio_ccw_sch_exit(void)
{
	css_driver_unregister(&vfio_ccw_sch_driver);
	isc_unregister(VFIO_CCW_ISC);
	kmem_cache_destroy(vfio_ccw_io_region);
	kmem_cache_destroy(vfio_ccw_cmd_region);
	destroy_workqueue(vfio_ccw_work_q);
	vfio_ccw_debug_exit();
}
module_init(vfio_ccw_sch_init);
module_exit(vfio_ccw_sch_exit);

MODULE_LICENSE("GPL v2");
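
/*
 * Rough usage sketch (shell, not part of this file; the subchannel ID
 * 0.0.0313 is a placeholder): a subchannel is unbound from its default
 * driver, bound to vfio_ccw, and a mediated device is created for
 * passthrough, roughly as described in Documentation/s390/vfio-ccw.rst:
 *
 *   echo 0.0.0313 > /sys/bus/css/devices/0.0.0313/driver/unbind
 *   echo 0.0.0313 > /sys/bus/css/drivers/vfio_ccw/bind
 *   uuidgen > /sys/bus/css/devices/0.0.0313/mdev_supported_types/vfio_ccw-io/create
 */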