// SPDX-License-Identifier: GPL-2.0
/*
 * VFIO based Physical Subchannel device driver
 *
 * Copyright IBM Corp. 2017
 *
 * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
 *            Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/uuid.h>
#include <linux/mdev.h>

#include <asm/isc.h>

#include "ioasm.h"
#include "css.h"
#include "vfio_ccw_private.h"

struct workqueue_struct *vfio_ccw_work_q;
struct kmem_cache *vfio_ccw_io_region;

/*
 * Helpers
 */
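/*
 * Quiesce the subchannel: disable it and, while that reports -EBUSY,
 * cancel/halt/clear outstanding I/O and wait (with a timeout) for the
 * completion signalled by the interrupt path before retrying the disable.
 */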
int vfio_ccw_sch_quiesce(struct subchannel *sch)
{
	struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
	DECLARE_COMPLETION_ONSTACK(completion);
	int iretry, ret = 0;

	spin_lock_irq(sch->lock);
	if (!sch->schib.pmcw.ena)
		goto out_unlock;
	ret = cio_disable_subchannel(sch);
	if (ret != -EBUSY)
		goto out_unlock;

	iretry = 255;
	do {
		ret = cio_cancel_halt_clear(sch, &iretry);

		if (ret == -EIO) {
			pr_err("vfio_ccw: could not quiesce subchannel 0.%x.%04x!\n",
			       sch->schid.ssid, sch->schid.sch_no);
			break;
		}

		/*
		 * Flush all I/O and wait for
		 * cancel/halt/clear completion.
		 */
		private->completion = &completion;
		spin_unlock_irq(sch->lock);

		if (ret == -EBUSY)
			wait_for_completion_timeout(&completion, 3*HZ);

		private->completion = NULL;
		flush_workqueue(vfio_ccw_work_q);
		spin_lock_irq(sch->lock);
		ret = cio_disable_subchannel(sch);
	} while (ret == -EBUSY);
out_unlock:
	private->state = VFIO_CCW_STATE_NOT_OPER;
	spin_unlock_irq(sch->lock);
	return ret;
}
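/*
 * Deferred work: post-process the interrupt response block (IRB) stored in
 * the private area, copy it into the I/O region shared with userspace and,
 * if an io_trigger eventfd is set, signal it.
 */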
static void vfio_ccw_sch_io_todo(struct work_struct *work)
{
	struct vfio_ccw_private *private;
	struct irb *irb;
	bool is_final;

	private = container_of(work, struct vfio_ccw_private, io_work);
	irb = &private->irb;

	is_final = !(scsw_actl(&irb->scsw) &
		     (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT));
	if (scsw_is_solicited(&irb->scsw)) {
		cp_update_scsw(&private->cp, &irb->scsw);
		if (is_final)
			cp_free(&private->cp);
	}
	memcpy(private->io_region->irb_area, irb, sizeof(*irb));

	if (private->io_trigger)
		eventfd_signal(private->io_trigger, 1);

	if (private->mdev && is_final)
		private->state = VFIO_CCW_STATE_IDLE;
}
/*
 * Css driver callbacks
 */
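/* Interrupt handler: account the interrupt and feed it into the FSM. */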
static void vfio_ccw_sch_irq(struct subchannel *sch)
{
	struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);

	inc_irq_stat(IRQIO_CIO);
	vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_INTERRUPT);
}
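/*
 * Probe: allocate the per-subchannel private data and I/O region, enable
 * the subchannel on the vfio-ccw ISC and register mdev support via
 * vfio_ccw_mdev_reg(), rolling everything back on failure.
 */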
static int vfio_ccw_sch_probe(struct subchannel *sch)
{
	struct pmcw *pmcw = &sch->schib.pmcw;
	struct vfio_ccw_private *private;
	int ret;

	if (pmcw->qf) {
		dev_warn(&sch->dev, "vfio: ccw: does not support QDIO: %s\n",
			 dev_name(&sch->dev));
		return -ENODEV;
	}

	private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
	if (!private)
		return -ENOMEM;

	private->io_region = kmem_cache_zalloc(vfio_ccw_io_region,
					       GFP_KERNEL | GFP_DMA);
	if (!private->io_region) {
		kfree(private);
		return -ENOMEM;
	}

	private->sch = sch;
	dev_set_drvdata(&sch->dev, private);

	spin_lock_irq(sch->lock);
	private->state = VFIO_CCW_STATE_NOT_OPER;
	sch->isc = VFIO_CCW_ISC;
	ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
	spin_unlock_irq(sch->lock);
	if (ret)
		goto out_free;

	ret = vfio_ccw_mdev_reg(sch);
	if (ret)
		goto out_disable;

	INIT_WORK(&private->io_work, vfio_ccw_sch_io_todo);
	atomic_set(&private->avail, 1);
	private->state = VFIO_CCW_STATE_STANDBY;

	return 0;

out_disable:
	cio_disable_subchannel(sch);
out_free:
	dev_set_drvdata(&sch->dev, NULL);
	kmem_cache_free(vfio_ccw_io_region, private->io_region);
	kfree(private);
	return ret;
}
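/* Remove: quiesce the subchannel, unregister mdev support and free resources. */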
static int vfio_ccw_sch_remove(struct subchannel *sch)
{
	struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);

	vfio_ccw_sch_quiesce(sch);

	vfio_ccw_mdev_unreg(sch);

	dev_set_drvdata(&sch->dev, NULL);

	kmem_cache_free(vfio_ccw_io_region, private->io_region);
	kfree(private);

	return 0;
}
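/* Shutdown: only quiesce the subchannel; nothing is freed here. */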
static void vfio_ccw_sch_shutdown(struct subchannel *sch)
{
	vfio_ccw_sch_quiesce(sch);
}
/**
 * vfio_ccw_sch_event - process subchannel event
 * @sch: subchannel
 * @process: non-zero if function is called in process context
 *
 * An unspecified event occurred for this subchannel. Adjust data according
 * to the current operational state of the subchannel. Return zero when the
 * event has been handled sufficiently or -EAGAIN when this function should
 * be called again in process context.
 */
static int vfio_ccw_sch_event(struct subchannel *sch, int process)
{
	struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
	unsigned long flags;
	int rc = -EAGAIN;

	spin_lock_irqsave(sch->lock, flags);
	if (!device_is_registered(&sch->dev))
		goto out_unlock;

	if (work_pending(&sch->todo_work))
		goto out_unlock;

	if (cio_update_schib(sch)) {
		vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
		rc = 0;
		goto out_unlock;
	}

	private = dev_get_drvdata(&sch->dev);
	if (private->state == VFIO_CCW_STATE_NOT_OPER) {
		private->state = private->mdev ? VFIO_CCW_STATE_IDLE :
				 VFIO_CCW_STATE_STANDBY;
	}
	rc = 0;

out_unlock:
	spin_unlock_irqrestore(sch->lock, flags);

	return rc;
}
static struct css_device_id vfio_ccw_sch_ids[] = {
	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
	{ /* end of list */ },
};
MODULE_DEVICE_TABLE(css, vfio_ccw_sch_ids);
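/* The css driver that binds vfio-ccw to I/O subchannels. */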
static struct css_driver vfio_ccw_sch_driver = {
	.drv = {
		.name = "vfio_ccw",
		.owner = THIS_MODULE,
	},
	.subchannel_type = vfio_ccw_sch_ids,
	.irq = vfio_ccw_sch_irq,
	.probe = vfio_ccw_sch_probe,
	.remove = vfio_ccw_sch_remove,
	.shutdown = vfio_ccw_sch_shutdown,
	.sch_event = vfio_ccw_sch_event,
};
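/*
 * Module init: set up the workqueue and the usercopy-whitelisted cache for
 * the I/O region, register the vfio-ccw ISC and finally the css driver.
 */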
static int __init vfio_ccw_sch_init(void)
{
	int ret;

	vfio_ccw_work_q = create_singlethread_workqueue("vfio-ccw");
	if (!vfio_ccw_work_q)
		return -ENOMEM;

	vfio_ccw_io_region = kmem_cache_create_usercopy("vfio_ccw_io_region",
					sizeof(struct ccw_io_region), 0,
					SLAB_ACCOUNT, 0,
					sizeof(struct ccw_io_region), NULL);
	if (!vfio_ccw_io_region) {
		destroy_workqueue(vfio_ccw_work_q);
		return -ENOMEM;
	}

	isc_register(VFIO_CCW_ISC);
	ret = css_driver_register(&vfio_ccw_sch_driver);
	if (ret) {
		isc_unregister(VFIO_CCW_ISC);
		kmem_cache_destroy(vfio_ccw_io_region);
		destroy_workqueue(vfio_ccw_work_q);
	}

	return ret;
}
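/* Module exit: tear down in the reverse order of vfio_ccw_sch_init(). */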
static void __exit vfio_ccw_sch_exit(void)
{
	css_driver_unregister(&vfio_ccw_sch_driver);
	isc_unregister(VFIO_CCW_ISC);
	kmem_cache_destroy(vfio_ccw_io_region);
	destroy_workqueue(vfio_ccw_work_q);
}
module_init(vfio_ccw_sch_init);
module_exit(vfio_ccw_sch_exit);

MODULE_LICENSE("GPL v2");