/*
 * VFIO based Physical Subchannel device driver
 *
 * Copyright IBM Corp. 2017
 *
 * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
 *            Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/uuid.h>
#include <linux/mdev.h>

#include <asm/isc.h>

#include "ioasm.h"
#include "css.h"
#include "vfio_ccw_private.h"

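/* Work queue on which io_work (vfio_ccw_sch_io_todo) is run and flushed. */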
struct workqueue_struct *vfio_ccw_work_q;

/*
 * Helpers
 */
int vfio_ccw_sch_quiesce(struct subchannel *sch)
{
        struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
        DECLARE_COMPLETION_ONSTACK(completion);
        int iretry, ret = 0;

        spin_lock_irq(sch->lock);
        if (!sch->schib.pmcw.ena)
                goto out_unlock;
        ret = cio_disable_subchannel(sch);
        if (ret != -EBUSY)
                goto out_unlock;

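        /*
         * The subchannel is still busy with I/O: cancel/halt/clear it and
         * keep retrying the disable until it no longer returns -EBUSY.
         */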
        do {
                iretry = 255;

                ret = cio_cancel_halt_clear(sch, &iretry);
                while (ret == -EBUSY) {
                        /*
                         * Flush all I/O and wait for
                         * cancel/halt/clear completion.
                         */
                        private->completion = &completion;
                        spin_unlock_irq(sch->lock);

                        wait_for_completion_timeout(&completion, 3*HZ);

                        spin_lock_irq(sch->lock);
                        private->completion = NULL;
                        flush_workqueue(vfio_ccw_work_q);
                        ret = cio_cancel_halt_clear(sch, &iretry);
                }

                ret = cio_disable_subchannel(sch);
        } while (ret == -EBUSY);
out_unlock:
        private->state = VFIO_CCW_STATE_NOT_OPER;
        spin_unlock_irq(sch->lock);
        return ret;
}

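/*
 * Deferred (workqueue) handler for an I/O interrupt: update the channel
 * program state, copy the IRB into the I/O region and notify userspace
 * through the io_trigger eventfd.
 */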
static void vfio_ccw_sch_io_todo(struct work_struct *work)
{
        struct vfio_ccw_private *private;
        struct irb *irb;

        private = container_of(work, struct vfio_ccw_private, io_work);
        irb = &private->irb;

        if (scsw_is_solicited(&irb->scsw)) {
                cp_update_scsw(&private->cp, &irb->scsw);
                cp_free(&private->cp);
        }
        memcpy(private->io_region.irb_area, irb, sizeof(*irb));

        if (private->io_trigger)
                eventfd_signal(private->io_trigger, 1);

        if (private->mdev)
                private->state = VFIO_CCW_STATE_IDLE;
}

/*
 * Css driver callbacks
 */
static void vfio_ccw_sch_irq(struct subchannel *sch)
{
        struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);

        inc_irq_stat(IRQIO_CIO);
        vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_INTERRUPT);
}

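/*
 * probe: allocate the per-subchannel private data, enable the subchannel
 * for I/O and register it with the mediated device framework.
 */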
static int vfio_ccw_sch_probe(struct subchannel *sch)
{
        struct pmcw *pmcw = &sch->schib.pmcw;
        struct vfio_ccw_private *private;
        int ret;

        if (pmcw->qf) {
                dev_warn(&sch->dev, "vfio: ccw: does not support QDIO: %s\n",
                         dev_name(&sch->dev));
                return -ENODEV;
        }

        private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
        if (!private)
                return -ENOMEM;
        private->sch = sch;
        dev_set_drvdata(&sch->dev, private);

        spin_lock_irq(sch->lock);
        private->state = VFIO_CCW_STATE_NOT_OPER;
        sch->isc = VFIO_CCW_ISC;
        ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
        spin_unlock_irq(sch->lock);
        if (ret)
                goto out_free;

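        /* Register with the mediated device (mdev) framework. */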
        ret = vfio_ccw_mdev_reg(sch);
        if (ret)
                goto out_disable;

        INIT_WORK(&private->io_work, vfio_ccw_sch_io_todo);
        atomic_set(&private->avail, 1);
        private->state = VFIO_CCW_STATE_STANDBY;

        return 0;

out_disable:
        cio_disable_subchannel(sch);
out_free:
        dev_set_drvdata(&sch->dev, NULL);
        kfree(private);
        return ret;
}

static int vfio_ccw_sch_remove(struct subchannel *sch)
{
        struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);

        vfio_ccw_sch_quiesce(sch);
        vfio_ccw_mdev_unreg(sch);

        dev_set_drvdata(&sch->dev, NULL);
        kfree(private);

        return 0;
}

static void vfio_ccw_sch_shutdown(struct subchannel *sch)
{
        vfio_ccw_sch_quiesce(sch);
}

/**
 * vfio_ccw_sch_event - process subchannel event
 * @sch: subchannel
 * @process: non-zero if function is called in process context
 *
 * An unspecified event occurred for this subchannel. Adjust data according
 * to the current operational state of the subchannel. Return zero when the
 * event has been handled sufficiently or -EAGAIN when this function should
 * be called again in process context.
 */
static int vfio_ccw_sch_event(struct subchannel *sch, int process)
{
        struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
        unsigned long flags;

        spin_lock_irqsave(sch->lock, flags);
        if (!device_is_registered(&sch->dev))
                goto out_unlock;

        if (work_pending(&sch->todo_work))
                goto out_unlock;

        if (cio_update_schib(sch)) {
                vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
                goto out_unlock;
        }

        private = dev_get_drvdata(&sch->dev);
        if (private->state == VFIO_CCW_STATE_NOT_OPER) {
                private->state = private->mdev ? VFIO_CCW_STATE_IDLE :
                                 VFIO_CCW_STATE_STANDBY;
        }

out_unlock:
        spin_unlock_irqrestore(sch->lock, flags);

        return 0;
}

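/* This driver binds to I/O subchannels only. */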
static struct css_device_id vfio_ccw_sch_ids[] = {
        { .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
        { /* end of list */ },
};
MODULE_DEVICE_TABLE(css, vfio_ccw_sch_ids);

static struct css_driver vfio_ccw_sch_driver = {
        .drv = {
                .name = "vfio_ccw",
                .owner = THIS_MODULE,
        },
        .subchannel_type = vfio_ccw_sch_ids,
        .irq = vfio_ccw_sch_irq,
        .probe = vfio_ccw_sch_probe,
        .remove = vfio_ccw_sch_remove,
        .shutdown = vfio_ccw_sch_shutdown,
        .sch_event = vfio_ccw_sch_event,
};

static int __init vfio_ccw_sch_init(void)
{
        int ret;

        vfio_ccw_work_q = create_singlethread_workqueue("vfio-ccw");
        if (!vfio_ccw_work_q)
                return -ENOMEM;

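        /* Register the interrupt subclass (ISC) used by vfio-ccw subchannels. */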
        isc_register(VFIO_CCW_ISC);
        ret = css_driver_register(&vfio_ccw_sch_driver);
        if (ret) {
                isc_unregister(VFIO_CCW_ISC);
                destroy_workqueue(vfio_ccw_work_q);
        }

        return ret;
}

static void __exit vfio_ccw_sch_exit(void)
{
        css_driver_unregister(&vfio_ccw_sch_driver);
        isc_unregister(VFIO_CCW_ISC);
        destroy_workqueue(vfio_ccw_work_q);
}
module_init(vfio_ccw_sch_init);
module_exit(vfio_ccw_sch_exit);

MODULE_LICENSE("GPL v2");