drivers/s390/cio/vfio_ccw_ops.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Physical device callbacks for vfio_ccw
 *
 * Copyright IBM Corp. 2017
 *
 * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
 *            Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
 */

#include <linux/vfio.h>
#include <linux/mdev.h>

#include "vfio_ccw_private.h"

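/*
 * Reset the device: quiesce the subchannel to halt/clear any outstanding
 * I/O, then re-enable it. The device only returns to the IDLE state if
 * both steps succeed.
 */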
static int vfio_ccw_mdev_reset(struct mdev_device *mdev)
{
	struct vfio_ccw_private *private;
	struct subchannel *sch;
	int ret;

	private = dev_get_drvdata(mdev_parent_dev(mdev));
	sch = private->sch;
	/*
	 * TODO:
	 * At the current stage, some things like "no I/O running" and "no
	 * interrupt pending" are clear, but we are not sure what other state
	 * we need to care about.
	 * There are still a lot more instructions that need to be handled. We
	 * should come back here later.
	 */
	ret = vfio_ccw_sch_quiesce(sch);
	if (ret)
		return ret;

	ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
	if (!ret)
		private->state = VFIO_CCW_STATE_IDLE;

	return ret;
}

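/*
 * vfio IOMMU notifier callback. When a mapping is invalidated via
 * DMA_UNMAP, any channel program holding the affected iova pinned must
 * let go of it: the device is reset (quiescing in-flight I/O) and the
 * translated channel program is freed, unpinning its pages.
 */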
static int vfio_ccw_mdev_notifier(struct notifier_block *nb,
				  unsigned long action,
				  void *data)
{
	struct vfio_ccw_private *private =
		container_of(nb, struct vfio_ccw_private, nb);

	/*
	 * Vendor drivers MUST unpin pages in response to an
	 * invalidation.
	 */
	if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
		struct vfio_iommu_type1_dma_unmap *unmap = data;

		if (!cp_iova_pinned(&private->cp, unmap->iova))
			return NOTIFY_OK;

		if (vfio_ccw_mdev_reset(private->mdev))
			return NOTIFY_BAD;

		cp_free(&private->cp);
		return NOTIFY_OK;
	}

	return NOTIFY_DONE;
}

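/*
 * sysfs attributes for the single supported mdev type, exposed under
 * the parent device's mdev_supported_types/<type>/ directory.
 */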
static ssize_t name_show(struct kobject *kobj, struct device *dev, char *buf)
{
	return sprintf(buf, "I/O subchannel (Non-QDIO)\n");
}
static MDEV_TYPE_ATTR_RO(name);

static ssize_t device_api_show(struct kobject *kobj, struct device *dev,
			       char *buf)
{
	return sprintf(buf, "%s\n", VFIO_DEVICE_API_CCW_STRING);
}
static MDEV_TYPE_ATTR_RO(device_api);

static ssize_t available_instances_show(struct kobject *kobj,
					struct device *dev, char *buf)
{
	struct vfio_ccw_private *private = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", atomic_read(&private->avail));
}
static MDEV_TYPE_ATTR_RO(available_instances);

static struct attribute *mdev_types_attrs[] = {
	&mdev_type_attr_name.attr,
	&mdev_type_attr_device_api.attr,
	&mdev_type_attr_available_instances.attr,
	NULL,
};

static struct attribute_group mdev_type_group = {
	.name  = "io",
	.attrs = mdev_types_attrs,
};

static struct attribute_group *mdev_type_groups[] = {
	&mdev_type_group,
	NULL,
};

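/*
 * At most one mdev can exist per subchannel: creation fails with -EPERM
 * once the avail counter (expected to start at 1, set elsewhere in the
 * driver) has been claimed, and with -ENODEV if the subchannel is not
 * operational.
 */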
static int vfio_ccw_mdev_create(struct kobject *kobj, struct mdev_device *mdev)
{
	struct vfio_ccw_private *private =
		dev_get_drvdata(mdev_parent_dev(mdev));

	if (private->state == VFIO_CCW_STATE_NOT_OPER)
		return -ENODEV;

	if (atomic_dec_if_positive(&private->avail) < 0)
		return -EPERM;

	private->mdev = mdev;
	private->state = VFIO_CCW_STATE_IDLE;

	return 0;
}

static int vfio_ccw_mdev_remove(struct mdev_device *mdev)
{
	struct vfio_ccw_private *private =
		dev_get_drvdata(mdev_parent_dev(mdev));

	if ((private->state != VFIO_CCW_STATE_NOT_OPER) &&
	    (private->state != VFIO_CCW_STATE_STANDBY)) {
		if (!vfio_ccw_sch_quiesce(private->sch))
			private->state = VFIO_CCW_STATE_STANDBY;
		/* The state will be NOT_OPER on error. */
	}

	cp_free(&private->cp);
	private->mdev = NULL;
	atomic_inc(&private->avail);

	return 0;
}

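/*
 * Opening the mdev registers private->nb with the vfio IOMMU backend so
 * that vfio_ccw_mdev_notifier() receives DMA_UNMAP events for this
 * device.
 */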
static int vfio_ccw_mdev_open(struct mdev_device *mdev)
{
	struct vfio_ccw_private *private =
		dev_get_drvdata(mdev_parent_dev(mdev));
	unsigned long events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;

	private->nb.notifier_call = vfio_ccw_mdev_notifier;

	return vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
				      &events, &private->nb);
}

static void vfio_ccw_mdev_release(struct mdev_device *mdev)
{
	struct vfio_ccw_private *private =
		dev_get_drvdata(mdev_parent_dev(mdev));

	if ((private->state != VFIO_CCW_STATE_NOT_OPER) &&
	    (private->state != VFIO_CCW_STATE_STANDBY)) {
		if (!vfio_ccw_mdev_reset(mdev))
			private->state = VFIO_CCW_STATE_STANDBY;
		/* The state will be NOT_OPER on error. */
	}

	cp_free(&private->cp);
	vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
				 &private->nb);
}

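/*
 * Userspace reads the I/O region to fetch the result of a channel
 * program, e.g. the IRB area and ret_code, after being signalled.
 * Accesses beyond the region size are rejected with -EINVAL.
 */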
static ssize_t vfio_ccw_mdev_read(struct mdev_device *mdev,
				  char __user *buf,
				  size_t count,
				  loff_t *ppos)
{
	struct vfio_ccw_private *private;
	struct ccw_io_region *region;

	if (*ppos + count > sizeof(*region))
		return -EINVAL;

	private = dev_get_drvdata(mdev_parent_dev(mdev));
	region = private->io_region;
	if (copy_to_user(buf, (void *)region + *ppos, count))
		return -EFAULT;

	return count;
}

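/*
 * Userspace writes the I/O region (typically the ORB describing a
 * channel program) to start an I/O request. Writes are only accepted in
 * the IDLE state; the FSM is then kicked with an IO_REQ event, and a
 * nonzero ret_code from the FSM is returned to the caller.
 */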
static ssize_t vfio_ccw_mdev_write(struct mdev_device *mdev,
				   const char __user *buf,
				   size_t count,
				   loff_t *ppos)
{
	struct vfio_ccw_private *private;
	struct ccw_io_region *region;

	if (*ppos + count > sizeof(*region))
		return -EINVAL;

	private = dev_get_drvdata(mdev_parent_dev(mdev));
	if (private->state != VFIO_CCW_STATE_IDLE)
		return -EACCES;

	region = private->io_region;
	if (copy_from_user((void *)region + *ppos, buf, count))
		return -EFAULT;

	vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_IO_REQ);
	if (region->ret_code != 0) {
		private->state = VFIO_CCW_STATE_IDLE;
		return region->ret_code;
	}

	return count;
}

static int vfio_ccw_mdev_get_device_info(struct vfio_device_info *info)
{
	info->flags = VFIO_DEVICE_FLAGS_CCW | VFIO_DEVICE_FLAGS_RESET;
	info->num_regions = VFIO_CCW_NUM_REGIONS;
	info->num_irqs = VFIO_CCW_NUM_IRQS;

	return 0;
}

static int vfio_ccw_mdev_get_region_info(struct vfio_region_info *info,
					 u16 *cap_type_id,
					 void **cap_type)
{
	switch (info->index) {
	case VFIO_CCW_CONFIG_REGION_INDEX:
		info->offset = 0;
		info->size = sizeof(struct ccw_io_region);
		info->flags = VFIO_REGION_INFO_FLAG_READ
			      | VFIO_REGION_INFO_FLAG_WRITE;
		return 0;
	default:
		return -EINVAL;
	}
}

static int vfio_ccw_mdev_get_irq_info(struct vfio_irq_info *info)
{
	if (info->index != VFIO_CCW_IO_IRQ_INDEX)
		return -EINVAL;

	info->count = 1;
	info->flags = VFIO_IRQ_INFO_EVENTFD;

	return 0;
}

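/*
 * Install, trigger, or remove the eventfd that signals I/O interrupts
 * to userspace. Only ACTION_TRIGGER is supported: DATA_NONE and
 * DATA_BOOL signal the existing eventfd (useful for loopback testing),
 * while DATA_EVENTFD installs a new context or, with fd == -1, removes
 * the current one.
 */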
static int vfio_ccw_mdev_set_irqs(struct mdev_device *mdev,
				  uint32_t flags,
				  void __user *data)
{
	struct vfio_ccw_private *private;
	struct eventfd_ctx **ctx;

	if (!(flags & VFIO_IRQ_SET_ACTION_TRIGGER))
		return -EINVAL;

	private = dev_get_drvdata(mdev_parent_dev(mdev));
	ctx = &private->io_trigger;

	switch (flags & VFIO_IRQ_SET_DATA_TYPE_MASK) {
	case VFIO_IRQ_SET_DATA_NONE:
	{
		if (*ctx)
			eventfd_signal(*ctx, 1);
		return 0;
	}
	case VFIO_IRQ_SET_DATA_BOOL:
	{
		uint8_t trigger;

		if (get_user(trigger, (uint8_t __user *)data))
			return -EFAULT;

		if (trigger && *ctx)
			eventfd_signal(*ctx, 1);
		return 0;
	}
	case VFIO_IRQ_SET_DATA_EVENTFD:
	{
		int32_t fd;

		if (get_user(fd, (int32_t __user *)data))
			return -EFAULT;

		if (fd == -1) {
			if (*ctx)
				eventfd_ctx_put(*ctx);
			*ctx = NULL;
		} else if (fd >= 0) {
			struct eventfd_ctx *efdctx;

			efdctx = eventfd_ctx_fdget(fd);
			if (IS_ERR(efdctx))
				return PTR_ERR(efdctx);

			if (*ctx)
				eventfd_ctx_put(*ctx);

			*ctx = efdctx;
		} else
			return -EINVAL;

		return 0;
	}
	default:
		return -EINVAL;
	}
}

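/*
 * Device ioctl handler following the usual vfio protocol: each command
 * copies in a fixed-size header, validates argsz, and copies the result
 * back out. A rough sketch of the userspace side (hypothetical snippet,
 * assuming "fd" is an open vfio-ccw device file descriptor):
 *
 *	struct vfio_device_info info = { .argsz = sizeof(info) };
 *	struct vfio_region_info region = {
 *		.argsz = sizeof(region),
 *		.index = VFIO_CCW_CONFIG_REGION_INDEX,
 *	};
 *
 *	ioctl(fd, VFIO_DEVICE_GET_INFO, &info);
 *	ioctl(fd, VFIO_DEVICE_GET_REGION_INFO, &region);
 *	// I/O requests are then submitted by writing the region at
 *	// region.offset, and results are read back the same way.
 */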
static ssize_t vfio_ccw_mdev_ioctl(struct mdev_device *mdev,
				   unsigned int cmd,
				   unsigned long arg)
{
	int ret = 0;
	unsigned long minsz;

	switch (cmd) {
	case VFIO_DEVICE_GET_INFO:
	{
		struct vfio_device_info info;

		minsz = offsetofend(struct vfio_device_info, num_irqs);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		ret = vfio_ccw_mdev_get_device_info(&info);
		if (ret)
			return ret;

		return copy_to_user((void __user *)arg, &info, minsz);
	}
	case VFIO_DEVICE_GET_REGION_INFO:
	{
		struct vfio_region_info info;
		u16 cap_type_id = 0;
		void *cap_type = NULL;

		minsz = offsetofend(struct vfio_region_info, offset);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		ret = vfio_ccw_mdev_get_region_info(&info, &cap_type_id,
						    &cap_type);
		if (ret)
			return ret;

		return copy_to_user((void __user *)arg, &info, minsz);
	}
	case VFIO_DEVICE_GET_IRQ_INFO:
	{
		struct vfio_irq_info info;

		minsz = offsetofend(struct vfio_irq_info, count);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz || info.index >= VFIO_CCW_NUM_IRQS)
			return -EINVAL;

		ret = vfio_ccw_mdev_get_irq_info(&info);
		if (ret)
			return ret;

		if (info.count == -1)
			return -EINVAL;

		return copy_to_user((void __user *)arg, &info, minsz);
	}
	case VFIO_DEVICE_SET_IRQS:
	{
		struct vfio_irq_set hdr;
		size_t data_size;
		void __user *data;

		minsz = offsetofend(struct vfio_irq_set, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		ret = vfio_set_irqs_validate_and_prepare(&hdr, 1,
							 VFIO_CCW_NUM_IRQS,
							 &data_size);
		if (ret)
			return ret;

		data = (void __user *)(arg + minsz);
		return vfio_ccw_mdev_set_irqs(mdev, hdr.flags, data);
	}
	case VFIO_DEVICE_RESET:
		return vfio_ccw_mdev_reset(mdev);
	default:
		return -ENOTTY;
	}
}

static const struct mdev_parent_ops vfio_ccw_mdev_ops = {
	.owner			= THIS_MODULE,
	.supported_type_groups	= mdev_type_groups,
	.create			= vfio_ccw_mdev_create,
	.remove			= vfio_ccw_mdev_remove,
	.open			= vfio_ccw_mdev_open,
	.release		= vfio_ccw_mdev_release,
	.read			= vfio_ccw_mdev_read,
	.write			= vfio_ccw_mdev_write,
	.ioctl			= vfio_ccw_mdev_ioctl,
};

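/*
 * Entry points used by the vfio_ccw subchannel driver to register and
 * unregister the subchannel as an mdev parent device.
 */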
int vfio_ccw_mdev_reg(struct subchannel *sch)
{
	return mdev_register_device(&sch->dev, &vfio_ccw_mdev_ops);
}

void vfio_ccw_mdev_unreg(struct subchannel *sch)
{
	mdev_unregister_device(&sch->dev);
}