// SPDX-License-Identifier: GPL-2.0
/*
 * Physical device callbacks for vfio_ccw
 *
 * Copyright IBM Corp. 2017
 * Copyright Red Hat, Inc. 2019
 *
 * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
 *            Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
 *            Cornelia Huck <cohuck@redhat.com>
 */

#include <linux/vfio.h>
#include <linux/mdev.h>
#include <linux/nospec.h>
#include <linux/slab.h>

#include "vfio_ccw_private.h"
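
/*
 * Reset helper: quiesce the subchannel to stop any running I/O, then
 * re-enable it for the mdev and return the device to the IDLE state.
 */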
static int vfio_ccw_mdev_reset(struct mdev_device *mdev)
{
	struct vfio_ccw_private *private;
	struct subchannel *sch;
	int ret;

	private = dev_get_drvdata(mdev_parent_dev(mdev));
	sch = private->sch;
	/*
	 * TODO:
	 * At the current stage, some things like "no I/O running" and "no
	 * interrupt pending" are clear, but we are not sure what other state
	 * we need to care about.
	 * There are still a lot more instructions that need to be handled. We
	 * should come back here later.
	 */
	ret = vfio_ccw_sch_quiesce(sch);
	if (ret)
		return ret;

	ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
	if (!ret)
		private->state = VFIO_CCW_STATE_IDLE;

	return ret;
}
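
/*
 * IOMMU notifier: when a DMA unmap hits an IOVA that the current channel
 * program has pinned, reset the device and free the channel program so
 * the affected pages get unpinned.
 */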
static int vfio_ccw_mdev_notifier(struct notifier_block *nb,
				  unsigned long action,
				  void *data)
{
	struct vfio_ccw_private *private =
		container_of(nb, struct vfio_ccw_private, nb);

	/*
	 * Vendor drivers MUST unpin pages in response to an
	 * invalidation.
	 */
	if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
		struct vfio_iommu_type1_dma_unmap *unmap = data;

		if (!cp_iova_pinned(&private->cp, unmap->iova))
			return NOTIFY_OK;

		if (vfio_ccw_mdev_reset(private->mdev))
			return NOTIFY_BAD;

		cp_free(&private->cp);
		return NOTIFY_OK;
	}

	return NOTIFY_DONE;
}
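
/*
 * Sysfs attributes for the single supported mdev type ("io"): its name,
 * the vfio device API it implements, and the number of instances still
 * available (vfio-ccw supports one mdev per subchannel).
 */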
static ssize_t name_show(struct kobject *kobj, struct device *dev, char *buf)
{
	return sprintf(buf, "I/O subchannel (Non-QDIO)\n");
}
static MDEV_TYPE_ATTR_RO(name);

static ssize_t device_api_show(struct kobject *kobj, struct device *dev,
			       char *buf)
{
	return sprintf(buf, "%s\n", VFIO_DEVICE_API_CCW_STRING);
}
static MDEV_TYPE_ATTR_RO(device_api);

static ssize_t available_instances_show(struct kobject *kobj,
					struct device *dev, char *buf)
{
	struct vfio_ccw_private *private = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", atomic_read(&private->avail));
}
static MDEV_TYPE_ATTR_RO(available_instances);

static struct attribute *mdev_types_attrs[] = {
	&mdev_type_attr_name.attr,
	&mdev_type_attr_device_api.attr,
	&mdev_type_attr_available_instances.attr,
	NULL,
};

static struct attribute_group mdev_type_group = {
	.name  = "io",
	.attrs = mdev_types_attrs,
};

static struct attribute_group *mdev_type_groups[] = {
	&mdev_type_group,
	NULL,
};
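
/* mdev create: claim the available instance and start out IDLE. */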
static int vfio_ccw_mdev_create(struct kobject *kobj, struct mdev_device *mdev)
{
	struct vfio_ccw_private *private =
		dev_get_drvdata(mdev_parent_dev(mdev));

	if (private->state == VFIO_CCW_STATE_NOT_OPER)
		return -ENODEV;

	if (atomic_dec_if_positive(&private->avail) < 0)
		return -EPERM;

	private->mdev = mdev;
	private->state = VFIO_CCW_STATE_IDLE;

	VFIO_CCW_MSG_EVENT(2, "mdev %pUl, sch %x.%x.%04x: create\n",
			   mdev_uuid(mdev), private->sch->schid.cssid,
			   private->sch->schid.ssid,
			   private->sch->schid.sch_no);

	return 0;
}
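
/*
 * mdev remove: quiesce the subchannel if it is still operational, free
 * any channel program and give the instance back.
 */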
static int vfio_ccw_mdev_remove(struct mdev_device *mdev)
{
	struct vfio_ccw_private *private =
		dev_get_drvdata(mdev_parent_dev(mdev));

	VFIO_CCW_MSG_EVENT(2, "mdev %pUl, sch %x.%x.%04x: remove\n",
			   mdev_uuid(mdev), private->sch->schid.cssid,
			   private->sch->schid.ssid,
			   private->sch->schid.sch_no);

	if ((private->state != VFIO_CCW_STATE_NOT_OPER) &&
	    (private->state != VFIO_CCW_STATE_STANDBY)) {
		if (!vfio_ccw_sch_quiesce(private->sch))
			private->state = VFIO_CCW_STATE_STANDBY;
		/* The state will be NOT_OPER on error. */
	}

	cp_free(&private->cp);
	private->mdev = NULL;
	atomic_inc(&private->avail);

	return 0;
}
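
/*
 * mdev open: register the IOMMU unmap notifier and the optional async,
 * schib and crw device regions; unwind everything on failure.
 */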
static int vfio_ccw_mdev_open(struct mdev_device *mdev)
{
	struct vfio_ccw_private *private =
		dev_get_drvdata(mdev_parent_dev(mdev));
	unsigned long events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
	int ret;

	private->nb.notifier_call = vfio_ccw_mdev_notifier;

	ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
				     &events, &private->nb);
	if (ret)
		return ret;

	ret = vfio_ccw_register_async_dev_regions(private);
	if (ret)
		goto out_unregister;

	ret = vfio_ccw_register_schib_dev_regions(private);
	if (ret)
		goto out_unregister;

	ret = vfio_ccw_register_crw_dev_regions(private);
	if (ret)
		goto out_unregister;

	return ret;

out_unregister:
	vfio_ccw_unregister_dev_regions(private);
	vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
				 &private->nb);
	return ret;
}
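
/*
 * mdev release: the counterpart of open; reset the device if needed,
 * free the channel program, and drop the regions and the notifier.
 */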
static void vfio_ccw_mdev_release(struct mdev_device *mdev)
{
	struct vfio_ccw_private *private =
		dev_get_drvdata(mdev_parent_dev(mdev));

	if ((private->state != VFIO_CCW_STATE_NOT_OPER) &&
	    (private->state != VFIO_CCW_STATE_STANDBY)) {
		if (!vfio_ccw_mdev_reset(mdev))
			private->state = VFIO_CCW_STATE_STANDBY;
		/* The state will be NOT_OPER on error. */
	}

	cp_free(&private->cp);
	vfio_ccw_unregister_dev_regions(private);
	vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
				 &private->nb);
}
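
/*
 * Copy (part of) the I/O region out to userspace, bounded by the region
 * size and serialized against concurrent I/O via io_mutex.
 */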
static ssize_t vfio_ccw_mdev_read_io_region(struct vfio_ccw_private *private,
					    char __user *buf, size_t count,
					    loff_t *ppos)
{
	loff_t pos = *ppos & VFIO_CCW_OFFSET_MASK;
	struct ccw_io_region *region;
	int ret;

	if (pos + count > sizeof(*region))
		return -EINVAL;

	mutex_lock(&private->io_mutex);
	region = private->io_region;
	if (copy_to_user(buf, (void *)region + pos, count))
		ret = -EFAULT;
	else
		ret = count;
	mutex_unlock(&private->io_mutex);
	return ret;
}
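
/*
 * Read dispatcher: the region index is derived from the file offset;
 * index 0 is the static config (I/O) region, anything else is looked up
 * in the dynamically registered regions.
 */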
static ssize_t vfio_ccw_mdev_read(struct mdev_device *mdev,
				  char __user *buf,
				  size_t count,
				  loff_t *ppos)
{
	unsigned int index = VFIO_CCW_OFFSET_TO_INDEX(*ppos);
	struct vfio_ccw_private *private;

	private = dev_get_drvdata(mdev_parent_dev(mdev));

	if (index >= VFIO_CCW_NUM_REGIONS + private->num_regions)
		return -EINVAL;

	switch (index) {
	case VFIO_CCW_CONFIG_REGION_INDEX:
		return vfio_ccw_mdev_read_io_region(private, buf, count, ppos);
	default:
		index -= VFIO_CCW_NUM_REGIONS;
		return private->region[index].ops->read(private, buf, count,
							ppos);
	}

	return -EINVAL;
}
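
/*
 * Copy userspace data into the I/O region and trigger the I/O request
 * through the FSM.  mutex_trylock() makes a second writer fail with
 * -EAGAIN rather than block while a request is being processed.
 */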
static ssize_t vfio_ccw_mdev_write_io_region(struct vfio_ccw_private *private,
					     const char __user *buf,
					     size_t count, loff_t *ppos)
{
	loff_t pos = *ppos & VFIO_CCW_OFFSET_MASK;
	struct ccw_io_region *region;
	int ret;

	if (pos + count > sizeof(*region))
		return -EINVAL;

	if (!mutex_trylock(&private->io_mutex))
		return -EAGAIN;

	region = private->io_region;
	if (copy_from_user((void *)region + pos, buf, count)) {
		ret = -EFAULT;
		goto out_unlock;
	}

	vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_IO_REQ);
	if (region->ret_code != 0)
		private->state = VFIO_CCW_STATE_IDLE;
	ret = (region->ret_code != 0) ? region->ret_code : count;

out_unlock:
	mutex_unlock(&private->io_mutex);
	return ret;
}
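
/* Write dispatcher: mirrors the read path above. */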
static ssize_t vfio_ccw_mdev_write(struct mdev_device *mdev,
				   const char __user *buf,
				   size_t count,
				   loff_t *ppos)
{
	unsigned int index = VFIO_CCW_OFFSET_TO_INDEX(*ppos);
	struct vfio_ccw_private *private;

	private = dev_get_drvdata(mdev_parent_dev(mdev));

	if (index >= VFIO_CCW_NUM_REGIONS + private->num_regions)
		return -EINVAL;

	switch (index) {
	case VFIO_CCW_CONFIG_REGION_INDEX:
		return vfio_ccw_mdev_write_io_region(private, buf, count, ppos);
	default:
		index -= VFIO_CCW_NUM_REGIONS;
		return private->region[index].ops->write(private, buf, count,
							 ppos);
	}

	return -EINVAL;
}
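
/* Report the device flags plus the region and IRQ counts. */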
static int vfio_ccw_mdev_get_device_info(struct vfio_device_info *info,
					 struct mdev_device *mdev)
{
	struct vfio_ccw_private *private;

	private = dev_get_drvdata(mdev_parent_dev(mdev));
	info->flags = VFIO_DEVICE_FLAGS_CCW | VFIO_DEVICE_FLAGS_RESET;
	info->num_regions = VFIO_CCW_NUM_REGIONS + private->num_regions;
	info->num_irqs = VFIO_CCW_NUM_IRQS;

	return 0;
}
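
/*
 * Describe one region: the static config region is reported directly,
 * while dynamically registered regions are described through a
 * capability chain placed behind the fixed-size info structure.
 */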
static int vfio_ccw_mdev_get_region_info(struct vfio_region_info *info,
					 struct mdev_device *mdev,
					 unsigned long arg)
{
	struct vfio_ccw_private *private;
	int i;

	private = dev_get_drvdata(mdev_parent_dev(mdev));
	switch (info->index) {
	case VFIO_CCW_CONFIG_REGION_INDEX:
		info->offset = 0;
		info->size = sizeof(struct ccw_io_region);
		info->flags = VFIO_REGION_INFO_FLAG_READ
			      | VFIO_REGION_INFO_FLAG_WRITE;
		return 0;
	default: /* all other regions are handled via capability chain */
	{
		struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
		struct vfio_region_info_cap_type cap_type = {
			.header.id = VFIO_REGION_INFO_CAP_TYPE,
			.header.version = 1 };
		int ret;

		if (info->index >=
		    VFIO_CCW_NUM_REGIONS + private->num_regions)
			return -EINVAL;

		info->index = array_index_nospec(info->index,
						 VFIO_CCW_NUM_REGIONS +
						 private->num_regions);

		i = info->index - VFIO_CCW_NUM_REGIONS;

		info->offset = VFIO_CCW_INDEX_TO_OFFSET(info->index);
		info->size = private->region[i].size;
		info->flags = private->region[i].flags;

		cap_type.type = private->region[i].type;
		cap_type.subtype = private->region[i].subtype;

		ret = vfio_info_add_capability(&caps, &cap_type.header,
					       sizeof(cap_type));
		if (ret)
			return ret;

		info->flags |= VFIO_REGION_INFO_FLAG_CAPS;
		if (info->argsz < sizeof(*info) + caps.size) {
			info->argsz = sizeof(*info) + caps.size;
			info->cap_offset = 0;
		} else {
			vfio_info_cap_shift(&caps, sizeof(*info));
			if (copy_to_user((void __user *)arg + sizeof(*info),
					 caps.buf, caps.size)) {
				kfree(caps.buf);
				return -EFAULT;
			}
			info->cap_offset = sizeof(*info);
		}

		kfree(caps.buf);
	}
	}

	return 0;
}
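
/* All three IRQs (I/O, CRW, req) are single-count eventfd triggers. */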
static int vfio_ccw_mdev_get_irq_info(struct vfio_irq_info *info)
{
	switch (info->index) {
	case VFIO_CCW_IO_IRQ_INDEX:
	case VFIO_CCW_CRW_IRQ_INDEX:
	case VFIO_CCW_REQ_IRQ_INDEX:
		info->count = 1;
		info->flags = VFIO_IRQ_INFO_EVENTFD;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
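
/*
 * Attach, detach or test the eventfd trigger for the selected IRQ:
 * DATA_NONE/DATA_BOOL signal an already registered eventfd, while
 * DATA_EVENTFD installs a new context (or removes it for fd == -1).
 */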
static int vfio_ccw_mdev_set_irqs(struct mdev_device *mdev,
				  uint32_t flags,
				  uint32_t index,
				  void __user *data)
{
	struct vfio_ccw_private *private;
	struct eventfd_ctx **ctx;

	if (!(flags & VFIO_IRQ_SET_ACTION_TRIGGER))
		return -EINVAL;

	private = dev_get_drvdata(mdev_parent_dev(mdev));

	switch (index) {
	case VFIO_CCW_IO_IRQ_INDEX:
		ctx = &private->io_trigger;
		break;
	case VFIO_CCW_CRW_IRQ_INDEX:
		ctx = &private->crw_trigger;
		break;
	case VFIO_CCW_REQ_IRQ_INDEX:
		ctx = &private->req_trigger;
		break;
	default:
		return -EINVAL;
	}

	switch (flags & VFIO_IRQ_SET_DATA_TYPE_MASK) {
	case VFIO_IRQ_SET_DATA_NONE:
	{
		if (*ctx)
			eventfd_signal(*ctx, 1);
		return 0;
	}
	case VFIO_IRQ_SET_DATA_BOOL:
	{
		uint8_t trigger;

		if (get_user(trigger, (uint8_t __user *)data))
			return -EFAULT;

		if (trigger && *ctx)
			eventfd_signal(*ctx, 1);
		return 0;
	}
	case VFIO_IRQ_SET_DATA_EVENTFD:
	{
		int32_t fd;

		if (get_user(fd, (int32_t __user *)data))
			return -EFAULT;

		if (fd == -1) {
			if (*ctx)
				eventfd_ctx_put(*ctx);
			*ctx = NULL;
		} else if (fd >= 0) {
			struct eventfd_ctx *efdctx;

			efdctx = eventfd_ctx_fdget(fd);
			if (IS_ERR(efdctx))
				return PTR_ERR(efdctx);

			if (*ctx)
				eventfd_ctx_put(*ctx);

			*ctx = efdctx;
		} else
			return -EINVAL;

		return 0;
	}
	default:
		return -EINVAL;
	}
}
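
/*
 * Grow the region array by one slot and fill in the descriptor for the
 * new region; used while setting up the async, schib and crw regions.
 */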
int vfio_ccw_register_dev_region(struct vfio_ccw_private *private,
				 unsigned int subtype,
				 const struct vfio_ccw_regops *ops,
				 size_t size, u32 flags, void *data)
{
	struct vfio_ccw_region *region;

	region = krealloc(private->region,
			  (private->num_regions + 1) * sizeof(*region),
			  GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	private->region = region;
	private->region[private->num_regions].type = VFIO_REGION_TYPE_CCW;
	private->region[private->num_regions].subtype = subtype;
	private->region[private->num_regions].ops = ops;
	private->region[private->num_regions].size = size;
	private->region[private->num_regions].flags = flags;
	private->region[private->num_regions].data = data;

	private->num_regions++;

	return 0;
}
void vfio_ccw_unregister_dev_regions(struct vfio_ccw_private *private)
{
	int i;

	for (i = 0; i < private->num_regions; i++)
		private->region[i].ops->release(private, &private->region[i]);
	private->num_regions = 0;
	kfree(private->region);
	private->region = NULL;
}
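
/*
 * Top-level ioctl dispatcher: validate the user-supplied argument size
 * for each VFIO_DEVICE_* command and hand off to the helpers above.
 */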
static ssize_t vfio_ccw_mdev_ioctl(struct mdev_device *mdev,
				   unsigned int cmd,
				   unsigned long arg)
{
	int ret = 0;
	unsigned long minsz;

	switch (cmd) {
	case VFIO_DEVICE_GET_INFO:
	{
		struct vfio_device_info info;

		minsz = offsetofend(struct vfio_device_info, num_irqs);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		ret = vfio_ccw_mdev_get_device_info(&info, mdev);
		if (ret)
			return ret;

		return copy_to_user((void __user *)arg, &info, minsz);
	}
	case VFIO_DEVICE_GET_REGION_INFO:
	{
		struct vfio_region_info info;

		minsz = offsetofend(struct vfio_region_info, offset);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		ret = vfio_ccw_mdev_get_region_info(&info, mdev, arg);
		if (ret)
			return ret;

		return copy_to_user((void __user *)arg, &info, minsz);
	}
	case VFIO_DEVICE_GET_IRQ_INFO:
	{
		struct vfio_irq_info info;

		minsz = offsetofend(struct vfio_irq_info, count);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz || info.index >= VFIO_CCW_NUM_IRQS)
			return -EINVAL;

		ret = vfio_ccw_mdev_get_irq_info(&info);
		if (ret)
			return ret;

		if (info.count == -1)
			return -EINVAL;

		return copy_to_user((void __user *)arg, &info, minsz);
	}
	case VFIO_DEVICE_SET_IRQS:
	{
		struct vfio_irq_set hdr;
		size_t data_size;
		void __user *data;

		minsz = offsetofend(struct vfio_irq_set, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		ret = vfio_set_irqs_validate_and_prepare(&hdr, 1,
							 VFIO_CCW_NUM_IRQS,
							 &data_size);
		if (ret)
			return ret;

		data = (void __user *)(arg + minsz);
		return vfio_ccw_mdev_set_irqs(mdev, hdr.flags, hdr.index, data);
	}
	case VFIO_DEVICE_RESET:
		return vfio_ccw_mdev_reset(mdev);
	default:
		return -ENOTTY;
	}
}
/* Request removal of the device */
static void vfio_ccw_mdev_request(struct mdev_device *mdev, unsigned int count)
{
	struct vfio_ccw_private *private = dev_get_drvdata(mdev_parent_dev(mdev));

	if (!private)
		return;

	if (private->req_trigger) {
		if (!(count % 10))
			dev_notice_ratelimited(mdev_dev(private->mdev),
					       "Relaying device request to user (#%u)\n",
					       count);

		eventfd_signal(private->req_trigger, 1);
	} else if (count == 0) {
		dev_notice(mdev_dev(private->mdev),
			   "No device request channel registered, blocked until released by user\n");
	}
}
static const struct mdev_parent_ops vfio_ccw_mdev_ops = {
	.owner			= THIS_MODULE,
	.supported_type_groups	= mdev_type_groups,
	.create			= vfio_ccw_mdev_create,
	.remove			= vfio_ccw_mdev_remove,
	.open			= vfio_ccw_mdev_open,
	.release		= vfio_ccw_mdev_release,
	.read			= vfio_ccw_mdev_read,
	.write			= vfio_ccw_mdev_write,
	.ioctl			= vfio_ccw_mdev_ioctl,
	.request		= vfio_ccw_mdev_request,
};

int vfio_ccw_mdev_reg(struct subchannel *sch)
{
	return mdev_register_device(&sch->dev, &vfio_ccw_mdev_ops);
}

void vfio_ccw_mdev_unreg(struct subchannel *sch)
{
	mdev_unregister_device(&sch->dev);
}