drivers/s390/cio/vfio_ccw_ops.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Physical device callbacks for vfio_ccw
 *
 * Copyright IBM Corp. 2017
 * Copyright Red Hat, Inc. 2019
 *
 * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
 *            Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
 *            Cornelia Huck <cohuck@redhat.com>
 */

#include <linux/vfio.h>
#include <linux/mdev.h>
#include <linux/nospec.h>
#include <linux/slab.h>

#include "vfio_ccw_private.h"

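/*
 * Reset the mediated device: quiesce any I/O outstanding on the
 * subchannel, then re-enable it. On success the device ends up in the
 * IDLE state again.
 */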
static int vfio_ccw_mdev_reset(struct mdev_device *mdev)
{
        struct vfio_ccw_private *private;
        struct subchannel *sch;
        int ret;

        private = dev_get_drvdata(mdev_parent_dev(mdev));
        sch = private->sch;
        /*
         * TODO:
         * In the current stage, some things like "no I/O running" and "no
         * interrupt pending" are clear, but we are not sure what other state
         * we need to care about.
         * There are still a lot more instructions that need to be handled. We
         * should come back here later.
         */
        ret = vfio_ccw_sch_quiesce(sch);
        if (ret)
                return ret;

        ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
        if (!ret)
                private->state = VFIO_CCW_STATE_IDLE;

        return ret;
}

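/*
 * IOMMU notifier callback: if a DMA unmap hits an iova that the
 * translated channel program still has pinned, reset the device (which
 * unpins everything) and free the channel program.
 */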
static int vfio_ccw_mdev_notifier(struct notifier_block *nb,
                                  unsigned long action,
                                  void *data)
{
        struct vfio_ccw_private *private =
                container_of(nb, struct vfio_ccw_private, nb);

        /*
         * Vendor drivers MUST unpin pages in response to an
         * invalidation.
         */
        if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
                struct vfio_iommu_type1_dma_unmap *unmap = data;

                if (!cp_iova_pinned(&private->cp, unmap->iova))
                        return NOTIFY_OK;

                if (vfio_ccw_mdev_reset(private->mdev))
                        return NOTIFY_BAD;

                cp_free(&private->cp);
                return NOTIFY_OK;
        }

        return NOTIFY_DONE;
}

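/* sysfs attributes describing the single supported mdev type, "io" */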
static ssize_t name_show(struct kobject *kobj, struct device *dev, char *buf)
{
        return sprintf(buf, "I/O subchannel (Non-QDIO)\n");
}
static MDEV_TYPE_ATTR_RO(name);

static ssize_t device_api_show(struct kobject *kobj, struct device *dev,
                               char *buf)
{
        return sprintf(buf, "%s\n", VFIO_DEVICE_API_CCW_STRING);
}
static MDEV_TYPE_ATTR_RO(device_api);

static ssize_t available_instances_show(struct kobject *kobj,
                                        struct device *dev, char *buf)
{
        struct vfio_ccw_private *private = dev_get_drvdata(dev);

        return sprintf(buf, "%d\n", atomic_read(&private->avail));
}
static MDEV_TYPE_ATTR_RO(available_instances);

static struct attribute *mdev_types_attrs[] = {
        &mdev_type_attr_name.attr,
        &mdev_type_attr_device_api.attr,
        &mdev_type_attr_available_instances.attr,
        NULL,
};

static struct attribute_group mdev_type_group = {
        .name  = "io",
        .attrs = mdev_types_attrs,
};

static struct attribute_group *mdev_type_groups[] = {
        &mdev_type_group,
        NULL,
};

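/*
 * A subchannel backs at most one mediated device: creating it claims the
 * single available instance and moves the device to the IDLE state.
 */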
static int vfio_ccw_mdev_create(struct kobject *kobj, struct mdev_device *mdev)
{
        struct vfio_ccw_private *private =
                dev_get_drvdata(mdev_parent_dev(mdev));

        if (private->state == VFIO_CCW_STATE_NOT_OPER)
                return -ENODEV;

        if (atomic_dec_if_positive(&private->avail) < 0)
                return -EPERM;

        private->mdev = mdev;
        private->state = VFIO_CCW_STATE_IDLE;

        return 0;
}

static int vfio_ccw_mdev_remove(struct mdev_device *mdev)
{
        struct vfio_ccw_private *private =
                dev_get_drvdata(mdev_parent_dev(mdev));

        if ((private->state != VFIO_CCW_STATE_NOT_OPER) &&
            (private->state != VFIO_CCW_STATE_STANDBY)) {
                if (!vfio_ccw_sch_quiesce(private->sch))
                        private->state = VFIO_CCW_STATE_STANDBY;
                /* The state will be NOT_OPER on error. */
        }

        cp_free(&private->cp);
        private->mdev = NULL;
        atomic_inc(&private->avail);

        return 0;
}

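/*
 * Opening the device registers the IOMMU notifier and the async device
 * regions; the notifier registration is rolled back if region setup
 * fails.
 */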
static int vfio_ccw_mdev_open(struct mdev_device *mdev)
{
        struct vfio_ccw_private *private =
                dev_get_drvdata(mdev_parent_dev(mdev));
        unsigned long events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
        int ret;

        private->nb.notifier_call = vfio_ccw_mdev_notifier;

        ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
                                     &events, &private->nb);
        if (ret)
                return ret;

        ret = vfio_ccw_register_async_dev_regions(private);
        if (ret)
                vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
                                         &private->nb);
        return ret;
}

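/*
 * Tear down everything set up at open time: reset the device, drop the
 * IOMMU notifier and release any device-specific regions.
 */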
static void vfio_ccw_mdev_release(struct mdev_device *mdev)
{
        struct vfio_ccw_private *private =
                dev_get_drvdata(mdev_parent_dev(mdev));
        int i;

        if ((private->state != VFIO_CCW_STATE_NOT_OPER) &&
            (private->state != VFIO_CCW_STATE_STANDBY)) {
                if (!vfio_ccw_mdev_reset(mdev))
                        private->state = VFIO_CCW_STATE_STANDBY;
                /* The state will be NOT_OPER on error. */
        }

        cp_free(&private->cp);
        vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
                                 &private->nb);

        for (i = 0; i < private->num_regions; i++)
                private->region[i].ops->release(private, &private->region[i]);

        private->num_regions = 0;
        kfree(private->region);
        private->region = NULL;
}

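/*
 * The region index is encoded in the upper bits of the file offset
 * (VFIO_CCW_OFFSET_TO_INDEX); VFIO_CCW_OFFSET_MASK extracts the byte
 * offset within the region itself.
 */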
static ssize_t vfio_ccw_mdev_read_io_region(struct vfio_ccw_private *private,
                                            char __user *buf, size_t count,
                                            loff_t *ppos)
{
        loff_t pos = *ppos & VFIO_CCW_OFFSET_MASK;
        struct ccw_io_region *region;
        int ret;

        if (pos + count > sizeof(*region))
                return -EINVAL;

        mutex_lock(&private->io_mutex);
        region = private->io_region;
        if (copy_to_user(buf, (void *)region + pos, count))
                ret = -EFAULT;
        else
                ret = count;
        mutex_unlock(&private->io_mutex);
        return ret;
}

static ssize_t vfio_ccw_mdev_read(struct mdev_device *mdev,
                                  char __user *buf,
                                  size_t count,
                                  loff_t *ppos)
{
        unsigned int index = VFIO_CCW_OFFSET_TO_INDEX(*ppos);
        struct vfio_ccw_private *private;

        private = dev_get_drvdata(mdev_parent_dev(mdev));

        if (index >= VFIO_CCW_NUM_REGIONS + private->num_regions)
                return -EINVAL;

        switch (index) {
        case VFIO_CCW_CONFIG_REGION_INDEX:
                return vfio_ccw_mdev_read_io_region(private, buf, count, ppos);
        default:
                index -= VFIO_CCW_NUM_REGIONS;
                return private->region[index].ops->read(private, buf, count,
                                                        ppos);
        }

        return -EINVAL;
}

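/*
 * Writes to the I/O region kick off a channel program; a trylock makes a
 * concurrent request fail with -EAGAIN instead of blocking on the mutex.
 */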
static ssize_t vfio_ccw_mdev_write_io_region(struct vfio_ccw_private *private,
                                             const char __user *buf,
                                             size_t count, loff_t *ppos)
{
        loff_t pos = *ppos & VFIO_CCW_OFFSET_MASK;
        struct ccw_io_region *region;
        int ret;

        if (pos + count > sizeof(*region))
                return -EINVAL;

        if (!mutex_trylock(&private->io_mutex))
                return -EAGAIN;

        region = private->io_region;
        if (copy_from_user((void *)region + pos, buf, count)) {
                ret = -EFAULT;
                goto out_unlock;
        }

        vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_IO_REQ);
        if (region->ret_code != 0)
                private->state = VFIO_CCW_STATE_IDLE;
        ret = (region->ret_code != 0) ? region->ret_code : count;

out_unlock:
        mutex_unlock(&private->io_mutex);
        return ret;
}

static ssize_t vfio_ccw_mdev_write(struct mdev_device *mdev,
                                   const char __user *buf,
                                   size_t count,
                                   loff_t *ppos)
{
        unsigned int index = VFIO_CCW_OFFSET_TO_INDEX(*ppos);
        struct vfio_ccw_private *private;

        private = dev_get_drvdata(mdev_parent_dev(mdev));

        if (index >= VFIO_CCW_NUM_REGIONS + private->num_regions)
                return -EINVAL;

        switch (index) {
        case VFIO_CCW_CONFIG_REGION_INDEX:
                return vfio_ccw_mdev_write_io_region(private, buf, count, ppos);
        default:
                index -= VFIO_CCW_NUM_REGIONS;
                return private->region[index].ops->write(private, buf, count,
                                                         ppos);
        }

        return -EINVAL;
}

static int vfio_ccw_mdev_get_device_info(struct vfio_device_info *info,
                                         struct mdev_device *mdev)
{
        struct vfio_ccw_private *private;

        private = dev_get_drvdata(mdev_parent_dev(mdev));
        info->flags = VFIO_DEVICE_FLAGS_CCW | VFIO_DEVICE_FLAGS_RESET;
        info->num_regions = VFIO_CCW_NUM_REGIONS + private->num_regions;
        info->num_irqs = VFIO_CCW_NUM_IRQS;

        return 0;
}

static int vfio_ccw_mdev_get_region_info(struct vfio_region_info *info,
                                         struct mdev_device *mdev,
                                         unsigned long arg)
{
        struct vfio_ccw_private *private;
        int i;

        private = dev_get_drvdata(mdev_parent_dev(mdev));
        switch (info->index) {
        case VFIO_CCW_CONFIG_REGION_INDEX:
                info->offset = 0;
                info->size = sizeof(struct ccw_io_region);
                info->flags = VFIO_REGION_INFO_FLAG_READ
                              | VFIO_REGION_INFO_FLAG_WRITE;
                return 0;
        default: /* all other regions are handled via capability chain */
        {
                struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
                struct vfio_region_info_cap_type cap_type = {
                        .header.id = VFIO_REGION_INFO_CAP_TYPE,
                        .header.version = 1 };
                int ret;

                if (info->index >=
                    VFIO_CCW_NUM_REGIONS + private->num_regions)
                        return -EINVAL;

                info->index = array_index_nospec(info->index,
                                                 VFIO_CCW_NUM_REGIONS +
                                                 private->num_regions);

                i = info->index - VFIO_CCW_NUM_REGIONS;

                info->offset = VFIO_CCW_INDEX_TO_OFFSET(info->index);
                info->size = private->region[i].size;
                info->flags = private->region[i].flags;

                cap_type.type = private->region[i].type;
                cap_type.subtype = private->region[i].subtype;

                ret = vfio_info_add_capability(&caps, &cap_type.header,
                                               sizeof(cap_type));
                if (ret)
                        return ret;

                info->flags |= VFIO_REGION_INFO_FLAG_CAPS;
                if (info->argsz < sizeof(*info) + caps.size) {
                        info->argsz = sizeof(*info) + caps.size;
                        info->cap_offset = 0;
                } else {
                        vfio_info_cap_shift(&caps, sizeof(*info));
                        if (copy_to_user((void __user *)arg + sizeof(*info),
                                         caps.buf, caps.size)) {
                                kfree(caps.buf);
                                return -EFAULT;
                        }
                        info->cap_offset = sizeof(*info);
                }

                kfree(caps.buf);
        }
        }
        return 0;
}

static int vfio_ccw_mdev_get_irq_info(struct vfio_irq_info *info)
{
        if (info->index != VFIO_CCW_IO_IRQ_INDEX)
                return -EINVAL;

        info->count = 1;
        info->flags = VFIO_IRQ_INFO_EVENTFD;

        return 0;
}

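/*
 * The only IRQ is the I/O interrupt: DATA_EVENTFD installs (or, with
 * fd == -1, removes) the eventfd to signal on, while DATA_NONE and
 * DATA_BOOL signal an already installed eventfd directly.
 */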
static int vfio_ccw_mdev_set_irqs(struct mdev_device *mdev,
                                  uint32_t flags,
                                  void __user *data)
{
        struct vfio_ccw_private *private;
        struct eventfd_ctx **ctx;

        if (!(flags & VFIO_IRQ_SET_ACTION_TRIGGER))
                return -EINVAL;

        private = dev_get_drvdata(mdev_parent_dev(mdev));
        ctx = &private->io_trigger;

        switch (flags & VFIO_IRQ_SET_DATA_TYPE_MASK) {
        case VFIO_IRQ_SET_DATA_NONE:
        {
                if (*ctx)
                        eventfd_signal(*ctx, 1);
                return 0;
        }
        case VFIO_IRQ_SET_DATA_BOOL:
        {
                uint8_t trigger;

                if (get_user(trigger, (uint8_t __user *)data))
                        return -EFAULT;

                if (trigger && *ctx)
                        eventfd_signal(*ctx, 1);
                return 0;
        }
        case VFIO_IRQ_SET_DATA_EVENTFD:
        {
                int32_t fd;

                if (get_user(fd, (int32_t __user *)data))
                        return -EFAULT;

                if (fd == -1) {
                        if (*ctx)
                                eventfd_ctx_put(*ctx);
                        *ctx = NULL;
                } else if (fd >= 0) {
                        struct eventfd_ctx *efdctx;

                        efdctx = eventfd_ctx_fdget(fd);
                        if (IS_ERR(efdctx))
                                return PTR_ERR(efdctx);

                        if (*ctx)
                                eventfd_ctx_put(*ctx);

                        *ctx = efdctx;
                } else
                        return -EINVAL;

                return 0;
        }
        default:
                return -EINVAL;
        }
}

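/*
 * Append a device-specific region behind the VFIO_CCW_NUM_REGIONS fixed
 * regions; it is described to userspace via the capability chain in
 * VFIO_DEVICE_GET_REGION_INFO.
 */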
int vfio_ccw_register_dev_region(struct vfio_ccw_private *private,
                                 unsigned int subtype,
                                 const struct vfio_ccw_regops *ops,
                                 size_t size, u32 flags, void *data)
{
        struct vfio_ccw_region *region;

        region = krealloc(private->region,
                          (private->num_regions + 1) * sizeof(*region),
                          GFP_KERNEL);
        if (!region)
                return -ENOMEM;

        private->region = region;
        private->region[private->num_regions].type = VFIO_REGION_TYPE_CCW;
        private->region[private->num_regions].subtype = subtype;
        private->region[private->num_regions].ops = ops;
        private->region[private->num_regions].size = size;
        private->region[private->num_regions].flags = flags;
        private->region[private->num_regions].data = data;

        private->num_regions++;

        return 0;
}

static ssize_t vfio_ccw_mdev_ioctl(struct mdev_device *mdev,
                                   unsigned int cmd,
                                   unsigned long arg)
{
        int ret = 0;
        unsigned long minsz;

        switch (cmd) {
        case VFIO_DEVICE_GET_INFO:
        {
                struct vfio_device_info info;

                minsz = offsetofend(struct vfio_device_info, num_irqs);

                if (copy_from_user(&info, (void __user *)arg, minsz))
                        return -EFAULT;

                if (info.argsz < minsz)
                        return -EINVAL;

                ret = vfio_ccw_mdev_get_device_info(&info, mdev);
                if (ret)
                        return ret;

                return copy_to_user((void __user *)arg, &info, minsz);
        }
        case VFIO_DEVICE_GET_REGION_INFO:
        {
                struct vfio_region_info info;

                minsz = offsetofend(struct vfio_region_info, offset);

                if (copy_from_user(&info, (void __user *)arg, minsz))
                        return -EFAULT;

                if (info.argsz < minsz)
                        return -EINVAL;

                ret = vfio_ccw_mdev_get_region_info(&info, mdev, arg);
                if (ret)
                        return ret;

                return copy_to_user((void __user *)arg, &info, minsz);
        }
        case VFIO_DEVICE_GET_IRQ_INFO:
        {
                struct vfio_irq_info info;

                minsz = offsetofend(struct vfio_irq_info, count);

                if (copy_from_user(&info, (void __user *)arg, minsz))
                        return -EFAULT;

                if (info.argsz < minsz || info.index >= VFIO_CCW_NUM_IRQS)
                        return -EINVAL;

                ret = vfio_ccw_mdev_get_irq_info(&info);
                if (ret)
                        return ret;

                if (info.count == -1)
                        return -EINVAL;

                return copy_to_user((void __user *)arg, &info, minsz);
        }
        case VFIO_DEVICE_SET_IRQS:
        {
                struct vfio_irq_set hdr;
                size_t data_size;
                void __user *data;

                minsz = offsetofend(struct vfio_irq_set, count);

                if (copy_from_user(&hdr, (void __user *)arg, minsz))
                        return -EFAULT;

                ret = vfio_set_irqs_validate_and_prepare(&hdr, 1,
                                                         VFIO_CCW_NUM_IRQS,
                                                         &data_size);
                if (ret)
                        return ret;

                data = (void __user *)(arg + minsz);
                return vfio_ccw_mdev_set_irqs(mdev, hdr.flags, data);
        }
        case VFIO_DEVICE_RESET:
                return vfio_ccw_mdev_reset(mdev);
        default:
                return -ENOTTY;
        }
}

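/*
 * Userspace usage sketch (illustration only, error handling omitted;
 * "fd" is assumed to be an open file descriptor for the mdev obtained
 * via the usual VFIO group interface):
 *
 *      struct vfio_device_info info = { .argsz = sizeof(info) };
 *
 *      ioctl(fd, VFIO_DEVICE_GET_INFO, &info);
 *      // info.flags: VFIO_DEVICE_FLAGS_CCW | VFIO_DEVICE_FLAGS_RESET
 *      ioctl(fd, VFIO_DEVICE_RESET);
 */
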
static const struct mdev_parent_ops vfio_ccw_mdev_ops = {
        .owner                  = THIS_MODULE,
        .supported_type_groups  = mdev_type_groups,
        .create                 = vfio_ccw_mdev_create,
        .remove                 = vfio_ccw_mdev_remove,
        .open                   = vfio_ccw_mdev_open,
        .release                = vfio_ccw_mdev_release,
        .read                   = vfio_ccw_mdev_read,
        .write                  = vfio_ccw_mdev_write,
        .ioctl                  = vfio_ccw_mdev_ioctl,
};

int vfio_ccw_mdev_reg(struct subchannel *sch)
{
        return mdev_register_device(&sch->dev, &vfio_ccw_mdev_ops);
}

void vfio_ccw_mdev_unreg(struct subchannel *sch)
{
        mdev_unregister_device(&sch->dev);
}