// SPDX-License-Identifier: GPL-2.0
/*
 * Physical device callbacks for vfio_ccw
 *
 * Copyright IBM Corp. 2017
 * Copyright Red Hat, Inc. 2019
 *
 * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
 *            Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
 *            Cornelia Huck <cohuck@redhat.com>
 */

#include <linux/vfio.h>
#include <linux/mdev.h>
#include <linux/nospec.h>
#include <linux/slab.h>

#include "vfio_ccw_private.h"
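/*
 * Reset the I/O subchannel backing this mdev: quiesce any running I/O and
 * pending interrupts, re-enable the subchannel and, on success, return the
 * device to the IDLE state.
 */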
static int vfio_ccw_mdev_reset(struct mdev_device *mdev)
{
	struct vfio_ccw_private *private;
	struct subchannel *sch;
	int ret;

	private = dev_get_drvdata(mdev_parent_dev(mdev));
	sch = private->sch;
	/*
	 * TODO:
	 * In the current stage, some things like "no I/O running" and "no
	 * interrupt pending" are clear, but we are not sure what other state
	 * we need to care about.
	 * There are still a lot more instructions that need to be handled. We
	 * should come back here later.
	 */
	ret = vfio_ccw_sch_quiesce(sch);
	if (ret)
		return ret;

	ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
	if (!ret)
		private->state = VFIO_CCW_STATE_IDLE;

	return ret;
}
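/*
 * VFIO IOMMU notifier callback.  On a DMA unmap, pinned guest pages must be
 * released: if the translated channel program has the unmapped IOVA pinned,
 * reset the device and free the channel program.
 */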
static int vfio_ccw_mdev_notifier(struct notifier_block *nb,
				  unsigned long action,
				  void *data)
{
	struct vfio_ccw_private *private =
		container_of(nb, struct vfio_ccw_private, nb);

	/*
	 * Vendor drivers MUST unpin pages in response to an
	 * invalidation.
	 */
	if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
		struct vfio_iommu_type1_dma_unmap *unmap = data;

		if (!cp_iova_pinned(&private->cp, unmap->iova))
			return NOTIFY_OK;

		if (vfio_ccw_mdev_reset(private->mdev))
			return NOTIFY_BAD;

		cp_free(&private->cp);
		return NOTIFY_OK;
	}

	return NOTIFY_DONE;
}
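/*
 * Sysfs attributes describing the single supported mdev type, exposed by the
 * mdev core under the parent subchannel's mdev_supported_types directory.
 */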
static ssize_t name_show(struct kobject *kobj, struct device *dev, char *buf)
{
	return sprintf(buf, "I/O subchannel (Non-QDIO)\n");
}
static MDEV_TYPE_ATTR_RO(name);
static ssize_t device_api_show(struct kobject *kobj, struct device *dev,
			       char *buf)
{
	return sprintf(buf, "%s\n", VFIO_DEVICE_API_CCW_STRING);
}
static MDEV_TYPE_ATTR_RO(device_api);
static ssize_t available_instances_show(struct kobject *kobj,
					struct device *dev, char *buf)
{
	struct vfio_ccw_private *private = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", atomic_read(&private->avail));
}
static MDEV_TYPE_ATTR_RO(available_instances);
static struct attribute *mdev_types_attrs[] = {
	&mdev_type_attr_name.attr,
	&mdev_type_attr_device_api.attr,
	&mdev_type_attr_available_instances.attr,
	NULL,
};

static struct attribute_group mdev_type_group = {
	.name  = "io",
	.attrs = mdev_types_attrs,
};

static struct attribute_group *mdev_type_groups[] = {
	&mdev_type_group,
	NULL,
};
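/*
 * Create/remove callbacks for the mediated device.  Only one mdev can exist
 * per subchannel at a time; the 'avail' counter enforces this.
 */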
static int vfio_ccw_mdev_create(struct kobject *kobj, struct mdev_device *mdev)
{
	struct vfio_ccw_private *private =
		dev_get_drvdata(mdev_parent_dev(mdev));

	if (private->state == VFIO_CCW_STATE_NOT_OPER)
		return -ENODEV;

	if (atomic_dec_if_positive(&private->avail) < 0)
		return -EPERM;

	private->mdev = mdev;
	private->state = VFIO_CCW_STATE_IDLE;

	return 0;
}
static int vfio_ccw_mdev_remove(struct mdev_device *mdev)
{
	struct vfio_ccw_private *private =
		dev_get_drvdata(mdev_parent_dev(mdev));

	if ((private->state != VFIO_CCW_STATE_NOT_OPER) &&
	    (private->state != VFIO_CCW_STATE_STANDBY)) {
		if (!vfio_ccw_sch_quiesce(private->sch))
			private->state = VFIO_CCW_STATE_STANDBY;
		/* The state will be NOT_OPER on error. */
	}

	cp_free(&private->cp);
	private->mdev = NULL;
	atomic_inc(&private->avail);

	return 0;
}
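/*
 * open: called when userspace opens the mdev.  Registers the IOMMU notifier
 * for DMA unmaps and sets up the additional (async) device regions.
 */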
static int vfio_ccw_mdev_open(struct mdev_device *mdev)
{
	struct vfio_ccw_private *private =
		dev_get_drvdata(mdev_parent_dev(mdev));
	unsigned long events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
	int ret;

	private->nb.notifier_call = vfio_ccw_mdev_notifier;

	ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
				     &events, &private->nb);
	if (ret)
		return ret;

	ret = vfio_ccw_register_async_dev_regions(private);
	if (ret)
		vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
					 &private->nb);
	return ret;
}
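/*
 * release: counterpart of open.  Quiesce the device if needed, drop the
 * channel program, unregister the IOMMU notifier and tear down any extra
 * device regions.
 */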
static void vfio_ccw_mdev_release(struct mdev_device *mdev)
{
	struct vfio_ccw_private *private =
		dev_get_drvdata(mdev_parent_dev(mdev));
	int i;

	if ((private->state != VFIO_CCW_STATE_NOT_OPER) &&
	    (private->state != VFIO_CCW_STATE_STANDBY)) {
		if (!vfio_ccw_mdev_reset(mdev))
			private->state = VFIO_CCW_STATE_STANDBY;
		/* The state will be NOT_OPER on error. */
	}

	cp_free(&private->cp);
	vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
				 &private->nb);

	for (i = 0; i < private->num_regions; i++)
		private->region[i].ops->release(private, &private->region[i]);

	private->num_regions = 0;
	kfree(private->region);
	private->region = NULL;
}
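/*
 * Read handler for the main I/O region: copy (part of) the ccw_io_region
 * back to userspace, serialized against concurrent I/O via io_mutex.
 */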
static ssize_t vfio_ccw_mdev_read_io_region(struct vfio_ccw_private *private,
					    char __user *buf, size_t count,
					    loff_t *ppos)
{
	loff_t pos = *ppos & VFIO_CCW_OFFSET_MASK;
	struct ccw_io_region *region;
	int ret;

	if (pos + count > sizeof(*region))
		return -EINVAL;

	mutex_lock(&private->io_mutex);
	region = private->io_region;
	if (copy_to_user(buf, (void *)region + pos, count))
		ret = -EFAULT;
	else
		ret = count;
	mutex_unlock(&private->io_mutex);
	return ret;
}
static ssize_t vfio_ccw_mdev_read(struct mdev_device *mdev,
				  char __user *buf,
				  size_t count,
				  loff_t *ppos)
{
	unsigned int index = VFIO_CCW_OFFSET_TO_INDEX(*ppos);
	struct vfio_ccw_private *private;

	private = dev_get_drvdata(mdev_parent_dev(mdev));

	if (index >= VFIO_CCW_NUM_REGIONS + private->num_regions)
		return -EINVAL;

	switch (index) {
	case VFIO_CCW_CONFIG_REGION_INDEX:
		return vfio_ccw_mdev_read_io_region(private, buf, count, ppos);
	default:
		index -= VFIO_CCW_NUM_REGIONS;
		return private->region[index].ops->read(private, buf, count,
							ppos);
	}

	return -EINVAL;
}
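/*
 * Write handler for the main I/O region: copy the request from userspace and
 * feed it to the FSM as an I/O request event.  A non-zero ret_code in the
 * region reports the failure back to the caller.
 */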
static ssize_t vfio_ccw_mdev_write_io_region(struct vfio_ccw_private *private,
					     const char __user *buf,
					     size_t count, loff_t *ppos)
{
	loff_t pos = *ppos & VFIO_CCW_OFFSET_MASK;
	struct ccw_io_region *region;
	int ret;

	if (pos + count > sizeof(*region))
		return -EINVAL;

	if (!mutex_trylock(&private->io_mutex))
		return -EAGAIN;

	region = private->io_region;
	if (copy_from_user((void *)region + pos, buf, count)) {
		ret = -EFAULT;
		goto out_unlock;
	}

	vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_IO_REQ);
	if (region->ret_code != 0)
		private->state = VFIO_CCW_STATE_IDLE;
	ret = (region->ret_code != 0) ? region->ret_code : count;

out_unlock:
	mutex_unlock(&private->io_mutex);
	return ret;
}
static ssize_t vfio_ccw_mdev_write(struct mdev_device *mdev,
				   const char __user *buf,
				   size_t count,
				   loff_t *ppos)
{
	unsigned int index = VFIO_CCW_OFFSET_TO_INDEX(*ppos);
	struct vfio_ccw_private *private;

	private = dev_get_drvdata(mdev_parent_dev(mdev));

	if (index >= VFIO_CCW_NUM_REGIONS + private->num_regions)
		return -EINVAL;

	switch (index) {
	case VFIO_CCW_CONFIG_REGION_INDEX:
		return vfio_ccw_mdev_write_io_region(private, buf, count, ppos);
	default:
		index -= VFIO_CCW_NUM_REGIONS;
		return private->region[index].ops->write(private, buf, count,
							 ppos);
	}

	return -EINVAL;
}
static int vfio_ccw_mdev_get_device_info(struct vfio_device_info *info,
					 struct mdev_device *mdev)
{
	struct vfio_ccw_private *private;

	private = dev_get_drvdata(mdev_parent_dev(mdev));
	info->flags = VFIO_DEVICE_FLAGS_CCW | VFIO_DEVICE_FLAGS_RESET;
	info->num_regions = VFIO_CCW_NUM_REGIONS + private->num_regions;
	info->num_irqs = VFIO_CCW_NUM_IRQS;

	return 0;
}
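/*
 * REGION_INFO: the config (I/O) region is described directly; any
 * additionally registered regions are described via the region info
 * capability chain.
 */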
static int vfio_ccw_mdev_get_region_info(struct vfio_region_info *info,
					 struct mdev_device *mdev,
					 unsigned long arg)
{
	struct vfio_ccw_private *private;
	int i;

	private = dev_get_drvdata(mdev_parent_dev(mdev));
	switch (info->index) {
	case VFIO_CCW_CONFIG_REGION_INDEX:
		info->offset = 0;
		info->size = sizeof(struct ccw_io_region);
		info->flags = VFIO_REGION_INFO_FLAG_READ
			      | VFIO_REGION_INFO_FLAG_WRITE;
		return 0;
	default: /* all other regions are handled via capability chain */
	{
		struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
		struct vfio_region_info_cap_type cap_type = {
			.header.id = VFIO_REGION_INFO_CAP_TYPE,
			.header.version = 1 };
		int ret;

		if (info->index >=
		    VFIO_CCW_NUM_REGIONS + private->num_regions)
			return -EINVAL;

		info->index = array_index_nospec(info->index,
						 VFIO_CCW_NUM_REGIONS +
						 private->num_regions);

		i = info->index - VFIO_CCW_NUM_REGIONS;

		info->offset = VFIO_CCW_INDEX_TO_OFFSET(info->index);
		info->size = private->region[i].size;
		info->flags = private->region[i].flags;

		cap_type.type = private->region[i].type;
		cap_type.subtype = private->region[i].subtype;

		ret = vfio_info_add_capability(&caps, &cap_type.header,
					       sizeof(cap_type));
		if (ret)
			return ret;

		info->flags |= VFIO_REGION_INFO_FLAG_CAPS;
		if (info->argsz < sizeof(*info) + caps.size) {
			info->argsz = sizeof(*info) + caps.size;
			info->cap_offset = 0;
		} else {
			vfio_info_cap_shift(&caps, sizeof(*info));
			if (copy_to_user((void __user *)arg + sizeof(*info),
					 caps.buf, caps.size)) {
				kfree(caps.buf);
				return -EFAULT;
			}
			info->cap_offset = sizeof(*info);
		}

		kfree(caps.buf);
	}
	}
	return 0;
}
static int vfio_ccw_mdev_get_irq_info(struct vfio_irq_info *info)
{
	if (info->index != VFIO_CCW_IO_IRQ_INDEX)
		return -EINVAL;

	info->count = 1;
	info->flags = VFIO_IRQ_INFO_EVENTFD;

	return 0;
}
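/*
 * SET_IRQS: only the trigger action on the I/O IRQ is supported.  Userspace
 * either signals the trigger directly (DATA_NONE/DATA_BOOL) or installs or
 * removes the eventfd context used to deliver I/O notifications
 * (DATA_EVENTFD).
 */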
static int vfio_ccw_mdev_set_irqs(struct mdev_device *mdev,
				  uint32_t flags,
				  void __user *data)
{
	struct vfio_ccw_private *private;
	struct eventfd_ctx **ctx;

	if (!(flags & VFIO_IRQ_SET_ACTION_TRIGGER))
		return -EINVAL;

	private = dev_get_drvdata(mdev_parent_dev(mdev));
	ctx = &private->io_trigger;

	switch (flags & VFIO_IRQ_SET_DATA_TYPE_MASK) {
	case VFIO_IRQ_SET_DATA_NONE:
	{
		if (*ctx)
			eventfd_signal(*ctx, 1);

		return 0;
	}
	case VFIO_IRQ_SET_DATA_BOOL:
	{
		uint8_t trigger;

		if (get_user(trigger, (uint8_t __user *)data))
			return -EFAULT;

		if (trigger && *ctx)
			eventfd_signal(*ctx, 1);

		return 0;
	}
	case VFIO_IRQ_SET_DATA_EVENTFD:
	{
		int32_t fd;

		if (get_user(fd, (int32_t __user *)data))
			return -EFAULT;

		if (fd == -1) {
			if (*ctx)
				eventfd_ctx_put(*ctx);
			*ctx = NULL;
		} else if (fd >= 0) {
			struct eventfd_ctx *efdctx;

			efdctx = eventfd_ctx_fdget(fd);
			if (IS_ERR(efdctx))
				return PTR_ERR(efdctx);

			if (*ctx)
				eventfd_ctx_put(*ctx);

			*ctx = efdctx;
		} else
			return -EINVAL;

		return 0;
	}
	default:
		return -EINVAL;
	}
}
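/*
 * Helper for sub-drivers (e.g. the async command region) to register an
 * additional device region.  The region array is grown with krealloc and the
 * new slot is filled in from the caller's parameters.
 */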
int vfio_ccw_register_dev_region(struct vfio_ccw_private *private,
				 unsigned int subtype,
				 const struct vfio_ccw_regops *ops,
				 size_t size, u32 flags, void *data)
{
	struct vfio_ccw_region *region;

	region = krealloc(private->region,
			  (private->num_regions + 1) * sizeof(*region),
			  GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	private->region = region;
	private->region[private->num_regions].type = VFIO_REGION_TYPE_CCW;
	private->region[private->num_regions].subtype = subtype;
	private->region[private->num_regions].ops = ops;
	private->region[private->num_regions].size = size;
	private->region[private->num_regions].flags = flags;
	private->region[private->num_regions].data = data;

	private->num_regions++;

	return 0;
}
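/*
 * Device ioctl handler: implements VFIO_DEVICE_GET_INFO,
 * VFIO_DEVICE_GET_REGION_INFO, VFIO_DEVICE_GET_IRQ_INFO,
 * VFIO_DEVICE_SET_IRQS and VFIO_DEVICE_RESET for the mediated device.
 */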
static ssize_t vfio_ccw_mdev_ioctl(struct mdev_device *mdev,
				   unsigned int cmd,
				   unsigned long arg)
{
	int ret = 0;
	unsigned long minsz;

	switch (cmd) {
	case VFIO_DEVICE_GET_INFO:
	{
		struct vfio_device_info info;

		minsz = offsetofend(struct vfio_device_info, num_irqs);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		ret = vfio_ccw_mdev_get_device_info(&info, mdev);
		if (ret)
			return ret;

		return copy_to_user((void __user *)arg, &info, minsz);
	}
	case VFIO_DEVICE_GET_REGION_INFO:
	{
		struct vfio_region_info info;

		minsz = offsetofend(struct vfio_region_info, offset);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		ret = vfio_ccw_mdev_get_region_info(&info, mdev, arg);
		if (ret)
			return ret;

		return copy_to_user((void __user *)arg, &info, minsz);
	}
	case VFIO_DEVICE_GET_IRQ_INFO:
	{
		struct vfio_irq_info info;

		minsz = offsetofend(struct vfio_irq_info, count);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz || info.index >= VFIO_CCW_NUM_IRQS)
			return -EINVAL;

		ret = vfio_ccw_mdev_get_irq_info(&info);
		if (ret)
			return ret;

		if (info.count == -1)
			return -EINVAL;

		return copy_to_user((void __user *)arg, &info, minsz);
	}
	case VFIO_DEVICE_SET_IRQS:
	{
		struct vfio_irq_set hdr;
		size_t data_size;
		void __user *data;

		minsz = offsetofend(struct vfio_irq_set, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		ret = vfio_set_irqs_validate_and_prepare(&hdr, 1,
							 VFIO_CCW_NUM_IRQS,
							 &data_size);
		if (ret)
			return ret;

		data = (void __user *)(arg + minsz);
		return vfio_ccw_mdev_set_irqs(mdev, hdr.flags, data);
	}
	case VFIO_DEVICE_RESET:
		return vfio_ccw_mdev_reset(mdev);
	default:
		return -ENOTTY;
	}
}
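/*
 * The mdev_parent_ops wiring: these callbacks are handed to the mdev core
 * when the parent subchannel registers itself via vfio_ccw_mdev_reg().
 */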
static const struct mdev_parent_ops vfio_ccw_mdev_ops = {
	.owner			= THIS_MODULE,
	.supported_type_groups	= mdev_type_groups,
	.create			= vfio_ccw_mdev_create,
	.remove			= vfio_ccw_mdev_remove,
	.open			= vfio_ccw_mdev_open,
	.release		= vfio_ccw_mdev_release,
	.read			= vfio_ccw_mdev_read,
	.write			= vfio_ccw_mdev_write,
	.ioctl			= vfio_ccw_mdev_ioctl,
};
int vfio_ccw_mdev_reg(struct subchannel *sch)
{
	return mdev_register_device(&sch->dev, &vfio_ccw_mdev_ops);
}

void vfio_ccw_mdev_unreg(struct subchannel *sch)
{
	mdev_unregister_device(&sch->dev);
}