// SPDX-License-Identifier: GPL-2.0
/*
 * Physical device callbacks for vfio_ccw
 *
 * Copyright IBM Corp. 2017
 * Copyright Red Hat, Inc. 2019
 *
 * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
 *            Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
 *            Cornelia Huck <cohuck@redhat.com>
 */

#include <linux/vfio.h>
#include <linux/mdev.h>
#include <linux/nospec.h>
#include <linux/slab.h>

#include "vfio_ccw_private.h"
static int vfio_ccw_mdev_reset(struct mdev_device *mdev)
{
	struct vfio_ccw_private *private;
	struct subchannel *sch;
	int ret;

	private = dev_get_drvdata(mdev_parent_dev(mdev));
	sch = private->sch;
	/*
	 * TODO:
	 * In the current stage, some things like "no I/O running" and "no
	 * interrupt pending" are clear, but we are not sure what other state
	 * we need to care about.
	 * There are still a lot more instructions that need to be handled. We
	 * should come back here later.
	 */
	ret = vfio_ccw_sch_quiesce(sch);
	if (ret)
		return ret;

	ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
	if (!ret)
		private->state = VFIO_CCW_STATE_IDLE;

	return ret;
}
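
/*
 * IOMMU notifier callback. On a DMA unmap event, any pages pinned for
 * the current channel program must be unpinned before the unmap
 * completes, which here is done by resetting the device and freeing
 * the translated channel program.
 */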
static int vfio_ccw_mdev_notifier(struct notifier_block *nb,
				  unsigned long action,
				  void *data)
{
	struct vfio_ccw_private *private =
		container_of(nb, struct vfio_ccw_private, nb);

	/*
	 * Vendor drivers MUST unpin pages in response to an
	 * invalidation.
	 */
	if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
		struct vfio_iommu_type1_dma_unmap *unmap = data;

		if (!cp_iova_pinned(&private->cp, unmap->iova))
			return NOTIFY_OK;

		if (vfio_ccw_mdev_reset(private->mdev))
			return NOTIFY_BAD;

		cp_free(&private->cp);
		return NOTIFY_OK;
	}

	return NOTIFY_DONE;
}
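
/*
 * sysfs attributes for the single supported mdev type: a human-readable
 * name, the device API string, and the number of instances that can
 * still be created (0 or 1, since one subchannel backs one mdev).
 */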
static ssize_t name_show(struct kobject *kobj, struct device *dev, char *buf)
{
	return sprintf(buf, "I/O subchannel (Non-QDIO)\n");
}
static MDEV_TYPE_ATTR_RO(name);

static ssize_t device_api_show(struct kobject *kobj, struct device *dev,
			       char *buf)
{
	return sprintf(buf, "%s\n", VFIO_DEVICE_API_CCW_STRING);
}
static MDEV_TYPE_ATTR_RO(device_api);

static ssize_t available_instances_show(struct kobject *kobj,
					struct device *dev, char *buf)
{
	struct vfio_ccw_private *private = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", atomic_read(&private->avail));
}
static MDEV_TYPE_ATTR_RO(available_instances);

static struct attribute *mdev_types_attrs[] = {
	&mdev_type_attr_name.attr,
	&mdev_type_attr_device_api.attr,
	&mdev_type_attr_available_instances.attr,
	NULL,
};

static struct attribute_group mdev_type_group = {
	.name  = "io",
	.attrs = mdev_types_attrs,
};

static struct attribute_group *mdev_type_groups[] = {
	&mdev_type_group,
	NULL,
};
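
/*
 * Claim the subchannel for a new mediated device. Creation fails if
 * the subchannel is not operational or if it is already claimed.
 */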
static int vfio_ccw_mdev_create(struct kobject *kobj, struct mdev_device *mdev)
{
	struct vfio_ccw_private *private =
		dev_get_drvdata(mdev_parent_dev(mdev));

	if (private->state == VFIO_CCW_STATE_NOT_OPER)
		return -ENODEV;

	if (atomic_dec_if_positive(&private->avail) < 0)
		return -EPERM;

	private->mdev = mdev;
	private->state = VFIO_CCW_STATE_IDLE;

	VFIO_CCW_MSG_EVENT(2, "mdev %pUl, sch %x.%x.%04x: create\n",
			   mdev_uuid(mdev), private->sch->schid.cssid,
			   private->sch->schid.ssid,
			   private->sch->schid.sch_no);

	return 0;
}
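
/*
 * Tear down a mediated device: quiesce the subchannel if needed, free
 * the translated channel program and give the instance back.
 */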
static int vfio_ccw_mdev_remove(struct mdev_device *mdev)
{
	struct vfio_ccw_private *private =
		dev_get_drvdata(mdev_parent_dev(mdev));

	VFIO_CCW_MSG_EVENT(2, "mdev %pUl, sch %x.%x.%04x: remove\n",
			   mdev_uuid(mdev), private->sch->schid.cssid,
			   private->sch->schid.ssid,
			   private->sch->schid.sch_no);

	if ((private->state != VFIO_CCW_STATE_NOT_OPER) &&
	    (private->state != VFIO_CCW_STATE_STANDBY)) {
		if (!vfio_ccw_sch_quiesce(private->sch))
			private->state = VFIO_CCW_STATE_STANDBY;
		/* The state will be NOT_OPER on error. */
	}

	cp_free(&private->cp);
	private->mdev = NULL;
	atomic_inc(&private->avail);

	return 0;
}
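
/*
 * Open callback: register for DMA unmap notifications and expose the
 * async device regions; the notifier is rolled back if region setup
 * fails.
 */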
static int vfio_ccw_mdev_open(struct mdev_device *mdev)
{
	struct vfio_ccw_private *private =
		dev_get_drvdata(mdev_parent_dev(mdev));
	unsigned long events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
	int ret;

	private->nb.notifier_call = vfio_ccw_mdev_notifier;

	ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
				     &events, &private->nb);
	if (ret)
		return ret;

	ret = vfio_ccw_register_async_dev_regions(private);
	if (ret)
		vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
					 &private->nb);
	return ret;
}
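
/*
 * Release callback: the inverse of open, plus a device reset so that
 * no I/O survives the mdev fd being closed.
 */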
static void vfio_ccw_mdev_release(struct mdev_device *mdev)
{
	struct vfio_ccw_private *private =
		dev_get_drvdata(mdev_parent_dev(mdev));
	int i;

	if ((private->state != VFIO_CCW_STATE_NOT_OPER) &&
	    (private->state != VFIO_CCW_STATE_STANDBY)) {
		if (!vfio_ccw_mdev_reset(mdev))
			private->state = VFIO_CCW_STATE_STANDBY;
		/* The state will be NOT_OPER on error. */
	}

	cp_free(&private->cp);
	vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
				 &private->nb);

	for (i = 0; i < private->num_regions; i++)
		private->region[i].ops->release(private, &private->region[i]);

	private->num_regions = 0;
	kfree(private->region);
	private->region = NULL;
}
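
/*
 * Read from the I/O region (the ccw_io_region backing the config
 * region index), guarded by io_mutex against concurrent updates.
 */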
static ssize_t vfio_ccw_mdev_read_io_region(struct vfio_ccw_private *private,
					    char __user *buf, size_t count,
					    loff_t *ppos)
{
	loff_t pos = *ppos & VFIO_CCW_OFFSET_MASK;
	struct ccw_io_region *region;
	int ret;

	if (pos + count > sizeof(*region))
		return -EINVAL;

	mutex_lock(&private->io_mutex);
	region = private->io_region;
	if (copy_to_user(buf, (void *)region + pos, count))
		ret = -EFAULT;
	else
		ret = count;
	mutex_unlock(&private->io_mutex);
	return ret;
}
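
/*
 * Dispatch a read to either the static I/O region or one of the
 * dynamically registered device regions.
 */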
static ssize_t vfio_ccw_mdev_read(struct mdev_device *mdev,
				  char __user *buf,
				  size_t count,
				  loff_t *ppos)
{
	unsigned int index = VFIO_CCW_OFFSET_TO_INDEX(*ppos);
	struct vfio_ccw_private *private;

	private = dev_get_drvdata(mdev_parent_dev(mdev));

	if (index >= VFIO_CCW_NUM_REGIONS + private->num_regions)
		return -EINVAL;

	switch (index) {
	case VFIO_CCW_CONFIG_REGION_INDEX:
		return vfio_ccw_mdev_read_io_region(private, buf, count, ppos);
	default:
		index -= VFIO_CCW_NUM_REGIONS;
		return private->region[index].ops->read(private, buf, count,
							ppos);
	}

	return -EINVAL;
}
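
/*
 * Write to the I/O region and kick the FSM with an I/O request event.
 * A non-zero ret_code set by the FSM is returned to the caller instead
 * of the byte count.
 *
 * A minimal sketch of how userspace might drive this region
 * (hypothetical snippet, assuming a VFIO device fd and a region_offset
 * obtained via VFIO_DEVICE_GET_REGION_INFO; not part of this file):
 *
 *	struct ccw_io_region io_region = { ... };	// ORB etc. filled in
 *	ssize_t n = pwrite(device_fd, &io_region,
 *			   sizeof(io_region), region_offset);
 *	// n < 0 with errno EAGAIN means the region was busy; after the
 *	// I/O interrupt, reading the region back exposes ret_code and
 *	// the IRB.
 */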
static ssize_t vfio_ccw_mdev_write_io_region(struct vfio_ccw_private *private,
					     const char __user *buf,
					     size_t count, loff_t *ppos)
{
	loff_t pos = *ppos & VFIO_CCW_OFFSET_MASK;
	struct ccw_io_region *region;
	int ret;

	if (pos + count > sizeof(*region))
		return -EINVAL;

	if (!mutex_trylock(&private->io_mutex))
		return -EAGAIN;

	region = private->io_region;
	if (copy_from_user((void *)region + pos, buf, count)) {
		ret = -EFAULT;
		goto out_unlock;
	}

	vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_IO_REQ);
	if (region->ret_code != 0)
		private->state = VFIO_CCW_STATE_IDLE;
	ret = (region->ret_code != 0) ? region->ret_code : count;

out_unlock:
	mutex_unlock(&private->io_mutex);
	return ret;
}
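
/*
 * Dispatch a write to either the static I/O region or one of the
 * dynamically registered device regions.
 */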
static ssize_t vfio_ccw_mdev_write(struct mdev_device *mdev,
				   const char __user *buf,
				   size_t count,
				   loff_t *ppos)
{
	unsigned int index = VFIO_CCW_OFFSET_TO_INDEX(*ppos);
	struct vfio_ccw_private *private;

	private = dev_get_drvdata(mdev_parent_dev(mdev));

	if (index >= VFIO_CCW_NUM_REGIONS + private->num_regions)
		return -EINVAL;

	switch (index) {
	case VFIO_CCW_CONFIG_REGION_INDEX:
		return vfio_ccw_mdev_write_io_region(private, buf, count, ppos);
	default:
		index -= VFIO_CCW_NUM_REGIONS;
		return private->region[index].ops->write(private, buf, count,
							 ppos);
	}

	return -EINVAL;
}
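
/*
 * VFIO_DEVICE_GET_INFO backend: report the CCW device flags and the
 * number of regions and IRQs this device exposes.
 */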
static int vfio_ccw_mdev_get_device_info(struct vfio_device_info *info,
					 struct mdev_device *mdev)
{
	struct vfio_ccw_private *private;

	private = dev_get_drvdata(mdev_parent_dev(mdev));
	info->flags = VFIO_DEVICE_FLAGS_CCW | VFIO_DEVICE_FLAGS_RESET;
	info->num_regions = VFIO_CCW_NUM_REGIONS + private->num_regions;
	info->num_irqs = VFIO_CCW_NUM_IRQS;

	return 0;
}
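
/*
 * VFIO_DEVICE_GET_REGION_INFO backend. The config region is described
 * directly; all other regions are described through a capability chain
 * appended behind the fixed-size info structure.
 */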
static int vfio_ccw_mdev_get_region_info(struct vfio_region_info *info,
					 struct mdev_device *mdev,
					 unsigned long arg)
{
	struct vfio_ccw_private *private;
	int i;

	private = dev_get_drvdata(mdev_parent_dev(mdev));
	switch (info->index) {
	case VFIO_CCW_CONFIG_REGION_INDEX:
		info->offset = 0;
		info->size = sizeof(struct ccw_io_region);
		info->flags = VFIO_REGION_INFO_FLAG_READ
			      | VFIO_REGION_INFO_FLAG_WRITE;
		return 0;
	default: /* all other regions are handled via capability chain */
	{
		struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
		struct vfio_region_info_cap_type cap_type = {
			.header.id = VFIO_REGION_INFO_CAP_TYPE,
			.header.version = 1 };
		int ret;

		if (info->index >=
		    VFIO_CCW_NUM_REGIONS + private->num_regions)
			return -EINVAL;

		info->index = array_index_nospec(info->index,
						 VFIO_CCW_NUM_REGIONS +
						 private->num_regions);

		i = info->index - VFIO_CCW_NUM_REGIONS;

		info->offset = VFIO_CCW_INDEX_TO_OFFSET(info->index);
		info->size = private->region[i].size;
		info->flags = private->region[i].flags;

		cap_type.type = private->region[i].type;
		cap_type.subtype = private->region[i].subtype;

		ret = vfio_info_add_capability(&caps, &cap_type.header,
					       sizeof(cap_type));
		if (ret)
			return ret;

		info->flags |= VFIO_REGION_INFO_FLAG_CAPS;
		if (info->argsz < sizeof(*info) + caps.size) {
			info->argsz = sizeof(*info) + caps.size;
			info->cap_offset = 0;
		} else {
			vfio_info_cap_shift(&caps, sizeof(*info));
			if (copy_to_user((void __user *)arg + sizeof(*info),
					 caps.buf, caps.size)) {
				kfree(caps.buf);
				return -EFAULT;
			}
			info->cap_offset = sizeof(*info);
		}

		kfree(caps.buf);
	}
	}

	return 0;
}
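
/* VFIO_DEVICE_GET_IRQ_INFO backend: only the I/O IRQ is supported. */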
static int vfio_ccw_mdev_get_irq_info(struct vfio_irq_info *info)
{
	if (info->index != VFIO_CCW_IO_IRQ_INDEX)
		return -EINVAL;

	info->count = 1;
	info->flags = VFIO_IRQ_INFO_EVENTFD;

	return 0;
}
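
/*
 * VFIO_DEVICE_SET_IRQS backend: wire up (or tear down) the eventfd used
 * to signal I/O interrupts to userspace, or trigger it directly for the
 * NONE/BOOL data types.
 */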
static int vfio_ccw_mdev_set_irqs(struct mdev_device *mdev,
				  uint32_t flags,
				  void __user *data)
{
	struct vfio_ccw_private *private;
	struct eventfd_ctx **ctx;

	if (!(flags & VFIO_IRQ_SET_ACTION_TRIGGER))
		return -EINVAL;

	private = dev_get_drvdata(mdev_parent_dev(mdev));
	ctx = &private->io_trigger;

	switch (flags & VFIO_IRQ_SET_DATA_TYPE_MASK) {
	case VFIO_IRQ_SET_DATA_NONE:
	{
		if (*ctx)
			eventfd_signal(*ctx, 1);

		return 0;
	}
	case VFIO_IRQ_SET_DATA_BOOL:
	{
		uint8_t trigger;

		if (get_user(trigger, (uint8_t __user *)data))
			return -EFAULT;

		if (trigger && *ctx)
			eventfd_signal(*ctx, 1);

		return 0;
	}
	case VFIO_IRQ_SET_DATA_EVENTFD:
	{
		int32_t fd;

		if (get_user(fd, (int32_t __user *)data))
			return -EFAULT;

		if (fd == -1) {
			if (*ctx)
				eventfd_ctx_put(*ctx);
			*ctx = NULL;
		} else if (fd >= 0) {
			struct eventfd_ctx *efdctx;

			efdctx = eventfd_ctx_fdget(fd);
			if (IS_ERR(efdctx))
				return PTR_ERR(efdctx);

			if (*ctx)
				eventfd_ctx_put(*ctx);

			*ctx = efdctx;
		} else
			return -EINVAL;

		return 0;
	}
	default:
		return -EINVAL;
	}
}
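
/*
 * Register an additional device region (e.g. the async command region)
 * by growing the region array; region indices above VFIO_CCW_NUM_REGIONS
 * are offsets into this array.
 */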
int vfio_ccw_register_dev_region(struct vfio_ccw_private *private,
				 unsigned int subtype,
				 const struct vfio_ccw_regops *ops,
				 size_t size, u32 flags, void *data)
{
	struct vfio_ccw_region *region;

	region = krealloc(private->region,
			  (private->num_regions + 1) * sizeof(*region),
			  GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	private->region = region;
	private->region[private->num_regions].type = VFIO_REGION_TYPE_CCW;
	private->region[private->num_regions].subtype = subtype;
	private->region[private->num_regions].ops = ops;
	private->region[private->num_regions].size = size;
	private->region[private->num_regions].flags = flags;
	private->region[private->num_regions].data = data;

	private->num_regions++;

	return 0;
}
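
/*
 * Top-level ioctl handler: each command copies in the fixed-size
 * header, validates argsz and defers to the helpers above.
 */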
static ssize_t vfio_ccw_mdev_ioctl(struct mdev_device *mdev,
				   unsigned int cmd,
				   unsigned long arg)
{
	int ret = 0;
	unsigned long minsz;

	switch (cmd) {
	case VFIO_DEVICE_GET_INFO:
	{
		struct vfio_device_info info;

		minsz = offsetofend(struct vfio_device_info, num_irqs);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		ret = vfio_ccw_mdev_get_device_info(&info, mdev);
		if (ret)
			return ret;

		return copy_to_user((void __user *)arg, &info, minsz);
	}
	case VFIO_DEVICE_GET_REGION_INFO:
	{
		struct vfio_region_info info;

		minsz = offsetofend(struct vfio_region_info, offset);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		ret = vfio_ccw_mdev_get_region_info(&info, mdev, arg);
		if (ret)
			return ret;

		return copy_to_user((void __user *)arg, &info, minsz);
	}
	case VFIO_DEVICE_GET_IRQ_INFO:
	{
		struct vfio_irq_info info;

		minsz = offsetofend(struct vfio_irq_info, count);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz || info.index >= VFIO_CCW_NUM_IRQS)
			return -EINVAL;

		ret = vfio_ccw_mdev_get_irq_info(&info);
		if (ret)
			return ret;

		if (info.count == -1)
			return -EINVAL;

		return copy_to_user((void __user *)arg, &info, minsz);
	}
	case VFIO_DEVICE_SET_IRQS:
	{
		struct vfio_irq_set hdr;
		size_t data_size;
		void __user *data;

		minsz = offsetofend(struct vfio_irq_set, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		ret = vfio_set_irqs_validate_and_prepare(&hdr, 1,
							 VFIO_CCW_NUM_IRQS,
							 &data_size);
		if (ret)
			return ret;

		data = (void __user *)(arg + minsz);
		return vfio_ccw_mdev_set_irqs(mdev, hdr.flags, data);
	}
	case VFIO_DEVICE_RESET:
		return vfio_ccw_mdev_reset(mdev);
	default:
		return -ENOTTY;
	}
}
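
/* Callbacks handed to the mediated device framework. */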
static const struct mdev_parent_ops vfio_ccw_mdev_ops = {
	.owner			= THIS_MODULE,
	.supported_type_groups  = mdev_type_groups,
	.create			= vfio_ccw_mdev_create,
	.remove			= vfio_ccw_mdev_remove,
	.open			= vfio_ccw_mdev_open,
	.release		= vfio_ccw_mdev_release,
	.read			= vfio_ccw_mdev_read,
	.write			= vfio_ccw_mdev_write,
	.ioctl			= vfio_ccw_mdev_ioctl,
};
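
/* Called by the vfio_ccw driver to (un)register the parent device. */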
int vfio_ccw_mdev_reg(struct subchannel *sch)
{
	return mdev_register_device(&sch->dev, &vfio_ccw_mdev_ops);
}

void vfio_ccw_mdev_unreg(struct subchannel *sch)
{
	mdev_unregister_device(&sch->dev);
}