// SPDX-License-Identifier: GPL-2.0
/*
 * Mediated virtual PCI display host device driver
 *
 * See mdpy-defs.h for device specs
 *
 *   (c) Gerd Hoffmann <kraxel@redhat.com>
 *
 * based on mtty driver which is:
 *   Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
 *     Author: Neo Jia <cjia@nvidia.com>
 *             Kirti Wankhede <kwankhede@nvidia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/cdev.h>
#include <linux/vfio.h>
#include <linux/iommu.h>
#include <linux/sysfs.h>
#include <linux/mdev.h>
#include <linux/pci.h>
#include <drm/drm_fourcc.h>
#include "mdpy-defs.h"

#define MDPY_NAME		"mdpy"
#define MDPY_CLASS_NAME		"mdpy"

#define MDPY_CONFIG_SPACE_SIZE	0xff
#define MDPY_MEMORY_BAR_OFFSET	PAGE_SIZE
#define MDPY_DISPLAY_REGION	16

#define STORE_LE16(addr, val)	(*(u16 *)addr = val)
#define STORE_LE32(addr, val)	(*(u32 *)addr = val)
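
/*
 * Note: STORE_LE16/STORE_LE32 are plain native-endian stores, so the
 * emulated config space below is little-endian only on little-endian
 * hosts -- a simplification this sample relies on.
 */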

MODULE_LICENSE("GPL v2");

static int max_devices = 4;
module_param_named(count, max_devices, int, 0444);
MODULE_PARM_DESC(count, "number of " MDPY_NAME " devices");

#define MDPY_TYPE_1 "vga"
#define MDPY_TYPE_2 "xga"
#define MDPY_TYPE_3 "hd"

static const struct mdpy_type {
	const char *name;
	u32 format;
	u32 bytepp;
	u32 width;
	u32 height;
} mdpy_types[] = {
	{
		/* geometry follows the type name: "vga" is 640x480 */
		.name	= MDPY_CLASS_NAME "-" MDPY_TYPE_1,
		.format = DRM_FORMAT_XRGB8888,
		.bytepp = 4,
		.width	= 640,
		.height = 480,
	}, {
		/* "xga" is 1024x768 */
		.name	= MDPY_CLASS_NAME "-" MDPY_TYPE_2,
		.format = DRM_FORMAT_XRGB8888,
		.bytepp = 4,
		.width	= 1024,
		.height = 768,
	}, {
		/* "hd" is 1920x1080 */
		.name	= MDPY_CLASS_NAME "-" MDPY_TYPE_3,
		.format = DRM_FORMAT_XRGB8888,
		.bytepp = 4,
		.width	= 1920,
		.height = 1080,
	},
};

static dev_t		mdpy_devt;
static struct class	*mdpy_class;
static struct cdev	mdpy_cdev;
static struct device	mdpy_dev;
static u32		mdpy_count;

/* State of each mdev device */
struct mdev_state {
	u8 *vconfig;
	u32 bar_mask;
	struct mutex ops_lock;
	struct mdev_device *mdev;
	struct vfio_device_info dev_info;

	const struct mdpy_type *type;
	u32 memsize;
	void *memblk;
};

static const struct mdpy_type *mdpy_find_type(struct kobject *kobj)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mdpy_types); i++)
		if (strcmp(mdpy_types[i].name, kobj->name) == 0)
			return mdpy_types + i;
	return NULL;
}

static void mdpy_create_config_space(struct mdev_state *mdev_state)
{
	STORE_LE16((u16 *) &mdev_state->vconfig[PCI_VENDOR_ID],
		   MDPY_PCI_VENDOR_ID);
	STORE_LE16((u16 *) &mdev_state->vconfig[PCI_DEVICE_ID],
		   MDPY_PCI_DEVICE_ID);
	STORE_LE16((u16 *) &mdev_state->vconfig[PCI_SUBSYSTEM_VENDOR_ID],
		   MDPY_PCI_SUBVENDOR_ID);
	STORE_LE16((u16 *) &mdev_state->vconfig[PCI_SUBSYSTEM_ID],
		   MDPY_PCI_SUBDEVICE_ID);

	STORE_LE16((u16 *) &mdev_state->vconfig[PCI_COMMAND],
		   PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
	STORE_LE16((u16 *) &mdev_state->vconfig[PCI_STATUS],
		   PCI_STATUS_CAP_LIST);
	STORE_LE16((u16 *) &mdev_state->vconfig[PCI_CLASS_DEVICE],
		   PCI_CLASS_DISPLAY_OTHER);
	mdev_state->vconfig[PCI_CLASS_REVISION] = 0x01;

	STORE_LE32((u32 *) &mdev_state->vconfig[PCI_BASE_ADDRESS_0],
		   PCI_BASE_ADDRESS_SPACE_MEMORY |
		   PCI_BASE_ADDRESS_MEM_TYPE_32	 |
		   PCI_BASE_ADDRESS_MEM_PREFETCH);
	/* memsize is a power of two, so this yields the BAR size mask */
	mdev_state->bar_mask = ~(mdev_state->memsize) + 1;

	/* vendor specific capability for the config registers */
	mdev_state->vconfig[PCI_CAPABILITY_LIST]       = MDPY_VENDORCAP_OFFSET;
	mdev_state->vconfig[MDPY_VENDORCAP_OFFSET + 0] = 0x09; /* vendor cap */
	mdev_state->vconfig[MDPY_VENDORCAP_OFFSET + 1] = 0x00; /* next ptr */
	mdev_state->vconfig[MDPY_VENDORCAP_OFFSET + 2] = MDPY_VENDORCAP_SIZE;
	STORE_LE32((u32 *) &mdev_state->vconfig[MDPY_FORMAT_OFFSET],
		   mdev_state->type->format);
	STORE_LE32((u32 *) &mdev_state->vconfig[MDPY_WIDTH_OFFSET],
		   mdev_state->type->width);
	STORE_LE32((u32 *) &mdev_state->vconfig[MDPY_HEIGHT_OFFSET],
		   mdev_state->type->height);
}
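
/*
 * The vendor capability written above mirrors the framebuffer format,
 * width and height into PCI config space, so a guest driver can discover
 * the display geometry without any extra MMIO registers.
 */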

static void handle_pci_cfg_write(struct mdev_state *mdev_state, u16 offset,
				 char *buf, u32 count)
{
	struct device *dev = mdev_dev(mdev_state->mdev);
	u32 cfg_addr;

	switch (offset) {
	case PCI_BASE_ADDRESS_0:
		cfg_addr = *(u32 *)buf;

		if (cfg_addr == 0xffffffff) {
			/* BAR sizing cycle: report the size mask */
			cfg_addr = (cfg_addr & mdev_state->bar_mask);
		} else {
			cfg_addr &= PCI_BASE_ADDRESS_MEM_MASK;
			if (cfg_addr)
				dev_info(dev, "BAR0 @ 0x%x\n", cfg_addr);
		}

		cfg_addr |= (mdev_state->vconfig[offset] &
			     ~PCI_BASE_ADDRESS_MEM_MASK);
		STORE_LE32(&mdev_state->vconfig[offset], cfg_addr);
		break;
	}
}

static ssize_t mdev_access(struct mdev_device *mdev, char *buf, size_t count,
			   loff_t pos, bool is_write)
{
	struct mdev_state *mdev_state = mdev_get_drvdata(mdev);
	struct device *dev = mdev_dev(mdev);
	int ret = 0;

	mutex_lock(&mdev_state->ops_lock);

	if (pos < MDPY_CONFIG_SPACE_SIZE) {
		if (is_write)
			handle_pci_cfg_write(mdev_state, pos, buf, count);
		else
			memcpy(buf, (mdev_state->vconfig + pos), count);

	} else if ((pos >= MDPY_MEMORY_BAR_OFFSET) &&
		   (pos + count <=
		    MDPY_MEMORY_BAR_OFFSET + mdev_state->memsize)) {
		/* pos is now the offset into the framebuffer */
		pos -= MDPY_MEMORY_BAR_OFFSET;
		if (is_write)
			memcpy(mdev_state->memblk + pos, buf, count);
		else
			memcpy(buf, mdev_state->memblk + pos, count);

	} else {
		dev_info(dev, "%s: %s @0x%llx (unhandled)\n",
			 __func__, is_write ? "WR" : "RD", pos);
		ret = -1;
		goto accessfailed;
	}

	ret = count;

accessfailed:
	mutex_unlock(&mdev_state->ops_lock);

	return ret;
}
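
/*
 * Address map used by mdev_access(): offsets below MDPY_CONFIG_SPACE_SIZE
 * hit the virtual PCI config space, offsets from MDPY_MEMORY_BAR_OFFSET up
 * to MDPY_MEMORY_BAR_OFFSET + memsize hit the framebuffer; everything else
 * is rejected.
 */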

static int mdpy_reset(struct mdev_device *mdev)
{
	struct mdev_state *mdev_state = mdev_get_drvdata(mdev);
	u32 stride, i;

	/* initialize with gray gradient */
	stride = mdev_state->type->width * mdev_state->type->bytepp;
	for (i = 0; i < mdev_state->type->height; i++)
		memset(mdev_state->memblk + i * stride,
		       i * 255 / mdev_state->type->height,
		       stride);
	return 0;
}
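
/*
 * mdpy_reset() paints a vertical gray gradient: row i of the framebuffer is
 * filled with the byte value i * 255 / height, so the picture runs from
 * black at the top to (almost) white at the bottom.
 */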

static int mdpy_create(struct kobject *kobj, struct mdev_device *mdev)
{
	const struct mdpy_type *type = mdpy_find_type(kobj);
	struct device *dev = mdev_dev(mdev);
	struct mdev_state *mdev_state;
	u32 fbsize;

	if (mdpy_count >= max_devices)
		return -ENOMEM;

	mdev_state = kzalloc(sizeof(struct mdev_state), GFP_KERNEL);
	if (mdev_state == NULL)
		return -ENOMEM;

	mdev_state->vconfig = kzalloc(MDPY_CONFIG_SPACE_SIZE, GFP_KERNEL);
	if (mdev_state->vconfig == NULL) {
		kfree(mdev_state);
		return -ENOMEM;
	}

	if (!type)
		type = &mdpy_types[0];
	fbsize = roundup_pow_of_two(type->width * type->height * type->bytepp);

	mdev_state->memblk = vmalloc_user(fbsize);
	if (!mdev_state->memblk) {
		kfree(mdev_state->vconfig);
		kfree(mdev_state);
		return -ENOMEM;
	}
	dev_info(dev, "%s: %s (%dx%d)\n",
		 __func__, kobj->name, type->width, type->height);

	mutex_init(&mdev_state->ops_lock);
	mdev_state->mdev = mdev;
	mdev_set_drvdata(mdev, mdev_state);

	mdev_state->type    = type;
	mdev_state->memsize = fbsize;
	mdpy_create_config_space(mdev_state);
	mdpy_reset(mdev);

	mdpy_count++;
	return 0;
}

static int mdpy_remove(struct mdev_device *mdev)
{
	struct mdev_state *mdev_state = mdev_get_drvdata(mdev);
	struct device *dev = mdev_dev(mdev);

	dev_info(dev, "%s\n", __func__);

	mdev_set_drvdata(mdev, NULL);
	vfree(mdev_state->memblk);
	kfree(mdev_state->vconfig);
	kfree(mdev_state);

	mdpy_count--;
	return 0;
}

static ssize_t mdpy_read(struct mdev_device *mdev, char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned int done = 0;
	int ret;

	while (count) {
		size_t filled;

		if (count >= 4 && !(*ppos % 4)) {
			u32 val;

			ret = mdev_access(mdev, (char *)&val, sizeof(val),
					  *ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 4;
		} else if (count >= 2 && !(*ppos % 2)) {
			u16 val;

			ret = mdev_access(mdev, (char *)&val, sizeof(val),
					  *ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 2;
		} else {
			u8 val;

			ret = mdev_access(mdev, (char *)&val, sizeof(val),
					  *ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 1;
		}

		count -= filled;
		done += filled;
		*ppos += filled;
		buf += filled;
	}

	return done;

read_err:
	return -EFAULT;
}

static ssize_t mdpy_write(struct mdev_device *mdev, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	unsigned int done = 0;
	int ret;

	while (count) {
		size_t filled;

		if (count >= 4 && !(*ppos % 4)) {
			u32 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = mdev_access(mdev, (char *)&val, sizeof(val),
					  *ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 4;
		} else if (count >= 2 && !(*ppos % 2)) {
			u16 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = mdev_access(mdev, (char *)&val, sizeof(val),
					  *ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 2;
		} else {
			u8 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = mdev_access(mdev, (char *)&val, sizeof(val),
					  *ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 1;
		}

		count -= filled;
		done += filled;
		*ppos += filled;
		buf += filled;
	}

	return done;

write_err:
	return -EFAULT;
}

static int mdpy_mmap(struct mdev_device *mdev, struct vm_area_struct *vma)
{
	struct mdev_state *mdev_state = mdev_get_drvdata(mdev);

	if (vma->vm_pgoff != MDPY_MEMORY_BAR_OFFSET >> PAGE_SHIFT)
		return -EINVAL;
	if (vma->vm_end < vma->vm_start)
		return -EINVAL;
	if (vma->vm_end - vma->vm_start > mdev_state->memsize)
		return -EINVAL;
	if ((vma->vm_flags & VM_SHARED) == 0)
		return -EINVAL;

	return remap_vmalloc_range_partial(vma, vma->vm_start,
					   mdev_state->memblk, 0,
					   vma->vm_end - vma->vm_start);
}
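
/*
 * Userspace maps the framebuffer by mmap()ing the BAR0/display region at
 * offset MDPY_MEMORY_BAR_OFFSET, as reported by mdpy_get_region_info()
 * below; mdpy_mmap() backs that mapping with the vmalloc'ed memblk.
 */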

static int mdpy_get_region_info(struct mdev_device *mdev,
				struct vfio_region_info *region_info,
				u16 *cap_type_id, void **cap_type)
{
	struct mdev_state *mdev_state;

	mdev_state = mdev_get_drvdata(mdev);
	if (!mdev_state)
		return -EINVAL;

	if (region_info->index >= VFIO_PCI_NUM_REGIONS &&
	    region_info->index != MDPY_DISPLAY_REGION)
		return -EINVAL;

	switch (region_info->index) {
	case VFIO_PCI_CONFIG_REGION_INDEX:
		region_info->offset = 0;
		region_info->size   = MDPY_CONFIG_SPACE_SIZE;
		region_info->flags  = (VFIO_REGION_INFO_FLAG_READ |
				       VFIO_REGION_INFO_FLAG_WRITE);
		break;
	case VFIO_PCI_BAR0_REGION_INDEX:
	case MDPY_DISPLAY_REGION:
		region_info->offset = MDPY_MEMORY_BAR_OFFSET;
		region_info->size   = mdev_state->memsize;
		region_info->flags  = (VFIO_REGION_INFO_FLAG_READ  |
				       VFIO_REGION_INFO_FLAG_WRITE |
				       VFIO_REGION_INFO_FLAG_MMAP);
		break;
	default:
		region_info->size   = 0;
		region_info->offset = 0;
		region_info->flags  = 0;
	}

	return 0;
}

static int mdpy_get_irq_info(struct mdev_device *mdev,
			     struct vfio_irq_info *irq_info)
{
	/* the device raises no interrupts: every index reports count zero */
	irq_info->count = 0;
	return 0;
}

static int mdpy_get_device_info(struct mdev_device *mdev,
				struct vfio_device_info *dev_info)
{
	dev_info->flags = VFIO_DEVICE_FLAGS_PCI;
	dev_info->num_regions = VFIO_PCI_NUM_REGIONS;
	dev_info->num_irqs = VFIO_PCI_NUM_IRQS;
	return 0;
}

static int mdpy_query_gfx_plane(struct mdev_device *mdev,
				struct vfio_device_gfx_plane_info *plane)
{
	struct mdev_state *mdev_state = mdev_get_drvdata(mdev);

	if (plane->flags & VFIO_GFX_PLANE_TYPE_PROBE) {
		if (plane->flags == (VFIO_GFX_PLANE_TYPE_PROBE |
				     VFIO_GFX_PLANE_TYPE_REGION))
			return 0;
		return -EINVAL;
	}

	if (plane->flags != VFIO_GFX_PLANE_TYPE_REGION)
		return -EINVAL;

	plane->drm_format     = mdev_state->type->format;
	plane->width	      = mdev_state->type->width;
	plane->height	      = mdev_state->type->height;
	plane->stride	      = (mdev_state->type->width *
				 mdev_state->type->bytepp);
	plane->size	      = mdev_state->memsize;
	plane->region_index   = MDPY_DISPLAY_REGION;

	plane->drm_format_mod = 0;
	return 0;
}
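
/*
 * The plane is reported as a region-type plane: display clients read the
 * pixels straight out of the mmap'able MDPY_DISPLAY_REGION region rather
 * than importing a dma-buf.
 */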

static long mdpy_ioctl(struct mdev_device *mdev, unsigned int cmd,
		       unsigned long arg)
{
	int ret = 0;
	unsigned long minsz;
	struct mdev_state *mdev_state;

	mdev_state = mdev_get_drvdata(mdev);

	switch (cmd) {
	case VFIO_DEVICE_GET_INFO:
	{
		struct vfio_device_info info;

		minsz = offsetofend(struct vfio_device_info, num_irqs);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		ret = mdpy_get_device_info(mdev, &info);
		if (ret)
			return ret;

		memcpy(&mdev_state->dev_info, &info, sizeof(info));

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;

		return 0;
	}
	case VFIO_DEVICE_GET_REGION_INFO:
	{
		struct vfio_region_info info;
		u16 cap_type_id = 0;
		void *cap_type = NULL;

		minsz = offsetofend(struct vfio_region_info, offset);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		ret = mdpy_get_region_info(mdev, &info, &cap_type_id,
					   &cap_type);
		if (ret)
			return ret;

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;

		return 0;
	}
	case VFIO_DEVICE_GET_IRQ_INFO:
	{
		struct vfio_irq_info info;

		minsz = offsetofend(struct vfio_irq_info, count);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if ((info.argsz < minsz) ||
		    (info.index >= mdev_state->dev_info.num_irqs))
			return -EINVAL;

		ret = mdpy_get_irq_info(mdev, &info);
		if (ret)
			return ret;

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;

		return 0;
	}
	case VFIO_DEVICE_QUERY_GFX_PLANE:
	{
		struct vfio_device_gfx_plane_info plane;

		minsz = offsetofend(struct vfio_device_gfx_plane_info,
				    region_index);

		if (copy_from_user(&plane, (void __user *)arg, minsz))
			return -EFAULT;

		if (plane.argsz < minsz)
			return -EINVAL;

		ret = mdpy_query_gfx_plane(mdev, &plane);
		if (ret)
			return ret;

		if (copy_to_user((void __user *)arg, &plane, minsz))
			return -EFAULT;

		return 0;
	}
	case VFIO_DEVICE_SET_IRQS:
		return -EINVAL;

	case VFIO_DEVICE_RESET:
		return mdpy_reset(mdev);
	}
	return -ENOTTY;
}

static int mdpy_open(struct mdev_device *mdev)
{
	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	return 0;
}

static void mdpy_close(struct mdev_device *mdev)
{
	module_put(THIS_MODULE);
}

static ssize_t
resolution_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct mdev_device *mdev = mdev_from_dev(dev);
	struct mdev_state *mdev_state = mdev_get_drvdata(mdev);

	return sprintf(buf, "%dx%d\n",
		       mdev_state->type->width,
		       mdev_state->type->height);
}
static DEVICE_ATTR_RO(resolution);

static struct attribute *mdev_dev_attrs[] = {
	&dev_attr_resolution.attr,
	NULL,
};

static const struct attribute_group mdev_dev_group = {
	.attrs = mdev_dev_attrs,
};

const struct attribute_group *mdev_dev_groups[] = {
	&mdev_dev_group,
	NULL,
};

static ssize_t
name_show(struct kobject *kobj, struct device *dev, char *buf)
{
	return sprintf(buf, "%s\n", kobj->name);
}
MDEV_TYPE_ATTR_RO(name);

static ssize_t
description_show(struct kobject *kobj, struct device *dev, char *buf)
{
	const struct mdpy_type *type = mdpy_find_type(kobj);

	return sprintf(buf, "virtual display, %dx%d framebuffer\n",
		       type ? type->width  : 0,
		       type ? type->height : 0);
}
MDEV_TYPE_ATTR_RO(description);

static ssize_t
available_instances_show(struct kobject *kobj, struct device *dev, char *buf)
{
	return sprintf(buf, "%d\n", max_devices - mdpy_count);
}
MDEV_TYPE_ATTR_RO(available_instances);

static ssize_t device_api_show(struct kobject *kobj, struct device *dev,
			       char *buf)
{
	return sprintf(buf, "%s\n", VFIO_DEVICE_API_PCI_STRING);
}
MDEV_TYPE_ATTR_RO(device_api);

static struct attribute *mdev_types_attrs[] = {
	&mdev_type_attr_name.attr,
	&mdev_type_attr_description.attr,
	&mdev_type_attr_device_api.attr,
	&mdev_type_attr_available_instances.attr,
	NULL,
};

static struct attribute_group mdev_type_group1 = {
	.name  = MDPY_TYPE_1,
	.attrs = mdev_types_attrs,
};

static struct attribute_group mdev_type_group2 = {
	.name  = MDPY_TYPE_2,
	.attrs = mdev_types_attrs,
};

static struct attribute_group mdev_type_group3 = {
	.name  = MDPY_TYPE_3,
	.attrs = mdev_types_attrs,
};

static struct attribute_group *mdev_type_groups[] = {
	&mdev_type_group1,
	&mdev_type_group2,
	&mdev_type_group3,
	NULL,
};

static const struct mdev_parent_ops mdev_fops = {
	.owner			= THIS_MODULE,
	.mdev_attr_groups	= mdev_dev_groups,
	.supported_type_groups	= mdev_type_groups,
	.create			= mdpy_create,
	.remove			= mdpy_remove,
	.open			= mdpy_open,
	.release		= mdpy_close,
	.read			= mdpy_read,
	.write			= mdpy_write,
	.ioctl			= mdpy_ioctl,
	.mmap			= mdpy_mmap,
};

static const struct file_operations vd_fops = {
	.owner		= THIS_MODULE,
};

static void mdpy_device_release(struct device *dev)
{
	/* nothing */
}

static int __init mdpy_dev_init(void)
{
	int ret = 0;

	ret = alloc_chrdev_region(&mdpy_devt, 0, MINORMASK + 1, MDPY_NAME);
	if (ret < 0) {
		pr_err("Error: failed to register mdpy_dev, err: %d\n", ret);
		return ret;
	}
	cdev_init(&mdpy_cdev, &vd_fops);
	cdev_add(&mdpy_cdev, mdpy_devt, MINORMASK + 1);
	pr_info("%s: major %d\n", __func__, MAJOR(mdpy_devt));

	mdpy_class = class_create(THIS_MODULE, MDPY_CLASS_NAME);
	if (IS_ERR(mdpy_class)) {
		pr_err("Error: failed to register mdpy_dev class\n");
		ret = PTR_ERR(mdpy_class);
		goto failed1;
	}
	mdpy_dev.class = mdpy_class;
	mdpy_dev.release = mdpy_device_release;
	dev_set_name(&mdpy_dev, "%s", MDPY_NAME);

	ret = device_register(&mdpy_dev);
	if (ret)
		goto failed2;

	ret = mdev_register_device(&mdpy_dev, &mdev_fops);
	if (ret)
		goto failed3;

	return 0;

failed3:
	device_unregister(&mdpy_dev);
failed2:
	class_destroy(mdpy_class);
failed1:
	cdev_del(&mdpy_cdev);
	unregister_chrdev_region(mdpy_devt, MINORMASK + 1);
	return ret;
}

static void __exit mdpy_dev_exit(void)
{
	mdev_unregister_device(&mdpy_dev);

	device_unregister(&mdpy_dev);
	cdev_del(&mdpy_cdev);
	unregister_chrdev_region(mdpy_devt, MINORMASK + 1);
	class_destroy(mdpy_class);
}

module_init(mdpy_dev_init)
module_exit(mdpy_dev_exit)