// SPDX-License-Identifier: GPL-2.0
/*
 * Mediated virtual PCI display host device driver
 *
 * Emulate enough of qemu stdvga to make bochs-drm.ko happy. That is
 * basically the vram memory bar and the bochs dispi interface vbe
 * registers in the mmio register bar. Specifically it does *not*
 * include any legacy vga stuff. Device looks a lot like "qemu -device
 * secondary-vga".
 *
 * (c) Gerd Hoffmann <kraxel@redhat.com>
 *
 * based on mtty driver which is:
 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
 *    Author: Neo Jia <cjia@nvidia.com>
 *            Kirti Wankhede <kwankhede@nvidia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
22 #include <linux/init.h>
23 #include <linux/module.h>
24 #include <linux/kernel.h>
25 #include <linux/slab.h>
26 #include <linux/vmalloc.h>
27 #include <linux/cdev.h>
28 #include <linux/vfio.h>
29 #include <linux/iommu.h>
30 #include <linux/sysfs.h>
31 #include <linux/mdev.h>
32 #include <linux/pci.h>
33 #include <linux/dma-buf.h>
34 #include <linux/highmem.h>
35 #include <drm/drm_fourcc.h>
36 #include <drm/drm_rect.h>
37 #include <drm/drm_modeset_lock.h>
38 #include <drm/drm_property.h>
39 #include <drm/drm_plane.h>
/*
 * Bochs VBE DISPI register indices, accessed by the guest through the
 * mmio register bar (see handle_mmio_read/write, offsets 0x500+).
 */
#define VBE_DISPI_INDEX_ID		0x0
#define VBE_DISPI_INDEX_XRES		0x1
#define VBE_DISPI_INDEX_YRES		0x2
#define VBE_DISPI_INDEX_BPP		0x3
#define VBE_DISPI_INDEX_ENABLE		0x4
#define VBE_DISPI_INDEX_BANK		0x5
#define VBE_DISPI_INDEX_VIRT_WIDTH	0x6
#define VBE_DISPI_INDEX_VIRT_HEIGHT	0x7
#define VBE_DISPI_INDEX_X_OFFSET	0x8
#define VBE_DISPI_INDEX_Y_OFFSET	0x9
#define VBE_DISPI_INDEX_VIDEO_MEMORY_64K 0xa
#define VBE_DISPI_INDEX_COUNT		0xb

/* Interface revision ids reported via VBE_DISPI_INDEX_ID. */
#define VBE_DISPI_ID0			0xB0C0
#define VBE_DISPI_ID1			0xB0C1
#define VBE_DISPI_ID2			0xB0C2
#define VBE_DISPI_ID3			0xB0C3
#define VBE_DISPI_ID4			0xB0C4
#define VBE_DISPI_ID5			0xB0C5

/* Bit flags of the VBE_DISPI_INDEX_ENABLE register. */
#define VBE_DISPI_DISABLED		0x00
#define VBE_DISPI_ENABLED		0x01
#define VBE_DISPI_GETCAPS		0x02
#define VBE_DISPI_8BIT_DAC		0x20
#define VBE_DISPI_LFB_ENABLED		0x40
#define VBE_DISPI_NOCLEARMEM		0x80
70 #define MBOCHS_NAME "mbochs"
71 #define MBOCHS_CLASS_NAME "mbochs"
73 #define MBOCHS_EDID_REGION_INDEX VFIO_PCI_NUM_REGIONS
74 #define MBOCHS_NUM_REGIONS (MBOCHS_EDID_REGION_INDEX+1)
76 #define MBOCHS_CONFIG_SPACE_SIZE 0xff
77 #define MBOCHS_MMIO_BAR_OFFSET PAGE_SIZE
78 #define MBOCHS_MMIO_BAR_SIZE PAGE_SIZE
79 #define MBOCHS_EDID_OFFSET (MBOCHS_MMIO_BAR_OFFSET + \
81 #define MBOCHS_EDID_SIZE PAGE_SIZE
82 #define MBOCHS_MEMORY_BAR_OFFSET (MBOCHS_EDID_OFFSET + \
85 #define MBOCHS_EDID_BLOB_OFFSET (MBOCHS_EDID_SIZE/2)
/*
 * Store a 16/32 bit value into the emulated pci config space.
 *
 * Arguments are now fully parenthesized so the macros expand safely
 * with expression arguments (the originals expanded "addr" and "val"
 * bare, which mis-binds for arguments like "p + 4" or "a | b").
 * NOTE(review): this is a plain host-order store, so it is only
 * little-endian on little-endian hosts -- matches upstream behavior.
 */
#define STORE_LE16(addr, val)	(*(u16 *)(addr) = (val))
#define STORE_LE32(addr, val)	(*(u32 *)(addr) = (val))
91 MODULE_DESCRIPTION("Mediated virtual PCI display host device driver");
92 MODULE_LICENSE("GPL v2");
94 static int max_mbytes
= 256;
95 module_param_named(count
, max_mbytes
, int, 0444);
96 MODULE_PARM_DESC(mem
, "megabytes available to " MBOCHS_NAME
" devices");
99 #define MBOCHS_TYPE_1 "small"
100 #define MBOCHS_TYPE_2 "medium"
101 #define MBOCHS_TYPE_3 "large"
103 static struct mbochs_type
{
104 struct mdev_type type
;
110 .type
.sysfs_name
= MBOCHS_TYPE_1
,
111 .type
.pretty_name
= MBOCHS_CLASS_NAME
"-" MBOCHS_TYPE_1
,
116 .type
.sysfs_name
= MBOCHS_TYPE_2
,
117 .type
.pretty_name
= MBOCHS_CLASS_NAME
"-" MBOCHS_TYPE_2
,
122 .type
.sysfs_name
= MBOCHS_TYPE_3
,
123 .type
.pretty_name
= MBOCHS_CLASS_NAME
"-" MBOCHS_TYPE_3
,
130 static struct mdev_type
*mbochs_mdev_types
[] = {
131 &mbochs_types
[0].type
,
132 &mbochs_types
[1].type
,
133 &mbochs_types
[2].type
,
136 static dev_t mbochs_devt
;
137 static const struct class mbochs_class
= {
138 .name
= MBOCHS_CLASS_NAME
,
140 static struct cdev mbochs_cdev
;
141 static struct device mbochs_dev
;
142 static struct mdev_parent mbochs_parent
;
143 static atomic_t mbochs_avail_mbytes
;
144 static const struct vfio_device_ops mbochs_dev_ops
;
146 struct vfio_region_info_ext
{
147 struct vfio_region_info base
;
148 struct vfio_region_info_cap_type type
;
162 struct mbochs_dmabuf
{
163 struct mbochs_mode mode
;
168 struct mdev_state
*mdev_state
;
169 struct list_head next
;
173 /* State of each mdev device */
175 struct vfio_device vdev
;
179 struct mutex ops_lock
;
180 struct mdev_device
*mdev
;
182 const struct mbochs_type
*type
;
183 u16 vbe
[VBE_DISPI_INDEX_COUNT
];
187 struct vfio_region_gfx_edid edid_regs
;
190 struct list_head dmabufs
;
195 static const char *vbe_name_list
[VBE_DISPI_INDEX_COUNT
] = {
196 [VBE_DISPI_INDEX_ID
] = "id",
197 [VBE_DISPI_INDEX_XRES
] = "xres",
198 [VBE_DISPI_INDEX_YRES
] = "yres",
199 [VBE_DISPI_INDEX_BPP
] = "bpp",
200 [VBE_DISPI_INDEX_ENABLE
] = "enable",
201 [VBE_DISPI_INDEX_BANK
] = "bank",
202 [VBE_DISPI_INDEX_VIRT_WIDTH
] = "virt-width",
203 [VBE_DISPI_INDEX_VIRT_HEIGHT
] = "virt-height",
204 [VBE_DISPI_INDEX_X_OFFSET
] = "x-offset",
205 [VBE_DISPI_INDEX_Y_OFFSET
] = "y-offset",
206 [VBE_DISPI_INDEX_VIDEO_MEMORY_64K
] = "video-mem",
209 static const char *vbe_name(u32 index
)
211 if (index
< ARRAY_SIZE(vbe_name_list
))
212 return vbe_name_list
[index
];
216 static struct page
*__mbochs_get_page(struct mdev_state
*mdev_state
,
218 static struct page
*mbochs_get_page(struct mdev_state
*mdev_state
,
221 static void mbochs_create_config_space(struct mdev_state
*mdev_state
)
223 STORE_LE16((u16
*) &mdev_state
->vconfig
[PCI_VENDOR_ID
],
225 STORE_LE16((u16
*) &mdev_state
->vconfig
[PCI_DEVICE_ID
],
227 STORE_LE16((u16
*) &mdev_state
->vconfig
[PCI_SUBSYSTEM_VENDOR_ID
],
228 PCI_SUBVENDOR_ID_REDHAT_QUMRANET
);
229 STORE_LE16((u16
*) &mdev_state
->vconfig
[PCI_SUBSYSTEM_ID
],
230 PCI_SUBDEVICE_ID_QEMU
);
232 STORE_LE16((u16
*) &mdev_state
->vconfig
[PCI_COMMAND
],
233 PCI_COMMAND_IO
| PCI_COMMAND_MEMORY
);
234 STORE_LE16((u16
*) &mdev_state
->vconfig
[PCI_CLASS_DEVICE
],
235 PCI_CLASS_DISPLAY_OTHER
);
236 mdev_state
->vconfig
[PCI_CLASS_REVISION
] = 0x01;
238 STORE_LE32((u32
*) &mdev_state
->vconfig
[PCI_BASE_ADDRESS_0
],
239 PCI_BASE_ADDRESS_SPACE_MEMORY
|
240 PCI_BASE_ADDRESS_MEM_TYPE_32
|
241 PCI_BASE_ADDRESS_MEM_PREFETCH
);
242 mdev_state
->bar_mask
[0] = ~(mdev_state
->memsize
) + 1;
244 STORE_LE32((u32
*) &mdev_state
->vconfig
[PCI_BASE_ADDRESS_2
],
245 PCI_BASE_ADDRESS_SPACE_MEMORY
|
246 PCI_BASE_ADDRESS_MEM_TYPE_32
);
247 mdev_state
->bar_mask
[2] = ~(MBOCHS_MMIO_BAR_SIZE
) + 1;
250 static int mbochs_check_framebuffer(struct mdev_state
*mdev_state
,
251 struct mbochs_mode
*mode
)
253 struct device
*dev
= mdev_dev(mdev_state
->mdev
);
254 u16
*vbe
= mdev_state
->vbe
;
257 WARN_ON(!mutex_is_locked(&mdev_state
->ops_lock
));
259 if (!(vbe
[VBE_DISPI_INDEX_ENABLE
] & VBE_DISPI_ENABLED
))
262 memset(mode
, 0, sizeof(*mode
));
263 switch (vbe
[VBE_DISPI_INDEX_BPP
]) {
265 mode
->drm_format
= DRM_FORMAT_XRGB8888
;
269 dev_info_ratelimited(dev
, "%s: bpp %d not supported\n",
270 __func__
, vbe
[VBE_DISPI_INDEX_BPP
]);
274 mode
->width
= vbe
[VBE_DISPI_INDEX_XRES
];
275 mode
->height
= vbe
[VBE_DISPI_INDEX_YRES
];
276 virt_width
= vbe
[VBE_DISPI_INDEX_VIRT_WIDTH
];
277 if (virt_width
< mode
->width
)
278 virt_width
= mode
->width
;
279 mode
->stride
= virt_width
* mode
->bytepp
;
280 mode
->size
= (u64
)mode
->stride
* mode
->height
;
281 mode
->offset
= ((u64
)vbe
[VBE_DISPI_INDEX_X_OFFSET
] * mode
->bytepp
+
282 (u64
)vbe
[VBE_DISPI_INDEX_Y_OFFSET
] * mode
->stride
);
284 if (mode
->width
< 64 || mode
->height
< 64) {
285 dev_info_ratelimited(dev
, "%s: invalid resolution %dx%d\n",
286 __func__
, mode
->width
, mode
->height
);
289 if (mode
->offset
+ mode
->size
> mdev_state
->memsize
) {
290 dev_info_ratelimited(dev
, "%s: framebuffer memory overflow\n",
298 memset(mode
, 0, sizeof(*mode
));
302 static bool mbochs_modes_equal(struct mbochs_mode
*mode1
,
303 struct mbochs_mode
*mode2
)
305 return memcmp(mode1
, mode2
, sizeof(struct mbochs_mode
)) == 0;
308 static void handle_pci_cfg_write(struct mdev_state
*mdev_state
, u16 offset
,
309 char *buf
, u32 count
)
311 struct device
*dev
= mdev_dev(mdev_state
->mdev
);
312 int index
= (offset
- PCI_BASE_ADDRESS_0
) / 0x04;
316 case PCI_BASE_ADDRESS_0
:
317 case PCI_BASE_ADDRESS_2
:
318 cfg_addr
= *(u32
*)buf
;
320 if (cfg_addr
== 0xffffffff) {
321 cfg_addr
= (cfg_addr
& mdev_state
->bar_mask
[index
]);
323 cfg_addr
&= PCI_BASE_ADDRESS_MEM_MASK
;
325 dev_info(dev
, "BAR #%d @ 0x%x\n",
329 cfg_addr
|= (mdev_state
->vconfig
[offset
] &
330 ~PCI_BASE_ADDRESS_MEM_MASK
);
331 STORE_LE32(&mdev_state
->vconfig
[offset
], cfg_addr
);
336 static void handle_mmio_write(struct mdev_state
*mdev_state
, u16 offset
,
337 char *buf
, u32 count
)
339 struct device
*dev
= mdev_dev(mdev_state
->mdev
);
344 case 0x400 ... 0x41f: /* vga ioports remapped */
346 case 0x500 ... 0x515: /* bochs dispi interface */
349 index
= (offset
- 0x500) / 2;
351 if (index
< ARRAY_SIZE(mdev_state
->vbe
))
352 mdev_state
->vbe
[index
] = reg16
;
353 dev_dbg(dev
, "%s: vbe write %d = %d (%s)\n",
354 __func__
, index
, reg16
, vbe_name(index
));
356 case 0x600 ... 0x607: /* qemu extended regs */
360 dev_dbg(dev
, "%s: @0x%03x, count %d (unhandled)\n",
361 __func__
, offset
, count
);
366 static void handle_mmio_read(struct mdev_state
*mdev_state
, u16 offset
,
367 char *buf
, u32 count
)
369 struct device
*dev
= mdev_dev(mdev_state
->mdev
);
370 struct vfio_region_gfx_edid
*edid
;
375 case 0x000 ... 0x3ff: /* edid block */
376 edid
= &mdev_state
->edid_regs
;
377 if (edid
->link_state
!= VFIO_DEVICE_GFX_LINK_STATE_UP
||
378 offset
>= edid
->edid_size
) {
379 memset(buf
, 0, count
);
382 memcpy(buf
, mdev_state
->edid_blob
+ offset
, count
);
384 case 0x500 ... 0x515: /* bochs dispi interface */
387 index
= (offset
- 0x500) / 2;
388 if (index
< ARRAY_SIZE(mdev_state
->vbe
))
389 reg16
= mdev_state
->vbe
[index
];
390 dev_dbg(dev
, "%s: vbe read %d = %d (%s)\n",
391 __func__
, index
, reg16
, vbe_name(index
));
396 dev_dbg(dev
, "%s: @0x%03x, count %d (unhandled)\n",
397 __func__
, offset
, count
);
398 memset(buf
, 0, count
);
403 static void handle_edid_regs(struct mdev_state
*mdev_state
, u16 offset
,
404 char *buf
, u32 count
, bool is_write
)
406 char *regs
= (void *)&mdev_state
->edid_regs
;
408 if (offset
+ count
> sizeof(mdev_state
->edid_regs
))
417 case offsetof(struct vfio_region_gfx_edid
, link_state
):
418 case offsetof(struct vfio_region_gfx_edid
, edid_size
):
419 memcpy(regs
+ offset
, buf
, count
);
426 memcpy(buf
, regs
+ offset
, count
);
430 static void handle_edid_blob(struct mdev_state
*mdev_state
, u16 offset
,
431 char *buf
, u32 count
, bool is_write
)
433 if (offset
+ count
> mdev_state
->edid_regs
.edid_max_size
)
436 memcpy(mdev_state
->edid_blob
+ offset
, buf
, count
);
438 memcpy(buf
, mdev_state
->edid_blob
+ offset
, count
);
441 static ssize_t
mdev_access(struct mdev_state
*mdev_state
, char *buf
,
442 size_t count
, loff_t pos
, bool is_write
)
449 mutex_lock(&mdev_state
->ops_lock
);
451 if (pos
< MBOCHS_CONFIG_SPACE_SIZE
) {
453 handle_pci_cfg_write(mdev_state
, pos
, buf
, count
);
455 memcpy(buf
, (mdev_state
->vconfig
+ pos
), count
);
457 } else if (pos
>= MBOCHS_MMIO_BAR_OFFSET
&&
458 pos
+ count
<= (MBOCHS_MMIO_BAR_OFFSET
+
459 MBOCHS_MMIO_BAR_SIZE
)) {
460 pos
-= MBOCHS_MMIO_BAR_OFFSET
;
462 handle_mmio_write(mdev_state
, pos
, buf
, count
);
464 handle_mmio_read(mdev_state
, pos
, buf
, count
);
466 } else if (pos
>= MBOCHS_EDID_OFFSET
&&
467 pos
+ count
<= (MBOCHS_EDID_OFFSET
+
469 pos
-= MBOCHS_EDID_OFFSET
;
470 if (pos
< MBOCHS_EDID_BLOB_OFFSET
) {
471 handle_edid_regs(mdev_state
, pos
, buf
, count
, is_write
);
473 pos
-= MBOCHS_EDID_BLOB_OFFSET
;
474 handle_edid_blob(mdev_state
, pos
, buf
, count
, is_write
);
477 } else if (pos
>= MBOCHS_MEMORY_BAR_OFFSET
&&
479 MBOCHS_MEMORY_BAR_OFFSET
+ mdev_state
->memsize
) {
480 pos
-= MBOCHS_MMIO_BAR_OFFSET
;
481 poff
= pos
& ~PAGE_MASK
;
482 pg
= __mbochs_get_page(mdev_state
, pos
>> PAGE_SHIFT
);
485 memcpy(map
+ poff
, buf
, count
);
487 memcpy(buf
, map
+ poff
, count
);
492 dev_dbg(mdev_state
->vdev
.dev
, "%s: %s @0x%llx (unhandled)\n",
493 __func__
, is_write
? "WR" : "RD", pos
);
502 mutex_unlock(&mdev_state
->ops_lock
);
507 static int mbochs_reset(struct mdev_state
*mdev_state
)
509 u32 size64k
= mdev_state
->memsize
/ (64 * 1024);
512 for (i
= 0; i
< ARRAY_SIZE(mdev_state
->vbe
); i
++)
513 mdev_state
->vbe
[i
] = 0;
514 mdev_state
->vbe
[VBE_DISPI_INDEX_ID
] = VBE_DISPI_ID5
;
515 mdev_state
->vbe
[VBE_DISPI_INDEX_VIDEO_MEMORY_64K
] = size64k
;
519 static int mbochs_init_dev(struct vfio_device
*vdev
)
521 struct mdev_state
*mdev_state
=
522 container_of(vdev
, struct mdev_state
, vdev
);
523 struct mdev_device
*mdev
= to_mdev_device(vdev
->dev
);
524 struct mbochs_type
*type
=
525 container_of(mdev
->type
, struct mbochs_type
, type
);
526 int avail_mbytes
= atomic_read(&mbochs_avail_mbytes
);
530 if (avail_mbytes
< type
->mbytes
)
532 } while (!atomic_try_cmpxchg(&mbochs_avail_mbytes
, &avail_mbytes
,
533 avail_mbytes
- type
->mbytes
));
535 mdev_state
->vconfig
= kzalloc(MBOCHS_CONFIG_SPACE_SIZE
, GFP_KERNEL
);
536 if (!mdev_state
->vconfig
)
539 mdev_state
->memsize
= type
->mbytes
* 1024 * 1024;
540 mdev_state
->pagecount
= mdev_state
->memsize
>> PAGE_SHIFT
;
541 mdev_state
->pages
= kcalloc(mdev_state
->pagecount
,
542 sizeof(struct page
*),
544 if (!mdev_state
->pages
)
547 mutex_init(&mdev_state
->ops_lock
);
548 mdev_state
->mdev
= mdev
;
549 INIT_LIST_HEAD(&mdev_state
->dmabufs
);
550 mdev_state
->next_id
= 1;
552 mdev_state
->type
= type
;
553 mdev_state
->edid_regs
.max_xres
= type
->max_x
;
554 mdev_state
->edid_regs
.max_yres
= type
->max_y
;
555 mdev_state
->edid_regs
.edid_offset
= MBOCHS_EDID_BLOB_OFFSET
;
556 mdev_state
->edid_regs
.edid_max_size
= sizeof(mdev_state
->edid_blob
);
557 mbochs_create_config_space(mdev_state
);
558 mbochs_reset(mdev_state
);
560 dev_info(vdev
->dev
, "%s: %s, %d MB, %ld pages\n", __func__
,
561 type
->type
.pretty_name
, type
->mbytes
, mdev_state
->pagecount
);
565 kfree(mdev_state
->vconfig
);
567 atomic_add(type
->mbytes
, &mbochs_avail_mbytes
);
571 static int mbochs_probe(struct mdev_device
*mdev
)
573 struct mdev_state
*mdev_state
;
576 mdev_state
= vfio_alloc_device(mdev_state
, vdev
, &mdev
->dev
,
578 if (IS_ERR(mdev_state
))
579 return PTR_ERR(mdev_state
);
581 ret
= vfio_register_emulated_iommu_dev(&mdev_state
->vdev
);
584 dev_set_drvdata(&mdev
->dev
, mdev_state
);
588 vfio_put_device(&mdev_state
->vdev
);
592 static void mbochs_release_dev(struct vfio_device
*vdev
)
594 struct mdev_state
*mdev_state
=
595 container_of(vdev
, struct mdev_state
, vdev
);
597 atomic_add(mdev_state
->type
->mbytes
, &mbochs_avail_mbytes
);
598 kfree(mdev_state
->pages
);
599 kfree(mdev_state
->vconfig
);
602 static void mbochs_remove(struct mdev_device
*mdev
)
604 struct mdev_state
*mdev_state
= dev_get_drvdata(&mdev
->dev
);
606 vfio_unregister_group_dev(&mdev_state
->vdev
);
607 vfio_put_device(&mdev_state
->vdev
);
610 static ssize_t
mbochs_read(struct vfio_device
*vdev
, char __user
*buf
,
611 size_t count
, loff_t
*ppos
)
613 struct mdev_state
*mdev_state
=
614 container_of(vdev
, struct mdev_state
, vdev
);
615 unsigned int done
= 0;
621 if (count
>= 4 && !(*ppos
% 4)) {
624 ret
= mdev_access(mdev_state
, (char *)&val
, sizeof(val
),
629 if (copy_to_user(buf
, &val
, sizeof(val
)))
633 } else if (count
>= 2 && !(*ppos
% 2)) {
636 ret
= mdev_access(mdev_state
, (char *)&val
, sizeof(val
),
641 if (copy_to_user(buf
, &val
, sizeof(val
)))
648 ret
= mdev_access(mdev_state
, (char *)&val
, sizeof(val
),
653 if (copy_to_user(buf
, &val
, sizeof(val
)))
671 static ssize_t
mbochs_write(struct vfio_device
*vdev
, const char __user
*buf
,
672 size_t count
, loff_t
*ppos
)
674 struct mdev_state
*mdev_state
=
675 container_of(vdev
, struct mdev_state
, vdev
);
676 unsigned int done
= 0;
682 if (count
>= 4 && !(*ppos
% 4)) {
685 if (copy_from_user(&val
, buf
, sizeof(val
)))
688 ret
= mdev_access(mdev_state
, (char *)&val
, sizeof(val
),
694 } else if (count
>= 2 && !(*ppos
% 2)) {
697 if (copy_from_user(&val
, buf
, sizeof(val
)))
700 ret
= mdev_access(mdev_state
, (char *)&val
, sizeof(val
),
709 if (copy_from_user(&val
, buf
, sizeof(val
)))
712 ret
= mdev_access(mdev_state
, (char *)&val
, sizeof(val
),
730 static struct page
*__mbochs_get_page(struct mdev_state
*mdev_state
,
733 WARN_ON(!mutex_is_locked(&mdev_state
->ops_lock
));
735 if (!mdev_state
->pages
[pgoff
]) {
736 mdev_state
->pages
[pgoff
] =
737 alloc_pages(GFP_HIGHUSER
| __GFP_ZERO
, 0);
738 if (!mdev_state
->pages
[pgoff
])
742 get_page(mdev_state
->pages
[pgoff
]);
743 return mdev_state
->pages
[pgoff
];
746 static struct page
*mbochs_get_page(struct mdev_state
*mdev_state
,
751 if (WARN_ON(pgoff
>= mdev_state
->pagecount
))
754 mutex_lock(&mdev_state
->ops_lock
);
755 page
= __mbochs_get_page(mdev_state
, pgoff
);
756 mutex_unlock(&mdev_state
->ops_lock
);
761 static void mbochs_put_pages(struct mdev_state
*mdev_state
)
763 struct device
*dev
= mdev_dev(mdev_state
->mdev
);
766 WARN_ON(!mutex_is_locked(&mdev_state
->ops_lock
));
768 for (i
= 0; i
< mdev_state
->pagecount
; i
++) {
769 if (!mdev_state
->pages
[i
])
771 put_page(mdev_state
->pages
[i
]);
772 mdev_state
->pages
[i
] = NULL
;
775 dev_dbg(dev
, "%s: %d pages released\n", __func__
, count
);
778 static vm_fault_t
mbochs_region_vm_fault(struct vm_fault
*vmf
)
780 struct vm_area_struct
*vma
= vmf
->vma
;
781 struct mdev_state
*mdev_state
= vma
->vm_private_data
;
782 pgoff_t page_offset
= (vmf
->address
- vma
->vm_start
) >> PAGE_SHIFT
;
784 if (page_offset
>= mdev_state
->pagecount
)
785 return VM_FAULT_SIGBUS
;
787 vmf
->page
= mbochs_get_page(mdev_state
, page_offset
);
789 return VM_FAULT_SIGBUS
;
794 static const struct vm_operations_struct mbochs_region_vm_ops
= {
795 .fault
= mbochs_region_vm_fault
,
798 static int mbochs_mmap(struct vfio_device
*vdev
, struct vm_area_struct
*vma
)
800 struct mdev_state
*mdev_state
=
801 container_of(vdev
, struct mdev_state
, vdev
);
803 if (vma
->vm_pgoff
!= MBOCHS_MEMORY_BAR_OFFSET
>> PAGE_SHIFT
)
805 if (vma
->vm_end
< vma
->vm_start
)
807 if (vma
->vm_end
- vma
->vm_start
> mdev_state
->memsize
)
809 if ((vma
->vm_flags
& VM_SHARED
) == 0)
812 vma
->vm_ops
= &mbochs_region_vm_ops
;
813 vma
->vm_private_data
= mdev_state
;
817 static vm_fault_t
mbochs_dmabuf_vm_fault(struct vm_fault
*vmf
)
819 struct vm_area_struct
*vma
= vmf
->vma
;
820 struct mbochs_dmabuf
*dmabuf
= vma
->vm_private_data
;
822 if (WARN_ON(vmf
->pgoff
>= dmabuf
->pagecount
))
823 return VM_FAULT_SIGBUS
;
825 vmf
->page
= dmabuf
->pages
[vmf
->pgoff
];
830 static const struct vm_operations_struct mbochs_dmabuf_vm_ops
= {
831 .fault
= mbochs_dmabuf_vm_fault
,
834 static int mbochs_mmap_dmabuf(struct dma_buf
*buf
, struct vm_area_struct
*vma
)
836 struct mbochs_dmabuf
*dmabuf
= buf
->priv
;
837 struct device
*dev
= mdev_dev(dmabuf
->mdev_state
->mdev
);
839 dev_dbg(dev
, "%s: %d\n", __func__
, dmabuf
->id
);
841 if ((vma
->vm_flags
& VM_SHARED
) == 0)
844 vma
->vm_ops
= &mbochs_dmabuf_vm_ops
;
845 vma
->vm_private_data
= dmabuf
;
849 static void mbochs_print_dmabuf(struct mbochs_dmabuf
*dmabuf
,
852 struct device
*dev
= mdev_dev(dmabuf
->mdev_state
->mdev
);
853 u32 fourcc
= dmabuf
->mode
.drm_format
;
855 dev_dbg(dev
, "%s/%d: %c%c%c%c, %dx%d, stride %d, off 0x%llx, size 0x%llx, pages %ld\n",
857 fourcc
? ((fourcc
>> 0) & 0xff) : '-',
858 fourcc
? ((fourcc
>> 8) & 0xff) : '-',
859 fourcc
? ((fourcc
>> 16) & 0xff) : '-',
860 fourcc
? ((fourcc
>> 24) & 0xff) : '-',
861 dmabuf
->mode
.width
, dmabuf
->mode
.height
, dmabuf
->mode
.stride
,
862 dmabuf
->mode
.offset
, dmabuf
->mode
.size
, dmabuf
->pagecount
);
865 static struct sg_table
*mbochs_map_dmabuf(struct dma_buf_attachment
*at
,
866 enum dma_data_direction direction
)
868 struct mbochs_dmabuf
*dmabuf
= at
->dmabuf
->priv
;
869 struct device
*dev
= mdev_dev(dmabuf
->mdev_state
->mdev
);
872 dev_dbg(dev
, "%s: %d\n", __func__
, dmabuf
->id
);
874 sg
= kzalloc(sizeof(*sg
), GFP_KERNEL
);
877 if (sg_alloc_table_from_pages(sg
, dmabuf
->pages
, dmabuf
->pagecount
,
878 0, dmabuf
->mode
.size
, GFP_KERNEL
) < 0)
880 if (dma_map_sgtable(at
->dev
, sg
, direction
, 0))
890 return ERR_PTR(-ENOMEM
);
893 static void mbochs_unmap_dmabuf(struct dma_buf_attachment
*at
,
895 enum dma_data_direction direction
)
897 struct mbochs_dmabuf
*dmabuf
= at
->dmabuf
->priv
;
898 struct device
*dev
= mdev_dev(dmabuf
->mdev_state
->mdev
);
900 dev_dbg(dev
, "%s: %d\n", __func__
, dmabuf
->id
);
902 dma_unmap_sgtable(at
->dev
, sg
, direction
, 0);
907 static void mbochs_release_dmabuf(struct dma_buf
*buf
)
909 struct mbochs_dmabuf
*dmabuf
= buf
->priv
;
910 struct mdev_state
*mdev_state
= dmabuf
->mdev_state
;
911 struct device
*dev
= mdev_dev(mdev_state
->mdev
);
914 dev_dbg(dev
, "%s: %d\n", __func__
, dmabuf
->id
);
916 for (pg
= 0; pg
< dmabuf
->pagecount
; pg
++)
917 put_page(dmabuf
->pages
[pg
]);
919 mutex_lock(&mdev_state
->ops_lock
);
921 if (dmabuf
->unlinked
)
923 mutex_unlock(&mdev_state
->ops_lock
);
926 static struct dma_buf_ops mbochs_dmabuf_ops
= {
927 .map_dma_buf
= mbochs_map_dmabuf
,
928 .unmap_dma_buf
= mbochs_unmap_dmabuf
,
929 .release
= mbochs_release_dmabuf
,
930 .mmap
= mbochs_mmap_dmabuf
,
933 static struct mbochs_dmabuf
*mbochs_dmabuf_alloc(struct mdev_state
*mdev_state
,
934 struct mbochs_mode
*mode
)
936 struct mbochs_dmabuf
*dmabuf
;
937 pgoff_t page_offset
, pg
;
939 WARN_ON(!mutex_is_locked(&mdev_state
->ops_lock
));
941 dmabuf
= kzalloc(sizeof(struct mbochs_dmabuf
), GFP_KERNEL
);
945 dmabuf
->mode
= *mode
;
946 dmabuf
->id
= mdev_state
->next_id
++;
947 dmabuf
->pagecount
= DIV_ROUND_UP(mode
->size
, PAGE_SIZE
);
948 dmabuf
->pages
= kcalloc(dmabuf
->pagecount
, sizeof(struct page
*),
951 goto err_free_dmabuf
;
953 page_offset
= dmabuf
->mode
.offset
>> PAGE_SHIFT
;
954 for (pg
= 0; pg
< dmabuf
->pagecount
; pg
++) {
955 dmabuf
->pages
[pg
] = __mbochs_get_page(mdev_state
,
957 if (!dmabuf
->pages
[pg
])
961 dmabuf
->mdev_state
= mdev_state
;
962 list_add(&dmabuf
->next
, &mdev_state
->dmabufs
);
964 mbochs_print_dmabuf(dmabuf
, __func__
);
969 put_page(dmabuf
->pages
[--pg
]);
970 kfree(dmabuf
->pages
);
976 static struct mbochs_dmabuf
*
977 mbochs_dmabuf_find_by_mode(struct mdev_state
*mdev_state
,
978 struct mbochs_mode
*mode
)
980 struct mbochs_dmabuf
*dmabuf
;
982 WARN_ON(!mutex_is_locked(&mdev_state
->ops_lock
));
984 list_for_each_entry(dmabuf
, &mdev_state
->dmabufs
, next
)
985 if (mbochs_modes_equal(&dmabuf
->mode
, mode
))
991 static struct mbochs_dmabuf
*
992 mbochs_dmabuf_find_by_id(struct mdev_state
*mdev_state
, u32 id
)
994 struct mbochs_dmabuf
*dmabuf
;
996 WARN_ON(!mutex_is_locked(&mdev_state
->ops_lock
));
998 list_for_each_entry(dmabuf
, &mdev_state
->dmabufs
, next
)
999 if (dmabuf
->id
== id
)
1005 static int mbochs_dmabuf_export(struct mbochs_dmabuf
*dmabuf
)
1007 struct mdev_state
*mdev_state
= dmabuf
->mdev_state
;
1008 struct device
*dev
= mdev_state
->vdev
.dev
;
1009 DEFINE_DMA_BUF_EXPORT_INFO(exp_info
);
1010 struct dma_buf
*buf
;
1012 WARN_ON(!mutex_is_locked(&mdev_state
->ops_lock
));
1014 if (!IS_ALIGNED(dmabuf
->mode
.offset
, PAGE_SIZE
)) {
1015 dev_info_ratelimited(dev
, "%s: framebuffer not page-aligned\n",
1020 exp_info
.ops
= &mbochs_dmabuf_ops
;
1021 exp_info
.size
= dmabuf
->mode
.size
;
1022 exp_info
.priv
= dmabuf
;
1024 buf
= dma_buf_export(&exp_info
);
1026 dev_info_ratelimited(dev
, "%s: dma_buf_export failed: %ld\n",
1027 __func__
, PTR_ERR(buf
));
1028 return PTR_ERR(buf
);
1032 dev_dbg(dev
, "%s: %d\n", __func__
, dmabuf
->id
);
1036 static int mbochs_get_region_info(struct mdev_state
*mdev_state
,
1037 struct vfio_region_info_ext
*ext
)
1039 struct vfio_region_info
*region_info
= &ext
->base
;
1041 if (region_info
->index
>= MBOCHS_NUM_REGIONS
)
1044 switch (region_info
->index
) {
1045 case VFIO_PCI_CONFIG_REGION_INDEX
:
1046 region_info
->offset
= 0;
1047 region_info
->size
= MBOCHS_CONFIG_SPACE_SIZE
;
1048 region_info
->flags
= (VFIO_REGION_INFO_FLAG_READ
|
1049 VFIO_REGION_INFO_FLAG_WRITE
);
1051 case VFIO_PCI_BAR0_REGION_INDEX
:
1052 region_info
->offset
= MBOCHS_MEMORY_BAR_OFFSET
;
1053 region_info
->size
= mdev_state
->memsize
;
1054 region_info
->flags
= (VFIO_REGION_INFO_FLAG_READ
|
1055 VFIO_REGION_INFO_FLAG_WRITE
|
1056 VFIO_REGION_INFO_FLAG_MMAP
);
1058 case VFIO_PCI_BAR2_REGION_INDEX
:
1059 region_info
->offset
= MBOCHS_MMIO_BAR_OFFSET
;
1060 region_info
->size
= MBOCHS_MMIO_BAR_SIZE
;
1061 region_info
->flags
= (VFIO_REGION_INFO_FLAG_READ
|
1062 VFIO_REGION_INFO_FLAG_WRITE
);
1064 case MBOCHS_EDID_REGION_INDEX
:
1065 ext
->base
.argsz
= sizeof(*ext
);
1066 ext
->base
.offset
= MBOCHS_EDID_OFFSET
;
1067 ext
->base
.size
= MBOCHS_EDID_SIZE
;
1068 ext
->base
.flags
= (VFIO_REGION_INFO_FLAG_READ
|
1069 VFIO_REGION_INFO_FLAG_WRITE
|
1070 VFIO_REGION_INFO_FLAG_CAPS
);
1071 ext
->base
.cap_offset
= offsetof(typeof(*ext
), type
);
1072 ext
->type
.header
.id
= VFIO_REGION_INFO_CAP_TYPE
;
1073 ext
->type
.header
.version
= 1;
1074 ext
->type
.header
.next
= 0;
1075 ext
->type
.type
= VFIO_REGION_TYPE_GFX
;
1076 ext
->type
.subtype
= VFIO_REGION_SUBTYPE_GFX_EDID
;
1079 region_info
->size
= 0;
1080 region_info
->offset
= 0;
1081 region_info
->flags
= 0;
1087 static int mbochs_get_irq_info(struct vfio_irq_info
*irq_info
)
1089 irq_info
->count
= 0;
1093 static int mbochs_get_device_info(struct vfio_device_info
*dev_info
)
1095 dev_info
->flags
= VFIO_DEVICE_FLAGS_PCI
;
1096 dev_info
->num_regions
= MBOCHS_NUM_REGIONS
;
1097 dev_info
->num_irqs
= VFIO_PCI_NUM_IRQS
;
1101 static int mbochs_query_gfx_plane(struct mdev_state
*mdev_state
,
1102 struct vfio_device_gfx_plane_info
*plane
)
1104 struct mbochs_dmabuf
*dmabuf
;
1105 struct mbochs_mode mode
;
1108 if (plane
->flags
& VFIO_GFX_PLANE_TYPE_PROBE
) {
1109 if (plane
->flags
== (VFIO_GFX_PLANE_TYPE_PROBE
|
1110 VFIO_GFX_PLANE_TYPE_DMABUF
))
1115 if (plane
->flags
!= VFIO_GFX_PLANE_TYPE_DMABUF
)
1118 plane
->drm_format_mod
= 0;
1124 mutex_lock(&mdev_state
->ops_lock
);
1127 if (plane
->drm_plane_type
== DRM_PLANE_TYPE_PRIMARY
)
1128 ret
= mbochs_check_framebuffer(mdev_state
, &mode
);
1130 plane
->drm_format
= 0;
1135 plane
->dmabuf_id
= 0;
1139 dmabuf
= mbochs_dmabuf_find_by_mode(mdev_state
, &mode
);
1141 mbochs_dmabuf_alloc(mdev_state
, &mode
);
1143 mutex_unlock(&mdev_state
->ops_lock
);
1147 plane
->drm_format
= dmabuf
->mode
.drm_format
;
1148 plane
->width
= dmabuf
->mode
.width
;
1149 plane
->height
= dmabuf
->mode
.height
;
1150 plane
->stride
= dmabuf
->mode
.stride
;
1151 plane
->size
= dmabuf
->mode
.size
;
1152 plane
->dmabuf_id
= dmabuf
->id
;
1155 if (plane
->drm_plane_type
== DRM_PLANE_TYPE_PRIMARY
&&
1156 mdev_state
->active_id
!= plane
->dmabuf_id
) {
1157 dev_dbg(mdev_state
->vdev
.dev
, "%s: primary: %d => %d\n",
1158 __func__
, mdev_state
->active_id
, plane
->dmabuf_id
);
1159 mdev_state
->active_id
= plane
->dmabuf_id
;
1161 mutex_unlock(&mdev_state
->ops_lock
);
1165 static int mbochs_get_gfx_dmabuf(struct mdev_state
*mdev_state
, u32 id
)
1167 struct mbochs_dmabuf
*dmabuf
;
1169 mutex_lock(&mdev_state
->ops_lock
);
1171 dmabuf
= mbochs_dmabuf_find_by_id(mdev_state
, id
);
1173 mutex_unlock(&mdev_state
->ops_lock
);
1178 mbochs_dmabuf_export(dmabuf
);
1180 mutex_unlock(&mdev_state
->ops_lock
);
1185 return dma_buf_fd(dmabuf
->buf
, 0);
1188 static long mbochs_ioctl(struct vfio_device
*vdev
, unsigned int cmd
,
1191 struct mdev_state
*mdev_state
=
1192 container_of(vdev
, struct mdev_state
, vdev
);
1194 unsigned long minsz
, outsz
;
1197 case VFIO_DEVICE_GET_INFO
:
1199 struct vfio_device_info info
;
1201 minsz
= offsetofend(struct vfio_device_info
, num_irqs
);
1203 if (copy_from_user(&info
, (void __user
*)arg
, minsz
))
1206 if (info
.argsz
< minsz
)
1209 ret
= mbochs_get_device_info(&info
);
1213 if (copy_to_user((void __user
*)arg
, &info
, minsz
))
1218 case VFIO_DEVICE_GET_REGION_INFO
:
1220 struct vfio_region_info_ext info
;
1222 minsz
= offsetofend(typeof(info
), base
.offset
);
1224 if (copy_from_user(&info
, (void __user
*)arg
, minsz
))
1227 outsz
= info
.base
.argsz
;
1230 if (outsz
> sizeof(info
))
1233 ret
= mbochs_get_region_info(mdev_state
, &info
);
1237 if (copy_to_user((void __user
*)arg
, &info
, outsz
))
1243 case VFIO_DEVICE_GET_IRQ_INFO
:
1245 struct vfio_irq_info info
;
1247 minsz
= offsetofend(struct vfio_irq_info
, count
);
1249 if (copy_from_user(&info
, (void __user
*)arg
, minsz
))
1252 if ((info
.argsz
< minsz
) ||
1253 (info
.index
>= VFIO_PCI_NUM_IRQS
))
1256 ret
= mbochs_get_irq_info(&info
);
1260 if (copy_to_user((void __user
*)arg
, &info
, minsz
))
1266 case VFIO_DEVICE_QUERY_GFX_PLANE
:
1268 struct vfio_device_gfx_plane_info plane
= {};
1270 minsz
= offsetofend(struct vfio_device_gfx_plane_info
,
1273 if (copy_from_user(&plane
, (void __user
*)arg
, minsz
))
1276 if (plane
.argsz
< minsz
)
1279 ret
= mbochs_query_gfx_plane(mdev_state
, &plane
);
1283 if (copy_to_user((void __user
*)arg
, &plane
, minsz
))
1289 case VFIO_DEVICE_GET_GFX_DMABUF
:
1293 if (get_user(dmabuf_id
, (__u32 __user
*)arg
))
1296 return mbochs_get_gfx_dmabuf(mdev_state
, dmabuf_id
);
1299 case VFIO_DEVICE_SET_IRQS
:
1302 case VFIO_DEVICE_RESET
:
1303 return mbochs_reset(mdev_state
);
1308 static void mbochs_close_device(struct vfio_device
*vdev
)
1310 struct mdev_state
*mdev_state
=
1311 container_of(vdev
, struct mdev_state
, vdev
);
1312 struct mbochs_dmabuf
*dmabuf
, *tmp
;
1314 mutex_lock(&mdev_state
->ops_lock
);
1316 list_for_each_entry_safe(dmabuf
, tmp
, &mdev_state
->dmabufs
, next
) {
1317 list_del(&dmabuf
->next
);
1319 /* free in mbochs_release_dmabuf() */
1320 dmabuf
->unlinked
= true;
1325 mbochs_put_pages(mdev_state
);
1327 mutex_unlock(&mdev_state
->ops_lock
);
1331 memory_show(struct device
*dev
, struct device_attribute
*attr
,
1334 struct mdev_state
*mdev_state
= dev_get_drvdata(dev
);
1336 return sprintf(buf
, "%d MB\n", mdev_state
->type
->mbytes
);
/* Wire memory_show() up as the read-only sysfs attribute "memory". */
static DEVICE_ATTR_RO(memory);
1340 static struct attribute
*mdev_dev_attrs
[] = {
1341 &dev_attr_memory
.attr
,
1345 static const struct attribute_group mdev_dev_group
= {
1347 .attrs
= mdev_dev_attrs
,
1350 static const struct attribute_group
*mdev_dev_groups
[] = {
1355 static ssize_t
mbochs_show_description(struct mdev_type
*mtype
, char *buf
)
1357 struct mbochs_type
*type
=
1358 container_of(mtype
, struct mbochs_type
, type
);
1360 return sprintf(buf
, "virtual display, %d MB video memory\n",
1361 type
? type
->mbytes
: 0);
1364 static unsigned int mbochs_get_available(struct mdev_type
*mtype
)
1366 struct mbochs_type
*type
=
1367 container_of(mtype
, struct mbochs_type
, type
);
1369 return atomic_read(&mbochs_avail_mbytes
) / type
->mbytes
;
1372 static const struct vfio_device_ops mbochs_dev_ops
= {
1373 .close_device
= mbochs_close_device
,
1374 .init
= mbochs_init_dev
,
1375 .release
= mbochs_release_dev
,
1376 .read
= mbochs_read
,
1377 .write
= mbochs_write
,
1378 .ioctl
= mbochs_ioctl
,
1379 .mmap
= mbochs_mmap
,
1380 .bind_iommufd
= vfio_iommufd_emulated_bind
,
1381 .unbind_iommufd
= vfio_iommufd_emulated_unbind
,
1382 .attach_ioas
= vfio_iommufd_emulated_attach_ioas
,
1383 .detach_ioas
= vfio_iommufd_emulated_detach_ioas
,
1386 static struct mdev_driver mbochs_driver
= {
1387 .device_api
= VFIO_DEVICE_API_PCI_STRING
,
1390 .owner
= THIS_MODULE
,
1391 .mod_name
= KBUILD_MODNAME
,
1392 .dev_groups
= mdev_dev_groups
,
1394 .probe
= mbochs_probe
,
1395 .remove
= mbochs_remove
,
1396 .get_available
= mbochs_get_available
,
1397 .show_description
= mbochs_show_description
,
1400 static const struct file_operations vd_fops
= {
1401 .owner
= THIS_MODULE
,
/* Release callback for the statically allocated parent device. */
static void mbochs_device_release(struct device *dev)
{
	/* nothing */
}
1409 static int __init
mbochs_dev_init(void)
1413 atomic_set(&mbochs_avail_mbytes
, max_mbytes
);
1415 ret
= alloc_chrdev_region(&mbochs_devt
, 0, MINORMASK
+ 1, MBOCHS_NAME
);
1417 pr_err("Error: failed to register mbochs_dev, err: %d\n", ret
);
1420 cdev_init(&mbochs_cdev
, &vd_fops
);
1421 cdev_add(&mbochs_cdev
, mbochs_devt
, MINORMASK
+ 1);
1422 pr_info("%s: major %d\n", __func__
, MAJOR(mbochs_devt
));
1424 ret
= mdev_register_driver(&mbochs_driver
);
1428 ret
= class_register(&mbochs_class
);
1431 mbochs_dev
.class = &mbochs_class
;
1432 mbochs_dev
.release
= mbochs_device_release
;
1433 dev_set_name(&mbochs_dev
, "%s", MBOCHS_NAME
);
1435 ret
= device_register(&mbochs_dev
);
1439 ret
= mdev_register_parent(&mbochs_parent
, &mbochs_dev
, &mbochs_driver
,
1441 ARRAY_SIZE(mbochs_mdev_types
));
1448 device_del(&mbochs_dev
);
1450 put_device(&mbochs_dev
);
1451 class_unregister(&mbochs_class
);
1453 mdev_unregister_driver(&mbochs_driver
);
1455 cdev_del(&mbochs_cdev
);
1456 unregister_chrdev_region(mbochs_devt
, MINORMASK
+ 1);
1460 static void __exit
mbochs_dev_exit(void)
1462 mbochs_dev
.bus
= NULL
;
1463 mdev_unregister_parent(&mbochs_parent
);
1465 device_unregister(&mbochs_dev
);
1466 mdev_unregister_driver(&mbochs_driver
);
1467 cdev_del(&mbochs_cdev
);
1468 unregister_chrdev_region(mbochs_devt
, MINORMASK
+ 1);
1469 class_unregister(&mbochs_class
);
1472 MODULE_IMPORT_NS("DMA_BUF");
1473 module_init(mbochs_dev_init
)
1474 module_exit(mbochs_dev_exit
)