/*
 * Mediated virtual PCI serial host device driver
 *
 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
 *     Author: Neo Jia <cjia@nvidia.com>
 *             Kirti Wankhede <kwankhede@nvidia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Sample driver that creates an mdev device which simulates a serial port
 * over PCI.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/cdev.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/uuid.h>
#include <linux/vfio.h>
#include <linux/iommu.h>
#include <linux/sysfs.h>
#include <linux/ctype.h>
#include <linux/file.h>
#include <linux/mdev.h>
#include <linux/pci.h>
#include <linux/serial.h>
#include <uapi/linux/serial_reg.h>
#include <linux/eventfd.h>
#define VERSION_STRING		"0.1"
#define DRIVER_AUTHOR		"NVIDIA Corporation"

#define MTTY_CLASS_NAME		"mtty"

#define MTTY_NAME		"mtty"

#define MTTY_STRING_LEN		16

#define MTTY_CONFIG_SPACE_SIZE	0xff
#define MTTY_IO_BAR_SIZE	0x8
#define MTTY_MMIO_BAR_SIZE	0x100000

#define STORE_LE16(addr, val)	(*(u16 *)addr = val)
#define STORE_LE32(addr, val)	(*(u32 *)addr = val)

#define MAX_FIFO_SIZE		16

/* Total number of emulated ports shared by all mdev instances */
#define MAX_MTTYS		24

#define CIRCULAR_BUF_INC_IDX(idx)	(idx = (idx + 1) & (MAX_FIFO_SIZE - 1))
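/*
 * Note: the AND-mask wrap in CIRCULAR_BUF_INC_IDX() only works because
 * MAX_FIFO_SIZE is a power of two; the macro both advances the index and
 * stores the wrapped value back into its argument.
 */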
#define MTTY_VFIO_PCI_OFFSET_SHIFT	40

#define MTTY_VFIO_PCI_OFFSET_TO_INDEX(off)	(off >> MTTY_VFIO_PCI_OFFSET_SHIFT)
#define MTTY_VFIO_PCI_INDEX_TO_OFFSET(index)	\
				((u64)(index) << MTTY_VFIO_PCI_OFFSET_SHIFT)
#define MTTY_VFIO_PCI_OFFSET_MASK	\
				(((u64)(1) << MTTY_VFIO_PCI_OFFSET_SHIFT) - 1)
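/*
 * Each VFIO region is mapped to its own window of the device file: the
 * region index lives in bits 63:40 of the file offset and the offset
 * within the region in bits 39:0.  For example, a one-byte access at
 * BAR0 offset 0x3 reaches mdev_access() with
 * pos = MTTY_VFIO_PCI_INDEX_TO_OFFSET(VFIO_PCI_BAR0_REGION_INDEX) | 0x3.
 */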
/* Global state backing the parent device */
struct mtty_dev {
	dev_t		vd_devt;
	struct class	*vd_class;
	struct cdev	vd_cdev;
	struct idr	vd_idr;
	struct device	dev;
} mtty_dev;
struct mdev_region_info {
	u64 start;
	u32 size;
	u64 vfio_offset;
};
#if defined(DEBUG_REGS)
const char *wr_reg[] = {
	"TX",   "IER",  "FCR",  "LCR",  "MCR",  "LSR",  "MSR",  "SCR"
};

const char *rd_reg[] = {
	"RX",   "IER",  "IIR",  "LCR",  "MCR",  "LSR",  "MSR",  "SCR"
};
#endif
/* loop back buffer */
struct rxtx {
	u8 fifo[MAX_FIFO_SIZE];
	u8 head, tail;
	u32 count;
};

struct serial_port {
	u8 uart_reg[8];		/* 8 registers */
	struct rxtx rxtx;	/* loop back buffer */
	bool dlab;
	bool overrun;
	u16 divisor;
	u8 fcr;			/* FIFO control register */
	u8 max_fifo_size;
	u8 intr_trigger_level;	/* interrupt trigger level */
};
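/*
 * The emulated port is a pure loopback: bytes the guest writes to the
 * transmit register are queued in rxtx.fifo and handed back on reads of
 * the receive register, so no real serial hardware is involved.
 */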
/* State of each mdev device */
struct mdev_state {
	int irq_fd;
	struct eventfd_ctx *intx_evtfd;
	struct eventfd_ctx *msi_evtfd;
	int irq_index;
	u8 *vconfig;
	struct mutex ops_lock;
	struct mdev_device *mdev;
	struct mdev_region_info region_info[VFIO_PCI_NUM_REGIONS];
	u32 bar_mask[VFIO_PCI_NUM_REGIONS];
	struct list_head next;
	struct serial_port s[2];
	struct mutex rxtx_lock;
	struct vfio_device_info dev_info;
	int nr_ports;
};

struct mutex mdev_list_lock;
struct list_head mdev_devices_list;
static const struct file_operations vd_fops = {
	.owner		= THIS_MODULE,
};
/* function prototypes */

static int mtty_trigger_interrupt(uuid_le uuid);
/* Helper functions */
static struct mdev_state *find_mdev_state_by_uuid(uuid_le uuid)
{
	struct mdev_state *mds;

	list_for_each_entry(mds, &mdev_devices_list, next) {
		if (uuid_le_cmp(mdev_uuid(mds->mdev), uuid) == 0)
			return mds;
	}

	return NULL;
}
void dump_buffer(u8 *buf, uint32_t count)
{
	int i;

	pr_info("Buffer:\n");
	for (i = 0; i < count; i++) {
		pr_info("%2x ", *(buf + i));
		if ((i + 1) % 16 == 0)
			pr_info("\n");
	}
}
static void mtty_create_config_space(struct mdev_state *mdev_state)
{
	/* PCI device ID 0x3253, vendor ID 0x4348 */
	STORE_LE32((u32 *) &mdev_state->vconfig[0x0], 0x32534348);

	/* Control: I/O+, Mem-, BusMaster- */
	STORE_LE16((u16 *) &mdev_state->vconfig[0x4], 0x0001);

	/* Status: capabilities list absent */
	STORE_LE16((u16 *) &mdev_state->vconfig[0x6], 0x0200);

	/* Rev ID */
	mdev_state->vconfig[0x8] = 0x10;

	/* programming interface class : 16550-compatible serial controller */
	mdev_state->vconfig[0x9] = 0x02;

	/* Sub class : 00 */
	mdev_state->vconfig[0xa] = 0x00;

	/* Base class : Simple Communication controllers */
	mdev_state->vconfig[0xb] = 0x07;

	/* base address registers */
	STORE_LE32((u32 *) &mdev_state->vconfig[0x10], 0x000001);
	mdev_state->bar_mask[0] = ~(MTTY_IO_BAR_SIZE) + 1;

	if (mdev_state->nr_ports == 2) {
		STORE_LE32((u32 *) &mdev_state->vconfig[0x14], 0x000001);
		mdev_state->bar_mask[1] = ~(MTTY_IO_BAR_SIZE) + 1;
	}

	/* Subsystem ID */
	STORE_LE32((u32 *) &mdev_state->vconfig[0x2c], 0x32534348);

	mdev_state->vconfig[0x34] = 0x00;	/* Cap Ptr */
	mdev_state->vconfig[0x3d] = 0x01;	/* interrupt pin (INTA#) */

	/* Vendor specific data */
	mdev_state->vconfig[0x40] = 0x23;
	mdev_state->vconfig[0x43] = 0x80;
	mdev_state->vconfig[0x44] = 0x23;
	mdev_state->vconfig[0x48] = 0x23;
	mdev_state->vconfig[0x4c] = 0x23;

	mdev_state->vconfig[0x60] = 0x50;
	mdev_state->vconfig[0x61] = 0x43;
	mdev_state->vconfig[0x62] = 0x49;
	mdev_state->vconfig[0x63] = 0x20;
	mdev_state->vconfig[0x64] = 0x53;
	mdev_state->vconfig[0x65] = 0x65;
	mdev_state->vconfig[0x66] = 0x72;
	mdev_state->vconfig[0x67] = 0x69;
	mdev_state->vconfig[0x68] = 0x61;
	mdev_state->vconfig[0x69] = 0x6c;
	mdev_state->vconfig[0x6a] = 0x2f;
	mdev_state->vconfig[0x6b] = 0x55;
	mdev_state->vconfig[0x6c] = 0x41;
	mdev_state->vconfig[0x6d] = 0x52;
	mdev_state->vconfig[0x6e] = 0x54;
}
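/*
 * The vendor-specific bytes written at 0x60-0x6e above are ASCII and
 * spell "PCI Serial/UART"; the 32-bit value 0x32534348 packs device ID
 * 0x3253 and vendor ID 0x4348.
 */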
static void handle_pci_cfg_write(struct mdev_state *mdev_state, u16 offset,
				 u8 *buf, u32 count)
{
	u32 cfg_addr, bar_mask, bar_index = 0;

	switch (offset) {
	case 0x04: /* device control */
	case 0x06: /* device status */
		/* do nothing */
		break;
	case 0x3c: /* interrupt line */
		mdev_state->vconfig[0x3c] = buf[0];
		break;
	case 0x3d:
		/*
		 * Interrupt Pin is hardwired to INTA.
		 * This field is write protected by hardware
		 */
		break;
	case 0x10: /* BAR0 */
	case 0x14: /* BAR1 */
		if (offset == 0x10)
			bar_index = 0;
		else if (offset == 0x14)
			bar_index = 1;

		if ((mdev_state->nr_ports == 1) && (bar_index == 1)) {
			STORE_LE32(&mdev_state->vconfig[offset], 0);
			break;
		}

		cfg_addr = *(u32 *)buf;
		pr_info("BAR%d addr 0x%x\n", bar_index, cfg_addr);

		if (cfg_addr == 0xffffffff) {
			bar_mask = mdev_state->bar_mask[bar_index];
			cfg_addr = (cfg_addr & bar_mask);
		}

		cfg_addr |= (mdev_state->vconfig[offset] & 0x3ul);
		STORE_LE32(&mdev_state->vconfig[offset], cfg_addr);
		break;
	case 0x18: /* BAR2 */
	case 0x1c: /* BAR3 */
	case 0x20: /* BAR4 */
		STORE_LE32(&mdev_state->vconfig[offset], 0);
		break;
	default:
		pr_info("PCI config write @0x%x of %d bytes not handled\n",
			offset, count);
		break;
	}
}
static void handle_bar_write(unsigned int index, struct mdev_state *mdev_state,
			     u16 offset, u8 *buf, u32 count)
{
	u8 data = *buf;

	/* Handle data written by guest */
	switch (offset) {
	case UART_TX:
		/* if DLAB set, data is LSB of divisor */
		if (mdev_state->s[index].dlab) {
			mdev_state->s[index].divisor |= data;
			break;
		}

		mutex_lock(&mdev_state->rxtx_lock);

		/* save in TX buffer */
		if (mdev_state->s[index].rxtx.count <
				mdev_state->s[index].max_fifo_size) {
			mdev_state->s[index].rxtx.fifo[
					mdev_state->s[index].rxtx.head] = data;
			mdev_state->s[index].rxtx.count++;
			CIRCULAR_BUF_INC_IDX(mdev_state->s[index].rxtx.head);
			mdev_state->s[index].overrun = false;

			/*
			 * Trigger interrupt if receive data interrupt is
			 * enabled and fifo reached trigger level
			 */
			if ((mdev_state->s[index].uart_reg[UART_IER] &
						UART_IER_RDI) &&
			    (mdev_state->s[index].rxtx.count ==
				mdev_state->s[index].intr_trigger_level)) {
				/* trigger interrupt */
#if defined(DEBUG_INTR)
				pr_err("Serial port %d: Fifo level trigger\n",
					index);
#endif
				mtty_trigger_interrupt(
						mdev_uuid(mdev_state->mdev));
			}
		} else {
#if defined(DEBUG_INTR)
			pr_err("Serial port %d: Buffer Overflow\n", index);
#endif
			mdev_state->s[index].overrun = true;

			/*
			 * Trigger interrupt if receiver line status interrupt
			 * is enabled
			 */
			if (mdev_state->s[index].uart_reg[UART_IER] &
							UART_IER_RLSI)
				mtty_trigger_interrupt(
						mdev_uuid(mdev_state->mdev));
		}
		mutex_unlock(&mdev_state->rxtx_lock);
		break;

	case UART_IER:
		/* if DLAB set, data is MSB of divisor */
		if (mdev_state->s[index].dlab)
			mdev_state->s[index].divisor |= (u16)data << 8;
		else {
			mdev_state->s[index].uart_reg[offset] = data;
			mutex_lock(&mdev_state->rxtx_lock);
			if ((data & UART_IER_THRI) &&
			    (mdev_state->s[index].rxtx.head ==
					mdev_state->s[index].rxtx.tail)) {
#if defined(DEBUG_INTR)
				pr_err("Serial port %d: IER_THRI write\n",
					index);
#endif
				mtty_trigger_interrupt(
						mdev_uuid(mdev_state->mdev));
			}
			mutex_unlock(&mdev_state->rxtx_lock);
		}
		break;

	case UART_FCR:
		mdev_state->s[index].fcr = data;

		mutex_lock(&mdev_state->rxtx_lock);
		if (data & (UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT)) {
			/* clear loop back FIFO */
			mdev_state->s[index].rxtx.count = 0;
			mdev_state->s[index].rxtx.head = 0;
			mdev_state->s[index].rxtx.tail = 0;
		}
		mutex_unlock(&mdev_state->rxtx_lock);

		switch (data & UART_FCR_TRIGGER_MASK) {
		case UART_FCR_TRIGGER_1:
			mdev_state->s[index].intr_trigger_level = 1;
			break;

		case UART_FCR_TRIGGER_4:
			mdev_state->s[index].intr_trigger_level = 4;
			break;

		case UART_FCR_TRIGGER_8:
			mdev_state->s[index].intr_trigger_level = 8;
			break;

		case UART_FCR_TRIGGER_14:
			mdev_state->s[index].intr_trigger_level = 14;
			break;

		default:
			/*
			 * Set trigger level to 1 otherwise, or implement a
			 * timer with a timeout of 4 characters and on its
			 * expiry set the Receive data timeout in the IIR
			 * register
			 */
			mdev_state->s[index].intr_trigger_level = 1;
			break;
		}

		if (data & UART_FCR_ENABLE_FIFO)
			mdev_state->s[index].max_fifo_size = MAX_FIFO_SIZE;
		else {
			mdev_state->s[index].max_fifo_size = 1;
			mdev_state->s[index].intr_trigger_level = 1;
		}
		break;

	case UART_LCR:
		if (data & UART_LCR_DLAB) {
			mdev_state->s[index].dlab = true;
			mdev_state->s[index].divisor = 0;
		} else
			mdev_state->s[index].dlab = false;

		mdev_state->s[index].uart_reg[offset] = data;
		break;

	case UART_MCR:
		mdev_state->s[index].uart_reg[offset] = data;

		if ((mdev_state->s[index].uart_reg[UART_IER] & UART_IER_MSI) &&
				(data & UART_MCR_OUT2)) {
#if defined(DEBUG_INTR)
			pr_err("Serial port %d: MCR_OUT2 write\n", index);
#endif
			mtty_trigger_interrupt(mdev_uuid(mdev_state->mdev));
		}

		if ((mdev_state->s[index].uart_reg[UART_IER] & UART_IER_MSI) &&
				(data & (UART_MCR_RTS | UART_MCR_DTR))) {
#if defined(DEBUG_INTR)
			pr_err("Serial port %d: MCR RTS/DTR write\n", index);
#endif
			mtty_trigger_interrupt(mdev_uuid(mdev_state->mdev));
		}
		break;

	case UART_SCR:
		mdev_state->s[index].uart_reg[offset] = data;
		break;

	default:
		break;
	}
}
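/*
 * As in a real 16550, writes made while the DLAB bit is set in LCR land
 * in the baud-rate divisor instead of the data/IER registers; the
 * divisor itself has no effect on the loopback path.
 */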
static void handle_bar_read(unsigned int index, struct mdev_state *mdev_state,
			    u16 offset, u8 *buf, u32 count)
{
	/* Handle read requests by guest */
	switch (offset) {
	case UART_RX:
		/* if DLAB set, data is LSB of divisor */
		if (mdev_state->s[index].dlab) {
			*buf = (u8)mdev_state->s[index].divisor;
			break;
		}

		mutex_lock(&mdev_state->rxtx_lock);
		/* return data in tx buffer */
		if (mdev_state->s[index].rxtx.head !=
				mdev_state->s[index].rxtx.tail) {
			*buf = mdev_state->s[index].rxtx.fifo[
					mdev_state->s[index].rxtx.tail];
			mdev_state->s[index].rxtx.count--;
			CIRCULAR_BUF_INC_IDX(mdev_state->s[index].rxtx.tail);
		}

		if (mdev_state->s[index].rxtx.head ==
				mdev_state->s[index].rxtx.tail) {
			/*
			 * Trigger interrupt if tx buffer empty interrupt is
			 * enabled and fifo is empty
			 */
#if defined(DEBUG_INTR)
			pr_err("Serial port %d: Buffer Empty\n", index);
#endif
			if (mdev_state->s[index].uart_reg[UART_IER] &
							UART_IER_THRI)
				mtty_trigger_interrupt(
						mdev_uuid(mdev_state->mdev));
		}
		mutex_unlock(&mdev_state->rxtx_lock);
		break;

	case UART_IER:
		if (mdev_state->s[index].dlab) {
			*buf = (u8)(mdev_state->s[index].divisor >> 8);
			break;
		}
		*buf = mdev_state->s[index].uart_reg[offset] & 0x0f;
		break;

	case UART_IIR:
	{
		u8 ier = mdev_state->s[index].uart_reg[UART_IER];
		*buf = 0;

		mutex_lock(&mdev_state->rxtx_lock);
		/* Interrupt priority 1: Parity, overrun, framing or break */
		if ((ier & UART_IER_RLSI) && mdev_state->s[index].overrun)
			*buf |= UART_IIR_RLSI;

		/* Interrupt priority 2: Fifo trigger level reached */
		if ((ier & UART_IER_RDI) &&
		    (mdev_state->s[index].rxtx.count >=
			mdev_state->s[index].intr_trigger_level))
			*buf |= UART_IIR_RDI;

		/* Interrupt priority 3: transmitter holding register empty */
		if ((ier & UART_IER_THRI) &&
		    (mdev_state->s[index].rxtx.head ==
				mdev_state->s[index].rxtx.tail))
			*buf |= UART_IIR_THRI;

		/* Interrupt priority 4: Modem status: CTS, DSR, RI or DCD */
		if ((ier & UART_IER_MSI) &&
		    (mdev_state->s[index].uart_reg[UART_MCR] &
				(UART_MCR_RTS | UART_MCR_DTR)))
			*buf |= UART_IIR_MSI;

		/* bit0: 0=> interrupt pending, 1=> no interrupt is pending */
		if (*buf == 0)
			*buf = UART_IIR_NO_INT;

		/* set bit 6 & 7 to be 16550 compatible */
		*buf |= 0xC0;
		mutex_unlock(&mdev_state->rxtx_lock);
	}
	break;

	case UART_LCR:
	case UART_MCR:
		*buf = mdev_state->s[index].uart_reg[offset];
		break;

	case UART_LSR:
	{
		u8 lsr = 0;

		mutex_lock(&mdev_state->rxtx_lock);
		/* at least one char in FIFO */
		if (mdev_state->s[index].rxtx.head !=
				mdev_state->s[index].rxtx.tail)
			lsr |= UART_LSR_DR;

		/* if FIFO overrun */
		if (mdev_state->s[index].overrun)
			lsr |= UART_LSR_OE;

		/* transmit FIFO empty and transmitter empty */
		if (mdev_state->s[index].rxtx.head ==
				mdev_state->s[index].rxtx.tail)
			lsr |= UART_LSR_TEMT | UART_LSR_THRE;

		mutex_unlock(&mdev_state->rxtx_lock);
		*buf = lsr;
		break;
	}

	case UART_MSR:
		*buf = UART_MSR_DSR | UART_MSR_DDSR | UART_MSR_DCD;

		mutex_lock(&mdev_state->rxtx_lock);
		/* if AFE is 1 and FIFO has space, set CTS bit */
		if (mdev_state->s[index].uart_reg[UART_MCR] &
							UART_MCR_AFE) {
			if (mdev_state->s[index].rxtx.count <
					mdev_state->s[index].max_fifo_size)
				*buf |= UART_MSR_CTS | UART_MSR_DCTS;
		} else
			*buf |= UART_MSR_CTS | UART_MSR_DCTS;
		mutex_unlock(&mdev_state->rxtx_lock);
		break;

	case UART_SCR:
		*buf = mdev_state->s[index].uart_reg[offset];
		break;

	default:
		break;
	}
}
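/*
 * Reads of the RX register drain the same loopback FIFO that TX writes
 * filled, while LSR and MSR are synthesized from the FIFO state rather
 * than from any physical line.
 */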
static void mdev_read_base(struct mdev_state *mdev_state)
{
	int index, pos;
	u32 start_lo, start_hi;
	u32 mem_type;

	pos = PCI_BASE_ADDRESS_0;

	for (index = 0; index <= VFIO_PCI_BAR5_REGION_INDEX; index++) {

		if (!mdev_state->region_info[index].size)
			continue;

		start_lo = (*(u32 *)(mdev_state->vconfig + pos)) &
			PCI_BASE_ADDRESS_MEM_MASK;
		mem_type = (*(u32 *)(mdev_state->vconfig + pos)) &
			PCI_BASE_ADDRESS_MEM_TYPE_MASK;

		switch (mem_type) {
		case PCI_BASE_ADDRESS_MEM_TYPE_64:
			start_hi = (*(u32 *)(mdev_state->vconfig + pos + 4));
			pos += 4;
			break;
		case PCI_BASE_ADDRESS_MEM_TYPE_32:
		case PCI_BASE_ADDRESS_MEM_TYPE_1M:
			/* 1M mem BAR treated as 32-bit BAR */
		default:
			/* mem unknown type treated as 32-bit BAR */
			start_hi = 0;
			break;
		}
		pos += 4;
		mdev_state->region_info[index].start = ((u64)start_hi << 32) |
							start_lo;
	}
}
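/*
 * mdev_read_base() recovers the BAR base addresses the guest programmed
 * into virtual config space; a 64-bit memory BAR consumes two
 * consecutive dwords, which is why the position advances an extra four
 * bytes for PCI_BASE_ADDRESS_MEM_TYPE_64.
 */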
static ssize_t mdev_access(struct mdev_device *mdev, u8 *buf, size_t count,
			   loff_t pos, bool is_write)
{
	struct mdev_state *mdev_state;
	unsigned int index;
	loff_t offset;
	int ret = 0;

	if (!mdev || !buf)
		return -EINVAL;

	mdev_state = mdev_get_drvdata(mdev);
	if (!mdev_state) {
		pr_err("%s mdev_state not found\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&mdev_state->ops_lock);

	index = MTTY_VFIO_PCI_OFFSET_TO_INDEX(pos);
	offset = pos & MTTY_VFIO_PCI_OFFSET_MASK;
	switch (index) {
	case VFIO_PCI_CONFIG_REGION_INDEX:
		pr_info("%s: PCI config space %s at offset 0x%llx\n",
			__func__, is_write ? "write" : "read", offset);

		if (is_write) {
			dump_buffer(buf, count);
			handle_pci_cfg_write(mdev_state, offset, buf, count);
		} else {
			memcpy(buf, (mdev_state->vconfig + offset), count);
			dump_buffer(buf, count);
		}
		break;

	case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
		if (!mdev_state->region_info[index].start)
			mdev_read_base(mdev_state);

		if (is_write) {
			dump_buffer(buf, count);

#if defined(DEBUG_REGS)
			pr_info("%s: BAR%d WR @0x%llx %s val:0x%02x dlab:%d\n",
				__func__, index, offset, wr_reg[offset],
				*buf, mdev_state->s[index].dlab);
#endif
			handle_bar_write(index, mdev_state, offset, buf, count);
		} else {
			handle_bar_read(index, mdev_state, offset, buf, count);
			dump_buffer(buf, count);

#if defined(DEBUG_REGS)
			pr_info("%s: BAR%d RD @0x%llx %s val:0x%02x dlab:%d\n",
				__func__, index, offset, rd_reg[offset],
				*buf, mdev_state->s[index].dlab);
#endif
		}
		break;

	default:
		ret = -EINVAL;
		goto accessfailed;
	}

	ret = count;

accessfailed:
	mutex_unlock(&mdev_state->ops_lock);

	return ret;
}
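/*
 * All region read/write requests funnel through mdev_access(), which
 * decodes the region index from the file offset and dispatches to the
 * config-space or BAR handlers under ops_lock.
 */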
int mtty_create(struct kobject *kobj, struct mdev_device *mdev)
{
	struct mdev_state *mdev_state;
	char name[MTTY_STRING_LEN];
	int nr_ports = 0, i;

	if (!mdev)
		return -EINVAL;

	for (i = 0; i < 2; i++) {
		snprintf(name, MTTY_STRING_LEN, "%s-%d",
			 dev_driver_string(mdev_parent_dev(mdev)), i + 1);
		if (!strcmp(kobj->name, name)) {
			nr_ports = i + 1;
			break;
		}
	}

	if (!nr_ports)
		return -EINVAL;

	mdev_state = kzalloc(sizeof(struct mdev_state), GFP_KERNEL);
	if (mdev_state == NULL)
		return -ENOMEM;

	mdev_state->nr_ports = nr_ports;
	mdev_state->irq_index = -1;
	mdev_state->s[0].max_fifo_size = MAX_FIFO_SIZE;
	mdev_state->s[1].max_fifo_size = MAX_FIFO_SIZE;
	mutex_init(&mdev_state->rxtx_lock);
	mdev_state->vconfig = kzalloc(MTTY_CONFIG_SPACE_SIZE, GFP_KERNEL);

	if (mdev_state->vconfig == NULL) {
		kfree(mdev_state);
		return -ENOMEM;
	}

	mutex_init(&mdev_state->ops_lock);
	mdev_state->mdev = mdev;
	mdev_set_drvdata(mdev, mdev_state);

	mtty_create_config_space(mdev_state);

	mutex_lock(&mdev_list_lock);
	list_add(&mdev_state->next, &mdev_devices_list);
	mutex_unlock(&mdev_list_lock);

	return 0;
}
int mtty_remove(struct mdev_device *mdev)
{
	struct mdev_state *mds, *tmp_mds;
	struct mdev_state *mdev_state = mdev_get_drvdata(mdev);
	int ret = -EINVAL;

	mutex_lock(&mdev_list_lock);
	list_for_each_entry_safe(mds, tmp_mds, &mdev_devices_list, next) {
		if (mdev_state == mds) {
			list_del(&mdev_state->next);
			mdev_set_drvdata(mdev, NULL);
			kfree(mdev_state->vconfig);
			kfree(mdev_state);
			ret = 0;
			break;
		}
	}
	mutex_unlock(&mdev_list_lock);

	return ret;
}
int mtty_reset(struct mdev_device *mdev)
{
	struct mdev_state *mdev_state;

	if (!mdev)
		return -EINVAL;

	mdev_state = mdev_get_drvdata(mdev);
	if (!mdev_state)
		return -EINVAL;

	pr_info("%s: called\n", __func__);

	return 0;
}
ssize_t mtty_read(struct mdev_device *mdev, char __user *buf, size_t count,
		  loff_t *ppos)
{
	unsigned int done = 0;
	int ret;

	while (count) {
		size_t filled;

		if (count >= 4 && !(*ppos % 4)) {
			u32 val;

			ret = mdev_access(mdev, (u8 *)&val, sizeof(val),
					  *ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 4;
		} else if (count >= 2 && !(*ppos % 2)) {
			u16 val;

			ret = mdev_access(mdev, (u8 *)&val, sizeof(val),
					  *ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 2;
		} else {
			u8 val;

			ret = mdev_access(mdev, (u8 *)&val, sizeof(val),
					  *ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 1;
		}

		count -= filled;
		done += filled;
		*ppos += filled;
		buf += filled;
	}

	return done;

read_err:
	return -EFAULT;
}
mtty_write(struct mdev_device
*mdev
, const char __user
*buf
,
878 size_t count
, loff_t
*ppos
)
880 unsigned int done
= 0;
886 if (count
>= 4 && !(*ppos
% 4)) {
889 if (copy_from_user(&val
, buf
, sizeof(val
)))
892 ret
= mdev_access(mdev
, (u8
*)&val
, sizeof(val
),
898 } else if (count
>= 2 && !(*ppos
% 2)) {
901 if (copy_from_user(&val
, buf
, sizeof(val
)))
904 ret
= mdev_access(mdev
, (u8
*)&val
, sizeof(val
),
913 if (copy_from_user(&val
, buf
, sizeof(val
)))
916 ret
= mdev_access(mdev
, (u8
*)&val
, sizeof(val
),
934 static int mtty_set_irqs(struct mdev_device
*mdev
, uint32_t flags
,
935 unsigned int index
, unsigned int start
,
936 unsigned int count
, void *data
)
939 struct mdev_state
*mdev_state
;
944 mdev_state
= mdev_get_drvdata(mdev
);
948 mutex_lock(&mdev_state
->ops_lock
);
950 case VFIO_PCI_INTX_IRQ_INDEX
:
951 switch (flags
& VFIO_IRQ_SET_ACTION_TYPE_MASK
) {
952 case VFIO_IRQ_SET_ACTION_MASK
:
953 case VFIO_IRQ_SET_ACTION_UNMASK
:
955 case VFIO_IRQ_SET_ACTION_TRIGGER
:
957 if (flags
& VFIO_IRQ_SET_DATA_NONE
) {
958 pr_info("%s: disable INTx\n", __func__
);
959 if (mdev_state
->intx_evtfd
)
960 eventfd_ctx_put(mdev_state
->intx_evtfd
);
964 if (flags
& VFIO_IRQ_SET_DATA_EVENTFD
) {
965 int fd
= *(int *)data
;
968 struct eventfd_ctx
*evt
;
970 evt
= eventfd_ctx_fdget(fd
);
975 mdev_state
->intx_evtfd
= evt
;
976 mdev_state
->irq_fd
= fd
;
977 mdev_state
->irq_index
= index
;
985 case VFIO_PCI_MSI_IRQ_INDEX
:
986 switch (flags
& VFIO_IRQ_SET_ACTION_TYPE_MASK
) {
987 case VFIO_IRQ_SET_ACTION_MASK
:
988 case VFIO_IRQ_SET_ACTION_UNMASK
:
990 case VFIO_IRQ_SET_ACTION_TRIGGER
:
991 if (flags
& VFIO_IRQ_SET_DATA_NONE
) {
992 if (mdev_state
->msi_evtfd
)
993 eventfd_ctx_put(mdev_state
->msi_evtfd
);
994 pr_info("%s: disable MSI\n", __func__
);
995 mdev_state
->irq_index
= VFIO_PCI_INTX_IRQ_INDEX
;
998 if (flags
& VFIO_IRQ_SET_DATA_EVENTFD
) {
999 int fd
= *(int *)data
;
1000 struct eventfd_ctx
*evt
;
1005 if (mdev_state
->msi_evtfd
)
1008 evt
= eventfd_ctx_fdget(fd
);
1013 mdev_state
->msi_evtfd
= evt
;
1014 mdev_state
->irq_fd
= fd
;
1015 mdev_state
->irq_index
= index
;
1020 case VFIO_PCI_MSIX_IRQ_INDEX
:
1021 pr_info("%s: MSIX_IRQ\n", __func__
);
1023 case VFIO_PCI_ERR_IRQ_INDEX
:
1024 pr_info("%s: ERR_IRQ\n", __func__
);
1026 case VFIO_PCI_REQ_IRQ_INDEX
:
1027 pr_info("%s: REQ_IRQ\n", __func__
);
1031 mutex_unlock(&mdev_state
->ops_lock
);
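/*
 * Userspace hands in eventfds through VFIO_DEVICE_SET_IRQS; they are
 * cached above as intx_evtfd/msi_evtfd and signalled from
 * mtty_trigger_interrupt() whenever the emulated UART raises an
 * interrupt condition.
 */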
static int mtty_trigger_interrupt(uuid_le uuid)
{
	int ret = -1;
	struct mdev_state *mdev_state;

	mdev_state = find_mdev_state_by_uuid(uuid);

	if (!mdev_state) {
		pr_info("%s: mdev not found\n", __func__);
		return -EINVAL;
	}

	if ((mdev_state->irq_index == VFIO_PCI_MSI_IRQ_INDEX) &&
	    (!mdev_state->msi_evtfd))
		return -EINVAL;
	else if ((mdev_state->irq_index == VFIO_PCI_INTX_IRQ_INDEX) &&
		 (!mdev_state->intx_evtfd)) {
		pr_info("%s: Intr eventfd not found\n", __func__);
		return -EINVAL;
	}

	if (mdev_state->irq_index == VFIO_PCI_MSI_IRQ_INDEX)
		ret = eventfd_signal(mdev_state->msi_evtfd, 1);
	else
		ret = eventfd_signal(mdev_state->intx_evtfd, 1);

#if defined(DEBUG_INTR)
	pr_info("Intx triggered\n");
#endif
	if (ret != 1)
		pr_err("%s: eventfd signal failed (%d)\n", __func__, ret);

	return ret;
}
int mtty_get_region_info(struct mdev_device *mdev,
			 struct vfio_region_info *region_info,
			 u16 *cap_type_id, void **cap_type)
{
	unsigned int size = 0;
	struct mdev_state *mdev_state;
	u32 bar_index;

	if (!mdev)
		return -EINVAL;

	mdev_state = mdev_get_drvdata(mdev);
	if (!mdev_state)
		return -EINVAL;

	bar_index = region_info->index;
	if (bar_index >= VFIO_PCI_NUM_REGIONS)
		return -EINVAL;

	mutex_lock(&mdev_state->ops_lock);

	switch (bar_index) {
	case VFIO_PCI_CONFIG_REGION_INDEX:
		size = MTTY_CONFIG_SPACE_SIZE;
		break;
	case VFIO_PCI_BAR0_REGION_INDEX:
		size = MTTY_IO_BAR_SIZE;
		break;
	case VFIO_PCI_BAR1_REGION_INDEX:
		if (mdev_state->nr_ports == 2)
			size = MTTY_IO_BAR_SIZE;
		break;
	default:
		size = 0;
		break;
	}

	mdev_state->region_info[bar_index].size = size;
	mdev_state->region_info[bar_index].vfio_offset =
		MTTY_VFIO_PCI_INDEX_TO_OFFSET(bar_index);

	region_info->size = size;
	region_info->offset = MTTY_VFIO_PCI_INDEX_TO_OFFSET(bar_index);
	region_info->flags = VFIO_REGION_INFO_FLAG_READ |
			     VFIO_REGION_INFO_FLAG_WRITE;
	mutex_unlock(&mdev_state->ops_lock);
	return 0;
}
int mtty_get_irq_info(struct mdev_device *mdev, struct vfio_irq_info *irq_info)
{
	switch (irq_info->index) {
	case VFIO_PCI_INTX_IRQ_INDEX:
	case VFIO_PCI_MSI_IRQ_INDEX:
	case VFIO_PCI_REQ_IRQ_INDEX:
		break;
	default:
		return -EINVAL;
	}

	irq_info->flags = VFIO_IRQ_INFO_EVENTFD;
	irq_info->count = 1;

	if (irq_info->index == VFIO_PCI_INTX_IRQ_INDEX)
		irq_info->flags |= (VFIO_IRQ_INFO_MASKABLE |
				    VFIO_IRQ_INFO_AUTOMASKED);
	else
		irq_info->flags |= VFIO_IRQ_INFO_NORESIZE;

	return 0;
}
int mtty_get_device_info(struct mdev_device *mdev,
			 struct vfio_device_info *dev_info)
{
	dev_info->flags = VFIO_DEVICE_FLAGS_PCI;
	dev_info->num_regions = VFIO_PCI_NUM_REGIONS;
	dev_info->num_irqs = VFIO_PCI_NUM_IRQS;

	return 0;
}
static long mtty_ioctl(struct mdev_device *mdev, unsigned int cmd,
		       unsigned long arg)
{
	int ret = 0;
	unsigned long minsz;
	struct mdev_state *mdev_state;

	if (!mdev)
		return -EINVAL;

	mdev_state = mdev_get_drvdata(mdev);
	if (!mdev_state)
		return -ENODEV;

	switch (cmd) {
	case VFIO_DEVICE_GET_INFO:
	{
		struct vfio_device_info info;

		minsz = offsetofend(struct vfio_device_info, num_irqs);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		ret = mtty_get_device_info(mdev, &info);
		if (ret)
			return ret;

		memcpy(&mdev_state->dev_info, &info, sizeof(info));

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;

		return 0;
	}
	case VFIO_DEVICE_GET_REGION_INFO:
	{
		struct vfio_region_info info;
		u16 cap_type_id = 0;
		void *cap_type = NULL;

		minsz = offsetofend(struct vfio_region_info, offset);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		ret = mtty_get_region_info(mdev, &info, &cap_type_id,
					   &cap_type);
		if (ret)
			return ret;

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;

		return 0;
	}
	case VFIO_DEVICE_GET_IRQ_INFO:
	{
		struct vfio_irq_info info;

		minsz = offsetofend(struct vfio_irq_info, count);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if ((info.argsz < minsz) ||
		    (info.index >= mdev_state->dev_info.num_irqs))
			return -EINVAL;

		ret = mtty_get_irq_info(mdev, &info);
		if (ret)
			return ret;

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;

		return 0;
	}
	case VFIO_DEVICE_SET_IRQS:
	{
		struct vfio_irq_set hdr;
		u8 *data = NULL, *ptr = NULL;
		size_t data_size = 0;

		minsz = offsetofend(struct vfio_irq_set, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		ret = vfio_set_irqs_validate_and_prepare(&hdr,
						mdev_state->dev_info.num_irqs,
						VFIO_PCI_NUM_IRQS,
						&data_size);
		if (ret)
			return ret;

		if (data_size) {
			ptr = data = memdup_user((void __user *)(arg + minsz),
						 data_size);
			if (IS_ERR(data))
				return PTR_ERR(data);
		}

		ret = mtty_set_irqs(mdev, hdr.flags, hdr.index, hdr.start,
				    hdr.count, data);

		kfree(ptr);
		return ret;
	}
	case VFIO_DEVICE_RESET:
		return mtty_reset(mdev);
	}
	return -ENOTTY;
}
int mtty_open(struct mdev_device *mdev)
{
	pr_info("%s\n", __func__);
	return 0;
}

void mtty_close(struct mdev_device *mdev)
{
	pr_info("%s\n", __func__);
}
static ssize_t
sample_mtty_dev_show(struct device *dev, struct device_attribute *attr,
		     char *buf)
{
	return sprintf(buf, "This is phy device\n");
}

static DEVICE_ATTR_RO(sample_mtty_dev);

static struct attribute *mtty_dev_attrs[] = {
	&dev_attr_sample_mtty_dev.attr,
	NULL,
};

static const struct attribute_group mtty_dev_group = {
	.name  = "mtty_dev",
	.attrs = mtty_dev_attrs,
};

const struct attribute_group *mtty_dev_groups[] = {
	&mtty_dev_group,
	NULL,
};
static ssize_t
sample_mdev_dev_show(struct device *dev, struct device_attribute *attr,
		     char *buf)
{
	if (mdev_from_dev(dev))
		return sprintf(buf, "This is MDEV %s\n", dev_name(dev));

	return sprintf(buf, "\n");
}

static DEVICE_ATTR_RO(sample_mdev_dev);

static struct attribute *mdev_dev_attrs[] = {
	&dev_attr_sample_mdev_dev.attr,
	NULL,
};

static const struct attribute_group mdev_dev_group = {
	.name  = "vendor",
	.attrs = mdev_dev_attrs,
};

const struct attribute_group *mdev_dev_groups[] = {
	&mdev_dev_group,
	NULL,
};
static ssize_t
name_show(struct kobject *kobj, struct device *dev, char *buf)
{
	char name[MTTY_STRING_LEN];
	int i;
	const char *name_str[2] = {"Single port serial", "Dual port serial"};

	for (i = 0; i < 2; i++) {
		snprintf(name, MTTY_STRING_LEN, "%s-%d",
			 dev_driver_string(dev), i + 1);
		if (!strcmp(kobj->name, name))
			return sprintf(buf, "%s\n", name_str[i]);
	}

	return -EINVAL;
}

MDEV_TYPE_ATTR_RO(name);
static ssize_t
available_instances_show(struct kobject *kobj, struct device *dev, char *buf)
{
	char name[MTTY_STRING_LEN];
	int i;
	struct mdev_state *mds;
	int ports = 0, used = 0;

	for (i = 0; i < 2; i++) {
		snprintf(name, MTTY_STRING_LEN, "%s-%d",
			 dev_driver_string(dev), i + 1);
		if (!strcmp(kobj->name, name)) {
			ports = i + 1;
			break;
		}
	}

	if (!ports)
		return -EINVAL;

	list_for_each_entry(mds, &mdev_devices_list, next)
		used += mds->nr_ports;

	return sprintf(buf, "%d\n", (MAX_MTTYS - used)/ports);
}

MDEV_TYPE_ATTR_RO(available_instances);
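/*
 * available_instances is reported as the number of ports still
 * unclaimed (MAX_MTTYS minus ports already allocated) divided by the
 * ports-per-instance of this type, so dual-port devices consume the
 * shared budget twice as fast as single-port ones.
 */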
static ssize_t device_api_show(struct kobject *kobj, struct device *dev,
			       char *buf)
{
	return sprintf(buf, "%s\n", VFIO_DEVICE_API_PCI_STRING);
}

MDEV_TYPE_ATTR_RO(device_api);

static struct attribute *mdev_types_attrs[] = {
	&mdev_type_attr_name.attr,
	&mdev_type_attr_device_api.attr,
	&mdev_type_attr_available_instances.attr,
	NULL,
};

static struct attribute_group mdev_type_group1 = {
	.name  = "1",
	.attrs = mdev_types_attrs,
};

static struct attribute_group mdev_type_group2 = {
	.name  = "2",
	.attrs = mdev_types_attrs,
};

struct attribute_group *mdev_type_groups[] = {
	&mdev_type_group1,
	&mdev_type_group2,
	NULL,
};
static const struct mdev_parent_ops mdev_fops = {
	.owner			= THIS_MODULE,
	.dev_attr_groups	= mtty_dev_groups,
	.mdev_attr_groups	= mdev_dev_groups,
	.supported_type_groups	= mdev_type_groups,
	.create			= mtty_create,
	.remove			= mtty_remove,
	.open			= mtty_open,
	.release		= mtty_close,
	.read			= mtty_read,
	.write			= mtty_write,
	.ioctl			= mtty_ioctl,
};
static void mtty_device_release(struct device *dev)
{
	dev_dbg(dev, "mtty: released\n");
}
static int __init mtty_dev_init(void)
{
	int ret = 0;

	pr_info("mtty_dev: %s\n", __func__);

	memset(&mtty_dev, 0, sizeof(mtty_dev));

	idr_init(&mtty_dev.vd_idr);

	ret = alloc_chrdev_region(&mtty_dev.vd_devt, 0, MINORMASK, MTTY_NAME);
	if (ret < 0) {
		pr_err("Error: failed to register mtty_dev, err:%d\n", ret);
		return ret;
	}

	cdev_init(&mtty_dev.vd_cdev, &vd_fops);
	cdev_add(&mtty_dev.vd_cdev, mtty_dev.vd_devt, MINORMASK);

	pr_info("major_number:%d\n", MAJOR(mtty_dev.vd_devt));

	mtty_dev.vd_class = class_create(THIS_MODULE, MTTY_CLASS_NAME);
	if (IS_ERR(mtty_dev.vd_class)) {
		pr_err("Error: failed to register mtty_dev class\n");
		ret = PTR_ERR(mtty_dev.vd_class);
		goto failed1;
	}

	mtty_dev.dev.class = mtty_dev.vd_class;
	mtty_dev.dev.release = mtty_device_release;
	dev_set_name(&mtty_dev.dev, "%s", MTTY_NAME);

	ret = device_register(&mtty_dev.dev);
	if (ret)
		goto failed2;

	ret = mdev_register_device(&mtty_dev.dev, &mdev_fops);
	if (ret)
		goto failed3;

	mutex_init(&mdev_list_lock);
	INIT_LIST_HEAD(&mdev_devices_list);

	goto all_done;

failed3:
	device_unregister(&mtty_dev.dev);

failed2:
	class_destroy(mtty_dev.vd_class);

failed1:
	cdev_del(&mtty_dev.vd_cdev);
	unregister_chrdev_region(mtty_dev.vd_devt, MINORMASK);

all_done:
	return ret;
}
static void __exit mtty_dev_exit(void)
{
	mtty_dev.dev.bus = NULL;
	mdev_unregister_device(&mtty_dev.dev);

	device_unregister(&mtty_dev.dev);
	idr_destroy(&mtty_dev.vd_idr);
	cdev_del(&mtty_dev.vd_cdev);
	unregister_chrdev_region(mtty_dev.vd_devt, MINORMASK);
	class_destroy(mtty_dev.vd_class);
	mtty_dev.vd_class = NULL;
	pr_info("mtty_dev: Unloaded!\n");
}
module_init(mtty_dev_init)
module_exit(mtty_dev_exit)

MODULE_LICENSE("GPL v2");
MODULE_INFO(supported, "Test driver that simulates a serial port over PCI");
MODULE_VERSION(VERSION_STRING);
MODULE_AUTHOR(DRIVER_AUTHOR);