/*
 * Copyright (c) 2007, Neocleus Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Assign a PCI device from the host to a guest VM.
 *
 * Adapted for KVM by Qumranet.
 *
 * Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
 * Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
 * Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
 * Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
 * Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
 */

#include <sys/types.h>

#include "qemu-error.h"

#include "device-assignment.h"

#include <pci/header.h>

/* From linux/ioport.h */
#define IORESOURCE_IO       0x00000100  /* Resource type */
#define IORESOURCE_MEM      0x00000200
#define IORESOURCE_IRQ      0x00000400
#define IORESOURCE_DMA      0x00000800
#define IORESOURCE_PREFETCH 0x00002000  /* No side effects */

/* #define DEVICE_ASSIGNMENT_DEBUG 1 */

#ifdef DEVICE_ASSIGNMENT_DEBUG
#define DEBUG(fmt, ...)                                       \
    do {                                                      \
        fprintf(stderr, "%s: " fmt, __func__ , __VA_ARGS__);  \
    } while (0)
#else
#define DEBUG(fmt, ...) do { } while (0)
#endif

static void assigned_dev_load_option_rom(AssignedDevice *dev);
static void assigned_dev_unregister_msix_mmio(AssignedDevice *dev);
static void assigned_device_pci_cap_write_config(PCIDevice *pci_dev,
                                                 uint32_t address,
                                                 uint32_t val, int len);
static uint32_t assigned_device_pci_cap_read_config(PCIDevice *pci_dev,
                                                    uint32_t address, int len);

/* Merge the bits set in mask from mval into val.  Both val and mval are
 * at the same addr offset, pos is the starting offset of the mask. */
static uint32_t merge_bits(uint32_t val, uint32_t mval, uint8_t addr,
                           int len, uint8_t pos, uint32_t mask)
{
    if (!ranges_overlap(addr, len, pos, 4)) {
        return val;
    }

    if (addr >= pos) {
        mask >>= (addr - pos) * 8;
    } else {
        mask <<= (pos - addr) * 8;
    }
    mask &= 0xffffffffU >> (4 - len) * 8;

    val &= ~mask;
    val |= (mval & mask);

    return val;
}
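
/*
 * Illustrative example (not from the original source): for a 4-byte guest
 * read at PCI_INTERRUPT_LINE (0x3c) we want the hardware value except for
 * the emulated interrupt line/pin bytes, so a caller can do
 *
 *     val = merge_bits(hw_val, emulated_val, 0x3c, 4,
 *                      PCI_INTERRUPT_LINE, 0xffff);
 *
 * which copies only the low 16 bits (line + pin) from emulated_val into
 * hw_val and leaves the remaining bytes untouched.
 */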

static uint32_t assigned_dev_ioport_rw(AssignedDevRegion *dev_region,
                                       uint32_t addr, int len, uint32_t *val)
{
    uint32_t ret = 0;
    uint32_t offset = addr - dev_region->e_physbase;
    int fd = dev_region->region->resource_fd;
105 DEBUG("pwrite val=%x, len=%d, e_phys=%x, offset=%x\n",
106 *val
, len
, addr
, offset
);
107 if (pwrite(fd
, val
, len
, offset
) != len
) {
108 fprintf(stderr
, "%s - pwrite failed %s\n",
109 __func__
, strerror(errno
));
112 if (pread(fd
, &ret
, len
, offset
) != len
) {
113 fprintf(stderr
, "%s - pread failed %s\n",
114 __func__
, strerror(errno
));
115 ret
= (1UL << (len
* 8)) - 1;
117 DEBUG("pread ret=%x, len=%d, e_phys=%x, offset=%x\n",
118 ret
, len
, addr
, offset
);
    } else {
        uint32_t port = offset + dev_region->u.r_baseport;

        if (val) {
            DEBUG("out val=%x, len=%d, e_phys=%x, host=%x\n",
                  *val, len, addr, port);
        } else {
            DEBUG("in val=%x, len=%d, e_phys=%x, host=%x\n",
                  ret, len, addr, port);
        }
    }

    return ret;
}
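
/*
 * The byte/word/long ioport handlers below are thin wrappers around
 * assigned_dev_ioport_rw(): writes pass a pointer to the value so the write
 * path is taken, reads pass NULL and consume the returned value.
 */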

static void assigned_dev_ioport_writeb(void *opaque, uint32_t addr,
                                       uint32_t value)
{
    assigned_dev_ioport_rw(opaque, addr, 1, &value);
}

static void assigned_dev_ioport_writew(void *opaque, uint32_t addr,
                                       uint32_t value)
{
    assigned_dev_ioport_rw(opaque, addr, 2, &value);
}

static void assigned_dev_ioport_writel(void *opaque, uint32_t addr,
                                       uint32_t value)
{
    assigned_dev_ioport_rw(opaque, addr, 4, &value);
}

static uint32_t assigned_dev_ioport_readb(void *opaque, uint32_t addr)
{
    return assigned_dev_ioport_rw(opaque, addr, 1, NULL);
}

static uint32_t assigned_dev_ioport_readw(void *opaque, uint32_t addr)
{
    return assigned_dev_ioport_rw(opaque, addr, 2, NULL);
}

static uint32_t assigned_dev_ioport_readl(void *opaque, uint32_t addr)
{
    return assigned_dev_ioport_rw(opaque, addr, 4, NULL);
}
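
/*
 * "Slow" BAR accessors: used for mmap()ed MMIO BARs that cannot be mapped
 * directly into the guest (regions whose size is not a multiple of the page
 * size), so every guest access is bounced through these callbacks and
 * performed on the host mapping at u.r_virtbase.
 */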

static uint32_t slow_bar_readb(void *opaque, target_phys_addr_t addr)
{
    AssignedDevRegion *d = opaque;
    uint8_t *in = d->u.r_virtbase + addr;
    uint32_t r;

    r = *in;
    DEBUG("slow_bar_readb addr=0x" TARGET_FMT_plx " val=0x%02x\n", addr, r);

    return r;
}

static uint32_t slow_bar_readw(void *opaque, target_phys_addr_t addr)
{
    AssignedDevRegion *d = opaque;
    uint16_t *in = d->u.r_virtbase + addr;
    uint32_t r;

    r = *in;
    DEBUG("slow_bar_readw addr=0x" TARGET_FMT_plx " val=0x%04x\n", addr, r);

    return r;
}

static uint32_t slow_bar_readl(void *opaque, target_phys_addr_t addr)
{
    AssignedDevRegion *d = opaque;
    uint32_t *in = d->u.r_virtbase + addr;
    uint32_t r;

    r = *in;
    DEBUG("slow_bar_readl addr=0x" TARGET_FMT_plx " val=0x%08x\n", addr, r);

    return r;
}

static void slow_bar_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    AssignedDevRegion *d = opaque;
    uint8_t *out = d->u.r_virtbase + addr;

    DEBUG("slow_bar_writeb addr=0x" TARGET_FMT_plx " val=0x%02x\n", addr, val);
    *out = val;
}

static void slow_bar_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    AssignedDevRegion *d = opaque;
    uint16_t *out = d->u.r_virtbase + addr;

    DEBUG("slow_bar_writew addr=0x" TARGET_FMT_plx " val=0x%04x\n", addr, val);
    *out = val;
}

static void slow_bar_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    AssignedDevRegion *d = opaque;
    uint32_t *out = d->u.r_virtbase + addr;

    DEBUG("slow_bar_writel addr=0x" TARGET_FMT_plx " val=0x%08x\n", addr, val);
    *out = val;
}

static CPUWriteMemoryFunc * const slow_bar_write[] = {
    &slow_bar_writeb,
    &slow_bar_writew,
    &slow_bar_writel,
};

static CPUReadMemoryFunc * const slow_bar_read[] = {
    &slow_bar_readb,
    &slow_bar_readw,
    &slow_bar_readl,
};

static void assigned_dev_iomem_map(PCIDevice *pci_dev, int region_num,
                                   pcibus_t e_phys, pcibus_t e_size, int type)
{
    AssignedDevice *r_dev = DO_UPCAST(AssignedDevice, dev, pci_dev);
    AssignedDevRegion *region = &r_dev->v_addrs[region_num];
    PCIRegion *real_region = &r_dev->real_device.regions[region_num];

    DEBUG("e_phys=%08" FMT_PCIBUS " r_virt=%p type=%d len=%08" FMT_PCIBUS
          " region_num=%d\n",
          e_phys, region->u.r_virtbase, type, e_size, region_num);

    region->e_physbase = e_phys;
    region->e_size = e_size;

    cpu_register_physical_memory(e_phys, e_size, region->memory_index);

    /* deal with MSI-X MMIO page */
    if (real_region->base_addr <= r_dev->msix_table_addr &&
        real_region->base_addr + real_region->size >= r_dev->msix_table_addr) {
        int offset = r_dev->msix_table_addr - real_region->base_addr;

        cpu_register_physical_memory(e_phys + offset,
                                     TARGET_PAGE_SIZE, r_dev->mmio_index);
    }
}
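
/*
 * I/O port BARs: on the first mapping, if there is no usable sysfs resource
 * file, the host port range is handed to KVM via kvm_add_ioport_region();
 * classic ioport read/write handlers are also registered so guest accesses
 * funnel through assigned_dev_ioport_rw().
 */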

static void assigned_dev_ioport_map(PCIDevice *pci_dev, int region_num,
                                    pcibus_t addr, pcibus_t size, int type)
{
    AssignedDevice *r_dev = DO_UPCAST(AssignedDevice, dev, pci_dev);
    AssignedDevRegion *region = &r_dev->v_addrs[region_num];
    int first_map = (region->e_size == 0);
    int r;

    region->e_physbase = addr;
    region->e_size = size;

    DEBUG("e_phys=0x%" FMT_PCIBUS " r_baseport=%x type=0x%x len=%" FMT_PCIBUS
          " region_num=%d\n",
          addr, region->u.r_baseport, type, size, region_num);

    if (first_map && region->region->resource_fd < 0) {
        r = kvm_add_ioport_region(region->u.r_baseport, region->r_size);
        if (r < 0) {
            fprintf(stderr, "%s: failed to enable ioport access (%m)\n",
                    __func__);
        }
    }

    register_ioport_read(addr, size, 1, assigned_dev_ioport_readb,
                         (r_dev->v_addrs + region_num));
    register_ioport_read(addr, size, 2, assigned_dev_ioport_readw,
                         (r_dev->v_addrs + region_num));
    register_ioport_read(addr, size, 4, assigned_dev_ioport_readl,
                         (r_dev->v_addrs + region_num));
    register_ioport_write(addr, size, 1, assigned_dev_ioport_writeb,
                          (r_dev->v_addrs + region_num));
    register_ioport_write(addr, size, 2, assigned_dev_ioport_writew,
                          (r_dev->v_addrs + region_num));
    register_ioport_write(addr, size, 4, assigned_dev_ioport_writel,
                          (r_dev->v_addrs + region_num));
}
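
/*
 * Raw access to the real device's config space through the sysfs "config"
 * file descriptor.  Reads and writes are retried on EINTR/EAGAIN; other
 * failures are only reported and the (possibly partial) value is used as-is.
 */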

static uint32_t assigned_dev_pci_read(PCIDevice *d, int pos, int len)
{
    AssignedDevice *pci_dev = DO_UPCAST(AssignedDevice, dev, d);
    uint32_t val;
    ssize_t ret;
    int fd = pci_dev->real_device.config_fd;

again:
    ret = pread(fd, &val, len, pos);
    if (ret != len) {
        if ((ret < 0) && (errno == EINTR || errno == EAGAIN))
            goto again;

        fprintf(stderr, "%s: pread failed, ret = %zd errno = %d\n",
                __func__, ret, errno);
    }

    return val;
}

static uint8_t assigned_dev_pci_read_byte(PCIDevice *d, int pos)
{
    return (uint8_t)assigned_dev_pci_read(d, pos, 1);
}

static void assigned_dev_pci_write(PCIDevice *d, int pos, uint32_t val, int len)
{
    AssignedDevice *pci_dev = DO_UPCAST(AssignedDevice, dev, d);
    ssize_t ret;
    int fd = pci_dev->real_device.config_fd;

again:
    ret = pwrite(fd, &val, len, pos);
    if (ret != len) {
        if ((ret < 0) && (errno == EINTR || errno == EAGAIN))
            goto again;

        fprintf(stderr, "%s: pwrite failed, ret = %zd errno = %d\n",
                __func__, ret, errno);
    }
}

static uint8_t pci_find_cap_offset(PCIDevice *d, uint8_t cap, uint8_t start)
{
    int id;
    int max_cap = 48;
    int pos = start ? start : PCI_CAPABILITY_LIST;
    int status;

    status = assigned_dev_pci_read_byte(d, PCI_STATUS);
    if ((status & PCI_STATUS_CAP_LIST) == 0)
        return 0;

    while (max_cap--) {
        pos = assigned_dev_pci_read_byte(d, pos);
        if (pos < 0x40)
            break;

        pos &= ~3;
        id = assigned_dev_pci_read_byte(d, pos + PCI_CAP_LIST_ID);

        if (id == 0xff)
            break;
        if (id == cap)
            return pos;

        pos += PCI_CAP_LIST_NEXT;
    }
    return 0;
}
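
/*
 * Guest config space writes: emulated capabilities go to the capability
 * handler; COMMAND is both emulated and forwarded; BARs and the ROM address
 * are fully virtualized; the capability pointer and interrupt line/pin are
 * emulated but the write is still merged with the real device's bytes and
 * forwarded so neighbouring registers reach the hardware.
 */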

static void assigned_dev_pci_write_config(PCIDevice *d, uint32_t address,
                                          uint32_t val, int len)
{
    int fd;
    ssize_t ret;
    AssignedDevice *pci_dev = DO_UPCAST(AssignedDevice, dev, d);

    DEBUG("(%x.%x): address=%04x val=0x%08x len=%d\n",
          ((d->devfn >> 3) & 0x1F), (d->devfn & 0x7),
          (uint16_t) address, val, len);

    if (address >= PCI_CONFIG_HEADER_SIZE && d->config_map[address]) {
        return assigned_device_pci_cap_write_config(d, address, val, len);
    }

    if (ranges_overlap(address, len, PCI_COMMAND, 2)) {
        pci_default_write_config(d, address, val, len);
        /* Continue to program the card */
    }

    /*
     * - base address registers
     * - ROM base address & capability pointer
     * - interrupt line & pin
     */
    if (ranges_overlap(address, len, PCI_BASE_ADDRESS_0, 24) ||
        ranges_overlap(address, len, PCI_ROM_ADDRESS, 4)) {
        pci_default_write_config(d, address, val, len);
        return;
    } else if (ranges_overlap(address, len, PCI_CAPABILITY_LIST, 1) ||
               ranges_overlap(address, len, PCI_INTERRUPT_LINE, 2)) {
        uint32_t real_val;

        pci_default_write_config(d, address, val, len);

        /* Ensure that writes to overlapping areas we don't virtualize still
         * get handled by the device. */
        real_val = assigned_dev_pci_read(d, address, len);
        val = merge_bits(val, real_val, address, len,
                         PCI_CAPABILITY_LIST, 0xff);
        val = merge_bits(val, real_val, address, len,
                         PCI_INTERRUPT_LINE, 0xffff);
    }

    DEBUG("NON BAR (%x.%x): address=%04x val=0x%08x len=%d\n",
          ((d->devfn >> 3) & 0x1F), (d->devfn & 0x7),
          (uint16_t) address, val, len);

    fd = pci_dev->real_device.config_fd;

again:
    ret = pwrite(fd, &val, len, address);
    if (ret != len) {
        if ((ret < 0) && (errno == EINTR || errno == EAGAIN))
            goto again;

        fprintf(stderr, "%s: pwrite failed, ret = %zd errno = %d\n",
                __func__, ret, errno);
    }
}

static uint32_t assigned_dev_pci_read_config(PCIDevice *d, uint32_t address,
                                             int len)
{
    uint32_t val = 0, virt_val;
    int fd;
    ssize_t ret;
    AssignedDevice *pci_dev = DO_UPCAST(AssignedDevice, dev, d);

    if (address >= PCI_CONFIG_HEADER_SIZE && d->config_map[address]) {
        val = assigned_device_pci_cap_read_config(d, address, len);
        DEBUG("(%x.%x): address=%04x val=0x%08x len=%d\n",
              (d->devfn >> 3) & 0x1F, (d->devfn & 0x7), address, val, len);
        return val;
    }

    /*
     * - vendor & device ID
     * - base address registers
     */
    if (ranges_overlap(address, len, PCI_VENDOR_ID, 4) ||
        ranges_overlap(address, len, PCI_BASE_ADDRESS_0, 24) ||
        ranges_overlap(address, len, PCI_ROM_ADDRESS, 4)) {
        val = pci_default_read_config(d, address, len);
        DEBUG("(%x.%x): address=%04x val=0x%08x len=%d\n",
              (d->devfn >> 3) & 0x1F, (d->devfn & 0x7), address, val, len);
        return val;
    }

    fd = pci_dev->real_device.config_fd;

again:
    ret = pread(fd, &val, len, address);
    if (ret != len) {
        if ((ret < 0) && (errno == EINTR || errno == EAGAIN))
            goto again;

        fprintf(stderr, "%s: pread failed, ret = %zd errno = %d\n",
                __func__, ret, errno);
    }

    DEBUG("(%x.%x): address=%04x val=0x%08x len=%d\n",
          (d->devfn >> 3) & 0x1F, (d->devfn & 0x7), address, val, len);

    if (pci_dev->emulate_cmd_mask) {
        val = merge_bits(val, pci_default_read_config(d, address, len),
                         address, len, PCI_COMMAND, pci_dev->emulate_cmd_mask);
    }

    /*
     * Merge bits from virtualized
     * - capability pointer
     * - interrupt line & pin
     */
    virt_val = pci_default_read_config(d, address, len);
    val = merge_bits(val, virt_val, address, len, PCI_CAPABILITY_LIST, 0xff);
    val = merge_bits(val, virt_val, address, len, PCI_INTERRUPT_LINE, 0xffff);

    if (!pci_dev->cap.available) {
        /* kill the special capabilities */
        if (address == PCI_COMMAND && len == 4) {
            val &= ~(PCI_STATUS_CAP_LIST << 16);
        } else if (address == PCI_STATUS) {
            val &= ~PCI_STATUS_CAP_LIST;
        }
    }

    return val;
}
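
/*
 * For each valid host BAR: memory BARs are mmap()ed from the sysfs resource
 * file and mapped into the guest (page-aligned regions directly via
 * qemu_ram_alloc_from_ptr(), unaligned ones through the slow_bar_* MMIO
 * callbacks); I/O BARs are registered either for access through the
 * resource fd or for raw port access when the kernel lacks support.
 */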

static int assigned_dev_register_regions(PCIRegion *io_regions,
                                         unsigned long regions_num,
                                         AssignedDevice *pci_dev)
{
    unsigned long i;
    PCIRegion *cur_region = io_regions;

    for (i = 0; i < regions_num; i++, cur_region++) {
        if (!cur_region->valid)
            continue;

        pci_dev->v_addrs[i].num = i;

        /* handle memory io regions */
        if (cur_region->type & IORESOURCE_MEM) {
            int t = cur_region->type & IORESOURCE_PREFETCH
                    ? PCI_BASE_ADDRESS_MEM_PREFETCH
                    : PCI_BASE_ADDRESS_SPACE_MEMORY;

            /* map physical memory */
            pci_dev->v_addrs[i].e_physbase = cur_region->base_addr;
            pci_dev->v_addrs[i].u.r_virtbase = mmap(NULL, cur_region->size,
                                                    PROT_WRITE | PROT_READ,
                                                    MAP_SHARED,
                                                    cur_region->resource_fd,
                                                    (off_t)0);

            if (pci_dev->v_addrs[i].u.r_virtbase == MAP_FAILED) {
                pci_dev->v_addrs[i].u.r_virtbase = NULL;
                fprintf(stderr, "%s: Error: Couldn't mmap 0x%x!\n", __func__,
                        (uint32_t) (cur_region->base_addr));
                return -1;
            }

            pci_dev->v_addrs[i].r_size = cur_region->size;
            pci_dev->v_addrs[i].e_size = 0;

            /* add offset */
            pci_dev->v_addrs[i].u.r_virtbase +=
                (cur_region->base_addr & 0xFFF);

            if (cur_region->size & 0xFFF) {
                fprintf(stderr, "PCI region %d at address 0x%llx "
                        "has size 0x%x, which is not a multiple of 4K. "
                        "You might experience some performance hit "
                        "due to that.\n",
                        i, (unsigned long long)cur_region->base_addr,
                        cur_region->size);
                pci_dev->v_addrs[i].memory_index =
                    cpu_register_io_memory(slow_bar_read, slow_bar_write,
                                           &pci_dev->v_addrs[i],
                                           DEVICE_NATIVE_ENDIAN);
            } else {
                void *virtbase = pci_dev->v_addrs[i].u.r_virtbase;
                char name[32];

                snprintf(name, sizeof(name), "%s.bar%d",
                         pci_dev->dev.qdev.info->name, i);
                pci_dev->v_addrs[i].memory_index =
                    qemu_ram_alloc_from_ptr(&pci_dev->dev.qdev,
                                            name, cur_region->size,
                                            virtbase);
            }

            pci_register_bar((PCIDevice *) pci_dev, i, cur_region->size, t,
                             assigned_dev_iomem_map);
        } else {
            /* handle port io regions */
            uint32_t val;
            int ret;

            /* Test kernel support for ioport resource read/write.  Old
             * kernels return EIO.  New kernels only allow 1/2/4 byte reads
             * so should return EINVAL for a 3 byte read */
            ret = pread(pci_dev->v_addrs[i].region->resource_fd, &val, 3, 0);
            if (ret >= 0) {
                fprintf(stderr, "I/O port resource supports 3 byte read?!\n");
            } else if (errno != EINVAL) {
                fprintf(stderr, "Using raw in/out ioport access (sysfs - %s)\n",
                        strerror(errno));
                close(pci_dev->v_addrs[i].region->resource_fd);
                pci_dev->v_addrs[i].region->resource_fd = -1;
            }

            pci_dev->v_addrs[i].e_physbase = cur_region->base_addr;
            pci_dev->v_addrs[i].u.r_baseport = cur_region->base_addr;
            pci_dev->v_addrs[i].r_size = cur_region->size;
            pci_dev->v_addrs[i].e_size = 0;

            pci_register_bar((PCIDevice *) pci_dev, i,
                             cur_region->size, PCI_BASE_ADDRESS_SPACE_IO,
                             assigned_dev_ioport_map);

            /* not relevant for port io */
            pci_dev->v_addrs[i].memory_index = 0;
        }
    }

    return 0;
}
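
/*
 * Helpers that read numeric device properties from sysfs, e.g. from
 * /sys/bus/pci/devices/0000:01:00.0/vendor (the BDF in this path is only an
 * example).
 */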

static int get_real_id(const char *devpath, const char *idname, uint16_t *val)
{
    FILE *f;
    char name[128];
    long id;

    snprintf(name, sizeof(name), "%s%s", devpath, idname);
    f = fopen(name, "r");
    if (f == NULL) {
        fprintf(stderr, "%s: %s: %m\n", __func__, name);
        return -1;
    }
    if (fscanf(f, "%li\n", &id) == 1) {
        *val = id;
    } else {
        fclose(f);
        return -1;
    }
    fclose(f);

    return 0;
}

static int get_real_vendor_id(const char *devpath, uint16_t *val)
{
    return get_real_id(devpath, "vendor", val);
}

static int get_real_device_id(const char *devpath, uint16_t *val)
{
    return get_real_id(devpath, "device", val);
}

static int get_real_device(AssignedDevice *pci_dev, uint16_t r_seg,
                           uint8_t r_bus, uint8_t r_dev, uint8_t r_func)
{
    char dir[128], name[128];
    int fd, r = 0, v;
    FILE *f;
    unsigned long long start, end, size, flags;
    uint16_t id;
    struct stat statbuf;
    PCIRegion *rp;
    PCIDevRegions *dev = &pci_dev->real_device;

    dev->region_number = 0;

    snprintf(dir, sizeof(dir), "/sys/bus/pci/devices/%04x:%02x:%02x.%x/",
             r_seg, r_bus, r_dev, r_func);

    snprintf(name, sizeof(name), "%sconfig", dir);

    if (pci_dev->configfd_name && *pci_dev->configfd_name) {
        if (qemu_isdigit(pci_dev->configfd_name[0])) {
            dev->config_fd = strtol(pci_dev->configfd_name, NULL, 0);
        } else {
            dev->config_fd = monitor_get_fd(cur_mon, pci_dev->configfd_name);
            if (dev->config_fd < 0) {
                fprintf(stderr, "%s: (%s) unknown\n", __func__,
                        pci_dev->configfd_name);
                return 1;
            }
        }
    } else {
        dev->config_fd = open(name, O_RDWR);

        if (dev->config_fd == -1) {
            fprintf(stderr, "%s: %s: %m\n", __func__, name);
            return 1;
        }
    }
again:
    r = read(dev->config_fd, pci_dev->dev.config,
             pci_config_size(&pci_dev->dev));
    if (r < 0) {
        if (errno == EINTR || errno == EAGAIN)
            goto again;

        fprintf(stderr, "%s: read failed, errno = %d\n", __func__, errno);
    }

    /* Clear host resource mapping info.  If we choose not to register a
     * BAR, such as might be the case with the option ROM, we can get
     * confusing, unwritable, residual addresses from the host here. */
    memset(&pci_dev->dev.config[PCI_BASE_ADDRESS_0], 0, 24);
    memset(&pci_dev->dev.config[PCI_ROM_ADDRESS], 0, 4);

    snprintf(name, sizeof(name), "%sresource", dir);

    f = fopen(name, "r");
    if (f == NULL) {
        fprintf(stderr, "%s: %s: %m\n", __func__, name);
        return 1;
    }

    for (r = 0; r < PCI_ROM_SLOT; r++) {
        if (fscanf(f, "%lli %lli %lli\n", &start, &end, &flags) != 3)
            break;

        rp = dev->regions + r;
        rp->valid = 0;
        rp->resource_fd = -1;
        size = end - start + 1;
        flags &= IORESOURCE_IO | IORESOURCE_MEM | IORESOURCE_PREFETCH;
        if (size == 0 || (flags & ~IORESOURCE_PREFETCH) == 0)
            continue;
        if (flags & IORESOURCE_MEM) {
            flags &= ~IORESOURCE_IO;
        } else {
            flags &= ~IORESOURCE_PREFETCH;
        }
        snprintf(name, sizeof(name), "%sresource%d", dir, r);
        fd = open(name, O_RDWR);
        if (fd == -1)
            continue;
        rp->resource_fd = fd;

        rp->type = flags;
        rp->valid = 1;
        rp->base_addr = start;
        rp->size = size;
        pci_dev->v_addrs[r].region = rp;
        DEBUG("region %d size %d start 0x%llx type %d resource_fd %d\n",
              r, rp->size, start, rp->type, rp->resource_fd);
    }

    fclose(f);

    /* read and fill vendor ID */
    v = get_real_vendor_id(dir, &id);
    if (v) {
        return 1;
    }
    pci_dev->dev.config[0] = id & 0xff;
    pci_dev->dev.config[1] = (id & 0xff00) >> 8;

    /* read and fill device ID */
    v = get_real_device_id(dir, &id);
    if (v) {
        return 1;
    }
    pci_dev->dev.config[2] = id & 0xff;
    pci_dev->dev.config[3] = (id & 0xff00) >> 8;

    /* dealing with virtual function device */
    snprintf(name, sizeof(name), "%sphysfn/", dir);
    if (!stat(name, &statbuf)) {
        pci_dev->emulate_cmd_mask = 0xffff;
    }

    dev->region_number = r;
    return 0;
}

static QLIST_HEAD(, AssignedDevice) devs = QLIST_HEAD_INITIALIZER(devs);

#ifdef KVM_CAP_IRQ_ROUTING
static void free_dev_irq_entries(AssignedDevice *dev)
{
    int i;

    for (i = 0; i < dev->irq_entries_nr; i++)
        kvm_del_routing_entry(&dev->entry[i]);

    dev->irq_entries_nr = 0;
}
#endif

static void free_assigned_device(AssignedDevice *dev)
{
    int i;

    for (i = 0; i < dev->real_device.region_number; i++) {
        PCIRegion *pci_region = &dev->real_device.regions[i];
        AssignedDevRegion *region = &dev->v_addrs[i];

        if (!pci_region->valid) {
            continue;
        }

        if (pci_region->type & IORESOURCE_IO) {
            if (pci_region->resource_fd < 0) {
                kvm_remove_ioport_region(region->u.r_baseport,
                                         region->r_size);
            }
        } else if (pci_region->type & IORESOURCE_MEM) {
            if (region->u.r_virtbase) {
                if (region->e_size > 0) {
                    cpu_register_physical_memory(region->e_physbase,
                                                 region->e_size,
                                                 IO_MEM_UNASSIGNED);
                }
                if (region->r_size & 0xFFF) {
                    cpu_unregister_io_memory(region->memory_index);
                } else {
                    qemu_ram_unmap(region->memory_index);
                }
                if (munmap(region->u.r_virtbase,
                           (pci_region->size + 0xFFF) & 0xFFFFF000)) {
                    fprintf(stderr,
                            "Failed to unmap assigned device region: %s\n",
                            strerror(errno));
                }
            }
        }
        if (pci_region->resource_fd >= 0) {
            close(pci_region->resource_fd);
        }
    }

    if (dev->cap.available & ASSIGNED_DEVICE_CAP_MSIX) {
        assigned_dev_unregister_msix_mmio(dev);
    }
    if (dev->real_device.config_fd >= 0) {
        close(dev->real_device.config_fd);
    }

#ifdef KVM_CAP_IRQ_ROUTING
    free_dev_irq_entries(dev);
#endif
}

static uint32_t calc_assigned_dev_id(uint16_t seg, uint8_t bus, uint8_t devfn)
{
    return (uint32_t)seg << 16 | (uint32_t)bus << 8 | (uint32_t)devfn;
}
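
/*
 * Example (illustrative only): host device 0000:01:10.2 has seg=0x0000,
 * bus=0x01 and devfn=PCI_DEVFN(0x10, 2)=0x82, so calc_assigned_dev_id()
 * returns 0x00000182; this id is how the KVM assignment ioctls identify the
 * device.
 */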

static void assign_failed_examine(AssignedDevice *dev)
{
    char name[PATH_MAX], dir[PATH_MAX], driver[PATH_MAX] = {}, *ns;
    uint16_t vendor_id, device_id;
    int r;

    sprintf(dir, "/sys/bus/pci/devices/%04x:%02x:%02x.%01x/",
            dev->host.seg, dev->host.bus, dev->host.dev, dev->host.func);

    sprintf(name, "%sdriver", dir);

    r = readlink(name, driver, sizeof(driver));
    if ((r <= 0) || r >= sizeof(driver) || !(ns = strrchr(driver, '/'))) {
        goto fail;
    }

    ns++;

    if (get_real_vendor_id(dir, &vendor_id) ||
        get_real_device_id(dir, &device_id)) {
        goto fail;
    }

    fprintf(stderr, "*** The driver '%s' is occupying your device "
            "%04x:%02x:%02x.%x.\n",
            ns, dev->host.seg, dev->host.bus, dev->host.dev, dev->host.func);
    fprintf(stderr, "***\n");
    fprintf(stderr, "*** You can try the following commands to free it:\n");
    fprintf(stderr, "***\n");
    fprintf(stderr, "*** $ echo \"%04x %04x\" > /sys/bus/pci/drivers/pci-stub/"
            "new_id\n", vendor_id, device_id);
    fprintf(stderr, "*** $ echo \"%04x:%02x:%02x.%x\" > /sys/bus/pci/drivers/"
            "%s/unbind\n",
            dev->host.seg, dev->host.bus, dev->host.dev, dev->host.func, ns);
    fprintf(stderr, "*** $ echo \"%04x:%02x:%02x.%x\" > /sys/bus/pci/drivers/"
            "pci-stub/bind\n",
            dev->host.seg, dev->host.bus, dev->host.dev, dev->host.func);
    fprintf(stderr, "*** $ echo \"%04x %04x\" > /sys/bus/pci/drivers/pci-stub"
            "/remove_id\n", vendor_id, device_id);
    fprintf(stderr, "***\n");

    return;

fail:
    fprintf(stderr, "Couldn't find out why.\n");
}

static int assign_device(AssignedDevice *dev)
{
    struct kvm_assigned_pci_dev assigned_dev_data;
    int r;

#ifdef KVM_CAP_PCI_SEGMENT
    /* Only pass non-zero PCI segment to capable module */
    if (!kvm_check_extension(kvm_state, KVM_CAP_PCI_SEGMENT) &&
        dev->h_segnr) {
        fprintf(stderr, "Can't assign device inside non-zero PCI segment "
                "as this KVM module doesn't support it.\n");
        return -ENODEV;
    }
#endif

    memset(&assigned_dev_data, 0, sizeof(assigned_dev_data));
    assigned_dev_data.assigned_dev_id =
        calc_assigned_dev_id(dev->h_segnr, dev->h_busnr, dev->h_devfn);
#ifdef KVM_CAP_PCI_SEGMENT
    assigned_dev_data.segnr = dev->h_segnr;
#endif
    assigned_dev_data.busnr = dev->h_busnr;
    assigned_dev_data.devfn = dev->h_devfn;

#ifdef KVM_CAP_IOMMU
    /* We always enable the IOMMU unless disabled on the command line */
    if (dev->features & ASSIGNED_DEVICE_USE_IOMMU_MASK) {
        if (!kvm_check_extension(kvm_state, KVM_CAP_IOMMU)) {
            fprintf(stderr, "No IOMMU found.  Unable to assign device \"%s\"\n",
                    dev->dev.qdev.id);
            return -ENODEV;
        }
        assigned_dev_data.flags |= KVM_DEV_ASSIGN_ENABLE_IOMMU;
    }
#else
    dev->features &= ~ASSIGNED_DEVICE_USE_IOMMU_MASK;
#endif
    if (!(dev->features & ASSIGNED_DEVICE_USE_IOMMU_MASK)) {
        fprintf(stderr,
                "WARNING: Assigning a device without IOMMU protection can "
                "cause host memory corruption if the device issues DMA write "
                "requests!\n");
    }

    r = kvm_assign_pci_device(kvm_state, &assigned_dev_data);
    if (r < 0) {
        fprintf(stderr, "Failed to assign device \"%s\" : %s\n",
                dev->dev.qdev.id, strerror(-r));

        if (r == -EBUSY) {
            assign_failed_examine(dev);
        }
    }
    return r;
}

static int assign_irq(AssignedDevice *dev)
{
    struct kvm_assigned_irq assigned_irq_data;
    int irq, r = 0;

    /* Interrupt PIN 0 means don't use INTx */
    if (assigned_dev_pci_read_byte(&dev->dev, PCI_INTERRUPT_PIN) == 0)
        return 0;

    irq = pci_map_irq(&dev->dev, dev->intpin);
    irq = piix_get_irq(irq);

#ifdef TARGET_IA64
    irq = ipf_map_irq(&dev->dev, irq);
#endif

    if (dev->girq == irq)
        return r;

    memset(&assigned_irq_data, 0, sizeof(assigned_irq_data));
    assigned_irq_data.assigned_dev_id =
        calc_assigned_dev_id(dev->h_segnr, dev->h_busnr, dev->h_devfn);
    assigned_irq_data.guest_irq = irq;
    assigned_irq_data.host_irq = dev->real_device.irq;
#ifdef KVM_CAP_ASSIGN_DEV_IRQ
    if (dev->irq_requested_type) {
        assigned_irq_data.flags = dev->irq_requested_type;
        r = kvm_deassign_irq(kvm_state, &assigned_irq_data);
        /* -ENXIO means no assigned irq */
        if (r && r != -ENXIO)
            perror("assign_irq: deassign");
    }

    assigned_irq_data.flags = KVM_DEV_IRQ_GUEST_INTX;
    if (dev->features & ASSIGNED_DEVICE_PREFER_MSI_MASK &&
        dev->cap.available & ASSIGNED_DEVICE_CAP_MSI)
        assigned_irq_data.flags |= KVM_DEV_IRQ_HOST_MSI;
    else
        assigned_irq_data.flags |= KVM_DEV_IRQ_HOST_INTX;
#endif

    r = kvm_assign_irq(kvm_state, &assigned_irq_data);
    if (r < 0) {
        fprintf(stderr, "Failed to assign irq for \"%s\": %s\n",
                dev->dev.qdev.id, strerror(-r));
        fprintf(stderr, "Perhaps you are assigning a device "
                "that shares an IRQ with another device?\n");
        return r;
    }

    dev->girq = irq;
    dev->irq_requested_type = assigned_irq_data.flags;
    return r;
}

static void deassign_device(AssignedDevice *dev)
{
#ifdef KVM_CAP_DEVICE_DEASSIGNMENT
    struct kvm_assigned_pci_dev assigned_dev_data;
    int r;

    memset(&assigned_dev_data, 0, sizeof(assigned_dev_data));
    assigned_dev_data.assigned_dev_id =
        calc_assigned_dev_id(dev->h_segnr, dev->h_busnr, dev->h_devfn);

    r = kvm_deassign_pci_device(kvm_state, &assigned_dev_data);
    if (r < 0)
        fprintf(stderr, "Failed to deassign device \"%s\" : %s\n",
                dev->dev.qdev.id, strerror(-r));
#endif
}

AssignedDevInfo *get_assigned_device(int pcibus, int slot)
{
    AssignedDevice *assigned_dev = NULL;
    AssignedDevInfo *adev = NULL;

    QLIST_FOREACH(adev, &adev_head, next) {
        assigned_dev = adev->assigned_dev;
        if (pci_bus_num(assigned_dev->dev.bus) == pcibus &&
            PCI_SLOT(assigned_dev->dev.devfn) == slot)
            return adev;
    }

    return NULL;
}

/* The pci config space got updated. Check if irq numbers have changed. */
void assigned_dev_update_irqs(void)
{
    AssignedDevice *dev, *next;
    int r;

    dev = QLIST_FIRST(&devs);
    while (dev) {
        next = QLIST_NEXT(dev, next);
        r = assign_irq(dev);
        if (r < 0)
            qdev_unplug(&dev->dev.qdev);
        dev = next;
    }
}

#ifdef KVM_CAP_IRQ_ROUTING

#ifdef KVM_CAP_DEVICE_MSI
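/*
 * Called when the guest touches the MSI control byte: any previously
 * requested interrupt is deassigned, and if the guest enabled MSI a new KVM
 * routing entry is built from the address/data it programmed, committed, and
 * assigned to the device.
 */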
static void assigned_dev_update_msi(PCIDevice *pci_dev, unsigned int ctrl_pos)
{
    struct kvm_assigned_irq assigned_irq_data;
    AssignedDevice *assigned_dev = DO_UPCAST(AssignedDevice, dev, pci_dev);
    uint8_t ctrl_byte = pci_dev->config[ctrl_pos];
    int r;

    memset(&assigned_irq_data, 0, sizeof assigned_irq_data);
    assigned_irq_data.assigned_dev_id =
        calc_assigned_dev_id(assigned_dev->h_segnr, assigned_dev->h_busnr,
                             (uint8_t)assigned_dev->h_devfn);

    /* Some guests gratuitously disable MSI even if they're not using it,
     * try to catch this by only deassigning irqs if the guest is using
     * MSI or intends to start. */
    if ((assigned_dev->irq_requested_type & KVM_DEV_IRQ_GUEST_MSI) ||
        (ctrl_byte & PCI_MSI_FLAGS_ENABLE)) {
        assigned_irq_data.flags = assigned_dev->irq_requested_type;
        free_dev_irq_entries(assigned_dev);
        r = kvm_deassign_irq(kvm_state, &assigned_irq_data);
        /* -ENXIO means no assigned irq */
        if (r && r != -ENXIO)
            perror("assigned_dev_update_msi: deassign irq");

        assigned_dev->irq_requested_type = 0;
    }

    if (ctrl_byte & PCI_MSI_FLAGS_ENABLE) {
        int pos = ctrl_pos - PCI_MSI_FLAGS;

        assigned_dev->entry = qemu_mallocz(sizeof(*(assigned_dev->entry)));
        assigned_dev->entry->u.msi.address_lo =
            pci_get_long(pci_dev->config + pos + PCI_MSI_ADDRESS_LO);
        assigned_dev->entry->u.msi.address_hi = 0;
        assigned_dev->entry->u.msi.data =
            pci_get_word(pci_dev->config + pos + PCI_MSI_DATA_32);
        assigned_dev->entry->type = KVM_IRQ_ROUTING_MSI;
        r = kvm_get_irq_route_gsi();
        if (r < 0) {
            perror("assigned_dev_update_msi: kvm_get_irq_route_gsi");
            return;
        }
        assigned_dev->entry->gsi = r;

        kvm_add_routing_entry(assigned_dev->entry);
        if (kvm_commit_irq_routes() < 0) {
            perror("assigned_dev_update_msi: kvm_commit_irq_routes");
            assigned_dev->cap.state &= ~ASSIGNED_DEVICE_MSI_ENABLED;
            return;
        }
        assigned_dev->irq_entries_nr = 1;

        assigned_irq_data.guest_irq = assigned_dev->entry->gsi;
        assigned_irq_data.flags = KVM_DEV_IRQ_HOST_MSI | KVM_DEV_IRQ_GUEST_MSI;
        if (kvm_assign_irq(kvm_state, &assigned_irq_data) < 0) {
            perror("assigned_dev_enable_msi: assign irq");
        }

        assigned_dev->girq = -1;
        assigned_dev->irq_requested_type = assigned_irq_data.flags;
    } else {
        assign_irq(assigned_dev);
    }
}
#endif

#ifdef KVM_CAP_DEVICE_MSIX
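/*
 * Walk the shadow MSI-X table page the guest has been writing to, count the
 * entries it actually programmed, and build one KVM MSI routing entry per
 * used vector before binding them with the KVM set-MSI-X-nr/entry calls.
 */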
static int assigned_dev_update_msix_mmio(PCIDevice *pci_dev)
{
    AssignedDevice *adev = DO_UPCAST(AssignedDevice, dev, pci_dev);
    uint16_t entries_nr = 0, entries_max_nr;
    int pos = 0, i, r = 0;
    uint32_t msg_addr, msg_upper_addr, msg_data, msg_ctrl;
    struct kvm_assigned_msix_nr msix_nr;
    struct kvm_assigned_msix_entry msix_entry;
    void *va = adev->msix_table_page;

    pos = pci_find_capability(pci_dev, PCI_CAP_ID_MSIX);

    entries_max_nr = *(uint16_t *)(pci_dev->config + pos + 2);
    entries_max_nr &= PCI_MSIX_TABSIZE;
    entries_max_nr += 1;

    /* Get the usable entry number for allocating */
    for (i = 0; i < entries_max_nr; i++) {
        memcpy(&msg_ctrl, va + i * 16 + 12, 4);
        memcpy(&msg_data, va + i * 16 + 8, 4);
        /* Ignore unused entry even it's unmasked */
        if (msg_data == 0)
            continue;
        entries_nr++;
    }

    if (entries_nr == 0) {
        fprintf(stderr, "MSI-X entry number is zero!\n");
        return -EINVAL;
    }
    msix_nr.assigned_dev_id = calc_assigned_dev_id(adev->h_segnr, adev->h_busnr,
                                                   (uint8_t)adev->h_devfn);
    msix_nr.entry_nr = entries_nr;
    r = kvm_assign_set_msix_nr(kvm_state, &msix_nr);
    if (r != 0) {
        fprintf(stderr, "fail to set MSI-X entry number for MSIX! %s\n",
                strerror(-r));
        return r;
    }

    free_dev_irq_entries(adev);
    adev->irq_entries_nr = entries_nr;
    adev->entry = qemu_mallocz(entries_nr * sizeof(*(adev->entry)));

    msix_entry.assigned_dev_id = msix_nr.assigned_dev_id;
    entries_nr = 0;
    for (i = 0; i < entries_max_nr; i++) {
        if (entries_nr >= msix_nr.entry_nr)
            break;
        memcpy(&msg_ctrl, va + i * 16 + 12, 4);
        memcpy(&msg_data, va + i * 16 + 8, 4);
        if (msg_data == 0)
            continue;

        memcpy(&msg_addr, va + i * 16, 4);
        memcpy(&msg_upper_addr, va + i * 16 + 4, 4);

        r = kvm_get_irq_route_gsi();
        if (r < 0)
            return r;

        adev->entry[entries_nr].gsi = r;
        adev->entry[entries_nr].type = KVM_IRQ_ROUTING_MSI;
        adev->entry[entries_nr].flags = 0;
        adev->entry[entries_nr].u.msi.address_lo = msg_addr;
        adev->entry[entries_nr].u.msi.address_hi = msg_upper_addr;
        adev->entry[entries_nr].u.msi.data = msg_data;
        DEBUG("MSI-X data 0x%x, MSI-X addr_lo 0x%x\n!", msg_data, msg_addr);
        kvm_add_routing_entry(&adev->entry[entries_nr]);

        msix_entry.gsi = adev->entry[entries_nr].gsi;
        msix_entry.entry = i;
        r = kvm_assign_set_msix_entry(kvm_state, &msix_entry);
        if (r) {
            fprintf(stderr, "fail to set MSI-X entry! %s\n", strerror(-r));
            break;
        }
        DEBUG("MSI-X entry gsi 0x%x, entry %d\n!",
              msix_entry.gsi, msix_entry.entry);
        entries_nr++;
    }

    if (r == 0 && kvm_commit_irq_routes() < 0) {
        perror("assigned_dev_update_msix_mmio: kvm_commit_irq_routes");
        return -EINVAL;
    }

    return r;
}

static void assigned_dev_update_msix(PCIDevice *pci_dev, unsigned int ctrl_pos)
{
    struct kvm_assigned_irq assigned_irq_data;
    AssignedDevice *assigned_dev = DO_UPCAST(AssignedDevice, dev, pci_dev);
    uint16_t *ctrl_word = (uint16_t *)(pci_dev->config + ctrl_pos);
    int r;

    memset(&assigned_irq_data, 0, sizeof assigned_irq_data);
    assigned_irq_data.assigned_dev_id =
        calc_assigned_dev_id(assigned_dev->h_segnr, assigned_dev->h_busnr,
                             (uint8_t)assigned_dev->h_devfn);

    /* Some guests gratuitously disable MSIX even if they're not using it,
     * try to catch this by only deassigning irqs if the guest is using
     * MSIX or intends to start. */
    if ((assigned_dev->irq_requested_type & KVM_DEV_IRQ_GUEST_MSIX) ||
        (*ctrl_word & PCI_MSIX_ENABLE)) {
        assigned_irq_data.flags = assigned_dev->irq_requested_type;
        free_dev_irq_entries(assigned_dev);
        r = kvm_deassign_irq(kvm_state, &assigned_irq_data);
        /* -ENXIO means no assigned irq */
        if (r && r != -ENXIO)
            perror("assigned_dev_update_msix: deassign irq");

        assigned_dev->irq_requested_type = 0;
    }

    if (*ctrl_word & PCI_MSIX_ENABLE) {
        assigned_irq_data.flags = KVM_DEV_IRQ_HOST_MSIX |
                                  KVM_DEV_IRQ_GUEST_MSIX;

        if (assigned_dev_update_msix_mmio(pci_dev) < 0) {
            perror("assigned_dev_update_msix_mmio");
            return;
        }
        if (kvm_assign_irq(kvm_state, &assigned_irq_data) < 0) {
            perror("assigned_dev_enable_msix: assign irq");
            return;
        }
        assigned_dev->girq = -1;
        assigned_dev->irq_requested_type = assigned_irq_data.flags;
    } else {
        assign_irq(assigned_dev);
    }
}
#endif
#endif

/* There can be multiple VNDR capabilities per device, we need to find the
 * one that starts closest to the given address without going over. */
static uint8_t find_vndr_start(PCIDevice *pci_dev, uint32_t address)
{
    uint8_t cap = 0;
    uint8_t pos;

    for (pos = 0;
         (pos = pci_find_cap_offset(pci_dev, PCI_CAP_ID_VNDR, pos));
         pos += PCI_CAP_LIST_NEXT) {
        if (pos <= address) {
            cap = MAX(pos, cap);
        }
    }
    return cap;
}

static uint32_t assigned_device_pci_cap_read_config(PCIDevice *pci_dev,
                                                    uint32_t address, int len)
{
    uint8_t cap, cap_id = pci_dev->config_map[address];
    uint32_t val;

    switch (cap_id) {

    case PCI_CAP_ID_VPD:
        cap = pci_find_capability(pci_dev, cap_id);
        val = assigned_dev_pci_read(pci_dev, address, len);
        return merge_bits(val, pci_get_long(pci_dev->config + address),
                          address, len, cap + PCI_CAP_LIST_NEXT, 0xff);

    case PCI_CAP_ID_VNDR:
        cap = find_vndr_start(pci_dev, address);
        val = assigned_dev_pci_read(pci_dev, address, len);
        return merge_bits(val, pci_get_long(pci_dev->config + address),
                          address, len, cap + PCI_CAP_LIST_NEXT, 0xff);
    }

    return pci_default_read_config(pci_dev, address, len);
}

static void assigned_device_pci_cap_write_config(PCIDevice *pci_dev,
                                                 uint32_t address,
                                                 uint32_t val, int len)
{
    uint8_t cap_id = pci_dev->config_map[address];

    pci_default_write_config(pci_dev, address, val, len);
    switch (cap_id) {
#ifdef KVM_CAP_IRQ_ROUTING
    case PCI_CAP_ID_MSI:
#ifdef KVM_CAP_DEVICE_MSI
        {
            uint8_t cap = pci_find_capability(pci_dev, cap_id);
            if (ranges_overlap(address - cap, len, PCI_MSI_FLAGS, 1)) {
                assigned_dev_update_msi(pci_dev, cap + PCI_MSI_FLAGS);
            }
        }
#endif
        break;

    case PCI_CAP_ID_MSIX:
#ifdef KVM_CAP_DEVICE_MSIX
        {
            uint8_t cap = pci_find_capability(pci_dev, cap_id);
            if (ranges_overlap(address - cap, len, PCI_MSIX_FLAGS + 1, 1)) {
                assigned_dev_update_msix(pci_dev, cap + PCI_MSIX_FLAGS);
            }
        }
#endif
        break;
#endif

    case PCI_CAP_ID_VPD:
    case PCI_CAP_ID_VNDR:
        assigned_dev_pci_write(pci_dev, address, val, len);
        break;
    }
}
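
/*
 * Build the capability list the guest will see: the copy inherited from the
 * real device is cleared and only the capabilities this file knows how to
 * virtualize (MSI, MSI-X, PM, PCI Express, PCI-X, VPD and vendor-specific)
 * are re-added, with their fields sanitized or made read-only as needed.
 */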

static int assigned_device_pci_cap_init(PCIDevice *pci_dev)
{
    AssignedDevice *dev = DO_UPCAST(AssignedDevice, dev, pci_dev);
    PCIRegion *pci_region = dev->real_device.regions;
    int ret, pos;

    /* Clear initial capabilities pointer and status copied from hw */
    pci_set_byte(pci_dev->config + PCI_CAPABILITY_LIST, 0);
    pci_set_word(pci_dev->config + PCI_STATUS,
                 pci_get_word(pci_dev->config + PCI_STATUS) &
                 ~PCI_STATUS_CAP_LIST);

#ifdef KVM_CAP_IRQ_ROUTING
#ifdef KVM_CAP_DEVICE_MSI
    /* Expose MSI capability
     * MSI capability is the 1st capability in capability config */
    if ((pos = pci_find_cap_offset(pci_dev, PCI_CAP_ID_MSI, 0))) {
        dev->cap.available |= ASSIGNED_DEVICE_CAP_MSI;
        /* Only 32-bit/no-mask currently supported */
        if ((ret = pci_add_capability(pci_dev, PCI_CAP_ID_MSI, pos, 10)) < 0) {
            return ret;
        }

        pci_set_word(pci_dev->config + pos + PCI_MSI_FLAGS,
                     pci_get_word(pci_dev->config + pos + PCI_MSI_FLAGS) &
                     PCI_MSI_FLAGS_QMASK);
        pci_set_long(pci_dev->config + pos + PCI_MSI_ADDRESS_LO, 0);
        pci_set_word(pci_dev->config + pos + PCI_MSI_DATA_32, 0);

        /* Set writable fields */
        pci_set_word(pci_dev->wmask + pos + PCI_MSI_FLAGS,
                     PCI_MSI_FLAGS_QSIZE | PCI_MSI_FLAGS_ENABLE);
        pci_set_long(pci_dev->wmask + pos + PCI_MSI_ADDRESS_LO, 0xfffffffc);
        pci_set_word(pci_dev->wmask + pos + PCI_MSI_DATA_32, 0xffff);
    }
#endif
#ifdef KVM_CAP_DEVICE_MSIX
    /* Expose MSI-X capability */
    if ((pos = pci_find_cap_offset(pci_dev, PCI_CAP_ID_MSIX, 0))) {
        int bar_nr;
        uint32_t msix_table_entry;

        dev->cap.available |= ASSIGNED_DEVICE_CAP_MSIX;
        if ((ret = pci_add_capability(pci_dev, PCI_CAP_ID_MSIX, pos, 12)) < 0) {
            return ret;
        }

        pci_set_word(pci_dev->config + pos + PCI_MSIX_FLAGS,
                     pci_get_word(pci_dev->config + pos + PCI_MSIX_FLAGS) &
                     PCI_MSIX_TABSIZE);

        /* Only enable and function mask bits are writable */
        pci_set_word(pci_dev->wmask + pos + PCI_MSIX_FLAGS,
                     PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL);

        msix_table_entry = pci_get_long(pci_dev->config + pos + PCI_MSIX_TABLE);
        bar_nr = msix_table_entry & PCI_MSIX_BIR;
        msix_table_entry &= ~PCI_MSIX_BIR;
        dev->msix_table_addr = pci_region[bar_nr].base_addr + msix_table_entry;
    }
#endif
#endif

    /* Minimal PM support, nothing writable, device appears to NAK changes */
    if ((pos = pci_find_cap_offset(pci_dev, PCI_CAP_ID_PM, 0))) {
        uint16_t pmc;

        if ((ret = pci_add_capability(pci_dev, PCI_CAP_ID_PM, pos,
                                      PCI_PM_SIZEOF)) < 0) {
            return ret;
        }

        pmc = pci_get_word(pci_dev->config + pos + PCI_CAP_FLAGS);
        pmc &= (PCI_PM_CAP_VER_MASK | PCI_PM_CAP_DSI);
        pci_set_word(pci_dev->config + pos + PCI_CAP_FLAGS, pmc);

        /* assign_device will bring the device up to D0, so we don't need
         * to worry about doing that ourselves here. */
        pci_set_word(pci_dev->config + pos + PCI_PM_CTRL,
                     PCI_PM_CTRL_NO_SOFT_RESET);

        pci_set_byte(pci_dev->config + pos + PCI_PM_PPB_EXTENSIONS, 0);
        pci_set_byte(pci_dev->config + pos + PCI_PM_DATA_REGISTER, 0);
    }

    if ((pos = pci_find_cap_offset(pci_dev, PCI_CAP_ID_EXP, 0))) {
        uint8_t version;
        uint16_t type, devctl, lnkcap, lnksta;
        uint32_t devcap;
        int size = 0x3c; /* version 2 size */

        version = pci_get_byte(pci_dev->config + pos + PCI_EXP_FLAGS);
        version &= PCI_EXP_FLAGS_VERS;
        if (version == 1) {
            size = 0x14;
        } else if (version > 2) {
            fprintf(stderr, "Unsupported PCI express capability version %d\n",
                    version);
            return -EINVAL;
        }

        if ((ret = pci_add_capability(pci_dev, PCI_CAP_ID_EXP,
                                      pos, size)) < 0) {
            return ret;
        }

        type = pci_get_word(pci_dev->config + pos + PCI_EXP_FLAGS);
        type = (type & PCI_EXP_FLAGS_TYPE) >> 8;
        if (type != PCI_EXP_TYPE_ENDPOINT &&
            type != PCI_EXP_TYPE_LEG_END && type != PCI_EXP_TYPE_RC_END) {
            fprintf(stderr,
                    "Device assignment only supports endpoint assignment, "
                    "device type %d\n", type);
            return -EINVAL;
        }

        /* capabilities, pass existing read-only copy
         * PCI_EXP_FLAGS_IRQ: updated by hardware, should be direct read */

        /* device capabilities: hide FLR */
        devcap = pci_get_long(pci_dev->config + pos + PCI_EXP_DEVCAP);
        devcap &= ~PCI_EXP_DEVCAP_FLR;
        pci_set_long(pci_dev->config + pos + PCI_EXP_DEVCAP, devcap);

        /* device control: clear all error reporting enable bits, leaving
         *                 only a few host values.  Note, these are
         *                 all writable, but not passed to hw.
         */
        devctl = pci_get_word(pci_dev->config + pos + PCI_EXP_DEVCTL);
        devctl = (devctl & (PCI_EXP_DEVCTL_READRQ | PCI_EXP_DEVCTL_PAYLOAD)) |
                 PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
        pci_set_word(pci_dev->config + pos + PCI_EXP_DEVCTL, devctl);
        devctl = PCI_EXP_DEVCTL_BCR_FLR | PCI_EXP_DEVCTL_AUX_PME;
        pci_set_word(pci_dev->wmask + pos + PCI_EXP_DEVCTL, ~devctl);

        /* Clear device status */
        pci_set_word(pci_dev->config + pos + PCI_EXP_DEVSTA, 0);

        /* Link capabilities, expose links and latencies, clear reporting */
        lnkcap = pci_get_word(pci_dev->config + pos + PCI_EXP_LNKCAP);
        lnkcap &= (PCI_EXP_LNKCAP_SLS | PCI_EXP_LNKCAP_MLW |
                   PCI_EXP_LNKCAP_ASPMS | PCI_EXP_LNKCAP_L0SEL |
                   PCI_EXP_LNKCAP_L1EL);
        pci_set_word(pci_dev->config + pos + PCI_EXP_LNKCAP, lnkcap);
        pci_set_word(pci_dev->wmask + pos + PCI_EXP_LNKCAP,
                     PCI_EXP_LNKCTL_ASPMC | PCI_EXP_LNKCTL_RCB |
                     PCI_EXP_LNKCTL_CCC | PCI_EXP_LNKCTL_ES |
                     PCI_EXP_LNKCTL_CLKREQ_EN | PCI_EXP_LNKCTL_HAWD);

        /* Link control, pass existing read-only copy.  Should be writable? */

        /* Link status, only expose current speed and width */
        lnksta = pci_get_word(pci_dev->config + pos + PCI_EXP_LNKSTA);
        lnksta &= (PCI_EXP_LNKSTA_CLS | PCI_EXP_LNKSTA_NLW);
        pci_set_word(pci_dev->config + pos + PCI_EXP_LNKSTA, lnksta);

        if (version >= 2) {
            /* Slot capabilities, control, status - not needed for endpoints */
            pci_set_long(pci_dev->config + pos + PCI_EXP_SLTCAP, 0);
            pci_set_word(pci_dev->config + pos + PCI_EXP_SLTCTL, 0);
            pci_set_word(pci_dev->config + pos + PCI_EXP_SLTSTA, 0);

            /* Root control, capabilities, status - not needed for endpoints */
            pci_set_word(pci_dev->config + pos + PCI_EXP_RTCTL, 0);
            pci_set_word(pci_dev->config + pos + PCI_EXP_RTCAP, 0);
            pci_set_long(pci_dev->config + pos + PCI_EXP_RTSTA, 0);

            /* Device capabilities/control 2, pass existing read-only copy */
            /* Link control 2, pass existing read-only copy */
        }
    }

    if ((pos = pci_find_cap_offset(pci_dev, PCI_CAP_ID_PCIX, 0))) {
        uint16_t cmd;
        uint32_t status;

        /* Only expose the minimum, 8 byte capability */
        if ((ret = pci_add_capability(pci_dev, PCI_CAP_ID_PCIX, pos, 8)) < 0) {
            return ret;
        }

        /* Command register, clear upper bits, including extended modes */
        cmd = pci_get_word(pci_dev->config + pos + PCI_X_CMD);
        cmd &= (PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO | PCI_X_CMD_MAX_READ |
                PCI_X_CMD_MAX_SPLIT);
        pci_set_word(pci_dev->config + pos + PCI_X_CMD, cmd);

        /* Status register, update with emulated PCI bus location, clear
         * error bits, leave the rest. */
        status = pci_get_long(pci_dev->config + pos + PCI_X_STATUS);
        status &= ~(PCI_X_STATUS_BUS | PCI_X_STATUS_DEVFN);
        status |= (pci_bus_num(pci_dev->bus) << 8) | pci_dev->devfn;
        status &= ~(PCI_X_STATUS_SPL_DISC | PCI_X_STATUS_UNX_SPL |
                    PCI_X_STATUS_SPL_ERR);
        pci_set_long(pci_dev->config + pos + PCI_X_STATUS, status);
    }

    if ((pos = pci_find_cap_offset(pci_dev, PCI_CAP_ID_VPD, 0))) {
        /* Direct R/W passthrough */
        if ((ret = pci_add_capability(pci_dev, PCI_CAP_ID_VPD, pos, 8)) < 0) {
            return ret;
        }
    }

    /* Devices can have multiple vendor capabilities, get them all */
    for (pos = 0; (pos = pci_find_cap_offset(pci_dev, PCI_CAP_ID_VNDR, pos));
         pos += PCI_CAP_LIST_NEXT) {
        uint8_t len = pci_get_byte(pci_dev->config + pos + PCI_CAP_FLAGS);
        /* Direct R/W passthrough */
        if ((ret = pci_add_capability(pci_dev, PCI_CAP_ID_VNDR,
                                      pos, len)) < 0) {
            return ret;
        }
    }

    return 0;
}

static uint32_t msix_mmio_readl(void *opaque, target_phys_addr_t addr)
{
    AssignedDevice *adev = opaque;
    unsigned int offset = addr & 0xfff;
    void *page = adev->msix_table_page;
    uint32_t val = 0;

    memcpy(&val, (void *)((char *)page + offset), 4);

    return val;
}

static uint32_t msix_mmio_readb(void *opaque, target_phys_addr_t addr)
{
    return ((msix_mmio_readl(opaque, addr & ~3)) >>
            (8 * (addr & 3))) & 0xff;
}

static uint32_t msix_mmio_readw(void *opaque, target_phys_addr_t addr)
{
    return ((msix_mmio_readl(opaque, addr & ~3)) >>
            (8 * (addr & 3))) & 0xffff;
}

static void msix_mmio_writel(void *opaque,
                             target_phys_addr_t addr, uint32_t val)
{
    AssignedDevice *adev = opaque;
    unsigned int offset = addr & 0xfff;
    void *page = adev->msix_table_page;

    DEBUG("write to MSI-X entry table mmio offset 0x%lx, val 0x%x\n",
          addr, val);
    memcpy((void *)((char *)page + offset), &val, 4);
}

static void msix_mmio_writew(void *opaque,
                             target_phys_addr_t addr, uint32_t val)
{
    msix_mmio_writel(opaque, addr & ~3,
                     (val & 0xffff) << (8 * (addr & 3)));
}

static void msix_mmio_writeb(void *opaque,
                             target_phys_addr_t addr, uint32_t val)
{
    msix_mmio_writel(opaque, addr & ~3,
                     (val & 0xff) << (8 * (addr & 3)));
}

static CPUWriteMemoryFunc *msix_mmio_write[] = {
    msix_mmio_writeb, msix_mmio_writew, msix_mmio_writel
};

static CPUReadMemoryFunc *msix_mmio_read[] = {
    msix_mmio_readb, msix_mmio_readw, msix_mmio_readl
};
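
/*
 * The page of the BAR that holds the MSI-X table is shadowed by an anonymous
 * page owned by QEMU: guest reads and writes land here (via the msix_mmio_*
 * handlers) instead of touching the real table, and
 * assigned_dev_update_msix_mmio() later copies the guest-programmed vectors
 * into KVM routing entries.
 */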

static int assigned_dev_register_msix_mmio(AssignedDevice *dev)
{
    dev->msix_table_page = mmap(NULL, 0x1000,
                                PROT_READ|PROT_WRITE,
                                MAP_ANONYMOUS|MAP_PRIVATE, 0, 0);
    if (dev->msix_table_page == MAP_FAILED) {
        fprintf(stderr, "fail allocate msix_table_page! %s\n",
                strerror(errno));
        return -EFAULT;
    }
    memset(dev->msix_table_page, 0, 0x1000);
    dev->mmio_index = cpu_register_io_memory(
                        msix_mmio_read, msix_mmio_write, dev,
                        DEVICE_NATIVE_ENDIAN);
    return 0;
}

static void assigned_dev_unregister_msix_mmio(AssignedDevice *dev)
{
    if (!dev->msix_table_page)
        return;

    cpu_unregister_io_memory(dev->mmio_index);
    dev->mmio_index = 0;

    if (munmap(dev->msix_table_page, 0x1000) == -1) {
        fprintf(stderr, "error unmapping msix_table_page! %s\n",
                strerror(errno));
    }
    dev->msix_table_page = NULL;
}

static const VMStateDescription vmstate_assigned_device = {
    .name = "pci-assign",
    .fields = (VMStateField []) {
        VMSTATE_END_OF_LIST()
    }
};

static void reset_assigned_device(DeviceState *dev)
{
    PCIDevice *pci_dev = DO_UPCAST(PCIDevice, qdev, dev);
    AssignedDevice *adev = DO_UPCAST(AssignedDevice, dev, pci_dev);
    char reset_file[64];
    const char reset[] = "1";
    int fd, ret;

    snprintf(reset_file, sizeof(reset_file),
             "/sys/bus/pci/devices/%04x:%02x:%02x.%01x/reset",
             adev->host.seg, adev->host.bus, adev->host.dev, adev->host.func);

    /*
     * Issue a device reset via pci-sysfs.  Note that we use write(2) here
     * and ignore the return value because some kernels have a bug that
     * returns 0 rather than bytes written on success, sending us into an
     * infinite retry loop using other write mechanisms.
     */
    fd = open(reset_file, O_WRONLY);
    if (fd != -1) {
        ret = write(fd, reset, strlen(reset));
        close(fd);
    }

    /*
     * When a 0 is written to the command register, the device is logically
     * disconnected from the PCI bus. This avoids further DMA transfers.
     */
    assigned_dev_pci_write_config(pci_dev, PCI_COMMAND, 0, 2);
}
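
/*
 * qdev init path: open the host device, register its BARs, build the virtual
 * capability list, then assign the device and its interrupt to the guest and
 * intercept the MSI-X table page if the device has one.
 */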

static int assigned_initfn(struct PCIDevice *pci_dev)
{
    AssignedDevice *dev = DO_UPCAST(AssignedDevice, dev, pci_dev);
    uint8_t e_device, e_intx;
    int r;

    if (!kvm_enabled()) {
        error_report("pci-assign: error: requires KVM support");
        return -1;
    }

    if (!dev->host.seg && !dev->host.bus && !dev->host.dev && !dev->host.func) {
        error_report("pci-assign: error: no host device specified");
        return -1;
    }

    if (get_real_device(dev, dev->host.seg, dev->host.bus,
                        dev->host.dev, dev->host.func)) {
        error_report("pci-assign: Error: Couldn't get real device (%s)!",
                     dev->dev.qdev.id);
        goto out;
    }

    /* handle real device's MMIO/PIO BARs */
    if (assigned_dev_register_regions(dev->real_device.regions,
                                      dev->real_device.region_number,
                                      dev))
        goto out;

    /* handle interrupt routing */
    e_device = (dev->dev.devfn >> 3) & 0x1f;
    e_intx = dev->dev.config[0x3d] - 1;
    dev->intpin = e_intx;

    dev->h_segnr = dev->host.seg;
    dev->h_busnr = dev->host.bus;
    dev->h_devfn = PCI_DEVFN(dev->host.dev, dev->host.func);

    if (assigned_device_pci_cap_init(pci_dev) < 0)
        goto out;

    /* assign device to guest */
    r = assign_device(dev);
    if (r < 0)
        goto out;

    /* assign irq for the device */
    r = assign_irq(dev);
    if (r < 0)
        goto assigned_out;

    /* intercept MSI-X entry page in the MMIO */
    if (dev->cap.available & ASSIGNED_DEVICE_CAP_MSIX)
        if (assigned_dev_register_msix_mmio(dev))
            goto assigned_out;

    assigned_dev_load_option_rom(dev);
    QLIST_INSERT_HEAD(&devs, dev, next);

    add_boot_device_path(dev->bootindex, &pci_dev->qdev, NULL);

    /* Register a vmsd so that we can mark it unmigratable. */
    vmstate_register(&dev->dev.qdev, 0, &vmstate_assigned_device, dev);
    register_device_unmigratable(&dev->dev.qdev,
                                 vmstate_assigned_device.name, dev);

    return 0;

assigned_out:
    deassign_device(dev);
out:
    free_assigned_device(dev);
    return -1;
}

static int assigned_exitfn(struct PCIDevice *pci_dev)
{
    AssignedDevice *dev = DO_UPCAST(AssignedDevice, dev, pci_dev);

    vmstate_unregister(&dev->dev.qdev, &vmstate_assigned_device, dev);
    QLIST_REMOVE(dev, next);
    deassign_device(dev);
    free_assigned_device(dev);
    return 0;
}

static int parse_hostaddr(DeviceState *dev, Property *prop, const char *str)
{
    PCIHostDevice *ptr = qdev_get_prop_ptr(dev, prop);
    int rc;

    rc = pci_parse_host_devaddr(str, &ptr->seg, &ptr->bus, &ptr->dev, &ptr->func);
    if (rc != 0)
        return -1;
    return 0;
}

static int print_hostaddr(DeviceState *dev, Property *prop, char *dest, size_t len)
{
    PCIHostDevice *ptr = qdev_get_prop_ptr(dev, prop);

    return snprintf(dest, len, "%02x:%02x.%x", ptr->bus, ptr->dev, ptr->func);
}

PropertyInfo qdev_prop_hostaddr = {
    .name  = "pci-hostaddr",
    .size  = sizeof(PCIHostDevice),
    .parse = parse_hostaddr,
    .print = print_hostaddr,
};

static PCIDeviceInfo assign_info = {
    .qdev.name    = "pci-assign",
    .qdev.desc    = "pass through host pci devices to the guest",
    .qdev.size    = sizeof(AssignedDevice),
    .qdev.reset   = reset_assigned_device,
    .init         = assigned_initfn,
    .exit         = assigned_exitfn,
    .config_read  = assigned_dev_pci_read_config,
    .config_write = assigned_dev_pci_write_config,
    .qdev.props   = (Property[]) {
        DEFINE_PROP("host", AssignedDevice, host, qdev_prop_hostaddr, PCIHostDevice),
        DEFINE_PROP_BIT("iommu", AssignedDevice, features,
                        ASSIGNED_DEVICE_USE_IOMMU_BIT, true),
        DEFINE_PROP_BIT("prefer_msi", AssignedDevice, features,
                        ASSIGNED_DEVICE_PREFER_MSI_BIT, true),
        DEFINE_PROP_INT32("bootindex", AssignedDevice, bootindex, -1),
        DEFINE_PROP_STRING("configfd", AssignedDevice, configfd_name),
        DEFINE_PROP_END_OF_LIST(),
    },
};

static void assign_register_devices(void)
{
    pci_qdev_register(&assign_info);
}

device_init(assign_register_devices)

/*
 * Scan the assigned devices for the devices that have an option ROM, and then
 * load the corresponding ROM data to RAM. If an error occurs while loading an
 * option ROM, we just ignore that option ROM and continue with the next one.
 */
static void assigned_dev_load_option_rom(AssignedDevice *dev)
{
    char name[32], rom_file[64];
    FILE *fp;
    uint8_t *ptr;
    struct stat st;
    uint8_t val = 1;

    /* If loading ROM from file, pci handles it */
    if (dev->dev.romfile || !dev->dev.rom_bar)
        return;

    snprintf(rom_file, sizeof(rom_file),
             "/sys/bus/pci/devices/%04x:%02x:%02x.%01x/rom",
             dev->host.seg, dev->host.bus, dev->host.dev, dev->host.func);

    if (stat(rom_file, &st)) {
        return;
    }

    if (access(rom_file, F_OK)) {
        fprintf(stderr, "pci-assign: Insufficient privileges for %s\n",
                rom_file);
        return;
    }

    /* Write "1" to the ROM file to enable it */
    fp = fopen(rom_file, "r+");
    if (fp == NULL) {
        return;
    }
    if (fwrite(&val, 1, 1, fp) != 1) {
        goto close_rom;
    }
    fseek(fp, 0, SEEK_SET);

    snprintf(name, sizeof(name), "%s.rom", dev->dev.qdev.info->name);
    dev->dev.rom_offset = qemu_ram_alloc(&dev->dev.qdev, name, st.st_size);
    ptr = qemu_get_ram_ptr(dev->dev.rom_offset);
    memset(ptr, 0xff, st.st_size);

    if (!fread(ptr, 1, st.st_size, fp)) {
        fprintf(stderr, "pci-assign: Cannot read from host %s\n"
                "\tDevice option ROM contents are probably invalid "
                "(check dmesg).\n\tSkip option ROM probe with rombar=0, "
                "or load from file with romfile=\n", rom_file);
        qemu_ram_free(dev->dev.rom_offset);
        dev->dev.rom_offset = 0;
        goto close_rom;
    }

    pci_register_bar(&dev->dev, PCI_ROM_SLOT,
                     st.st_size, 0, pci_map_option_rom);

close_rom:
    /* Write "0" to disable ROM */
    fseek(fp, 0, SEEK_SET);
    val = 0;
    if (!fwrite(&val, 1, 1, fp)) {
        DEBUG("%s\n", "Failed to disable pci-sysfs rom file");
    }
    fclose(fp);
}