/*
 * generic functions used by VFIO devices
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Alex Williamson <alex.williamson@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Based on qemu-kvm device-assignment:
 *  Adapted for KVM by Qumranet.
 *  Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
 *  Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
 *  Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
 *  Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
 *  Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>
#ifdef CONFIG_KVM
#include <linux/kvm.h>
#endif
#include <linux/vfio.h>

#include "hw/vfio/vfio-common.h"
#include "hw/vfio/vfio.h"
#include "exec/address-spaces.h"
#include "exec/memory.h"
#include "exec/ram_addr.h"
#include "hw/hw.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/range.h"
#include "sysemu/kvm.h"
#include "sysemu/reset.h"
#include "sysemu/runstate.h"
#include "trace.h"
#include "qapi/error.h"
#include "migration/migration.h"

VFIOGroupList vfio_group_list =
    QLIST_HEAD_INITIALIZER(vfio_group_list);
static QLIST_HEAD(, VFIOAddressSpace) vfio_address_spaces =
    QLIST_HEAD_INITIALIZER(vfio_address_spaces);

#ifdef CONFIG_KVM
/*
 * We have a single VFIO pseudo device per KVM VM.  Once created it lives
 * for the life of the VM.  Closing the file descriptor only drops our
 * reference to it and the device's reference to kvm.  Therefore once
 * initialized, this file descriptor is only released on QEMU exit and
 * we'll re-use it should another vfio device be attached before then.
 */
static int vfio_kvm_device_fd = -1;
#endif

/*
 * Common VFIO interrupt disable
 */
void vfio_disable_irqindex(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER,
        .index = index,
        .start = 0,
        .count = 0,
    };

    ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}

void vfio_unmask_single_irqindex(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_UNMASK,
        .index = index,
        .start = 0,
        .count = 1,
    };

    ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}

void vfio_mask_single_irqindex(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_MASK,
        .index = index,
        .start = 0,
        .count = 1,
    };

    ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}

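/*
 * All three helpers above send VFIO_IRQ_SET_DATA_NONE, i.e. the action is
 * the whole payload, so a caller needs nothing beyond the device fd and
 * the index.  Masking legacy INTx on a PCI device, for example, reduces to
 *
 *     vfio_mask_single_irqindex(vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
 */
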
static inline const char *action_to_str(int action)
{
    switch (action) {
    case VFIO_IRQ_SET_ACTION_MASK:
        return "MASK";
    case VFIO_IRQ_SET_ACTION_UNMASK:
        return "UNMASK";
    case VFIO_IRQ_SET_ACTION_TRIGGER:
        return "TRIGGER";
    default:
        return "UNKNOWN ACTION";
    }
}

static const char *index_to_str(VFIODevice *vbasedev, int index)
{
    if (vbasedev->type != VFIO_DEVICE_TYPE_PCI) {
        return NULL;
    }

    switch (index) {
    case VFIO_PCI_INTX_IRQ_INDEX:
        return "INTX";
    case VFIO_PCI_MSI_IRQ_INDEX:
        return "MSI";
    case VFIO_PCI_MSIX_IRQ_INDEX:
        return "MSIX";
    case VFIO_PCI_ERR_IRQ_INDEX:
        return "ERR";
    case VFIO_PCI_REQ_IRQ_INDEX:
        return "REQ";
    default:
        return NULL;
    }
}

static int vfio_ram_block_discard_disable(VFIOContainer *container, bool state)
{
    switch (container->iommu_type) {
    case VFIO_TYPE1v2_IOMMU:
    case VFIO_TYPE1_IOMMU:
        /*
         * We support coordinated discarding of RAM via the RamDiscardManager.
         */
        return ram_block_uncoordinated_discard_disable(state);
    default:
        /*
         * VFIO_SPAPR_TCE_IOMMU most probably works just fine with
         * RamDiscardManager, however, it is completely untested.
         *
         * VFIO_SPAPR_TCE_v2_IOMMU with "DMA memory preregistering" does
         * completely the opposite of managing mapping/pinning dynamically as
         * required by RamDiscardManager. We would have to special-case sections
         * with a RamDiscardManager.
         */
        return ram_block_discard_disable(state);
    }
}

int vfio_set_irq_signaling(VFIODevice *vbasedev, int index, int subindex,
                           int action, int fd, Error **errp)
{
    struct vfio_irq_set *irq_set;
    int argsz, ret = 0;
    const char *name;
    int32_t *pfd;

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | action;
    irq_set->index = index;
    irq_set->start = subindex;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;
    *pfd = fd;

    if (ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, irq_set)) {
        ret = -errno;
    }

    g_free(irq_set);

    if (!ret) {
        return 0;
    }

    error_setg_errno(errp, -ret, "VFIO_DEVICE_SET_IRQS failure");

    name = index_to_str(vbasedev, index);
    if (name) {
        error_prepend(errp, "%s-%d: ", name, subindex);
    } else {
        error_prepend(errp, "index %d-%d: ", index, subindex);
    }
    error_prepend(errp,
                  "Failed to %s %s eventfd signaling for interrupt ",
                  fd < 0 ? "tear down" : "set up", action_to_str(action));
    return ret;
}

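/*
 * A typical caller pattern, sketched for illustration (here "notifier" and
 * the vector number "nr" are assumed to be owned by the device model):
 *
 *     Error *err = NULL;
 *     int fd = event_notifier_get_fd(&notifier);
 *
 *     if (vfio_set_irq_signaling(vbasedev, VFIO_PCI_MSIX_IRQ_INDEX, nr,
 *                                VFIO_IRQ_SET_ACTION_TRIGGER, fd, &err)) {
 *         error_report_err(err);
 *     }
 *
 * Passing fd == -1 with the same index/action tears the signaling down.
 */
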
/*
 * IO Port/MMIO - Beware of the endians, VFIO is always little endian
 */
void vfio_region_write(void *opaque, hwaddr addr,
                       uint64_t data, unsigned size)
{
    VFIORegion *region = opaque;
    VFIODevice *vbasedev = region->vbasedev;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;

    switch (size) {
    case 1:
        buf.byte = data;
        break;
    case 2:
        buf.word = cpu_to_le16(data);
        break;
    case 4:
        buf.dword = cpu_to_le32(data);
        break;
    case 8:
        buf.qword = cpu_to_le64(data);
        break;
    default:
        hw_error("vfio: unsupported write size, %u bytes", size);
        break;
    }

    if (pwrite(vbasedev->fd, &buf, size, region->fd_offset + addr) != size) {
        error_report("%s(%s:region%d+0x%"HWADDR_PRIx", 0x%"PRIx64
                     ",%d) failed: %m",
                     __func__, vbasedev->name, region->nr,
                     addr, data, size);
    }

    trace_vfio_region_write(vbasedev->name, region->nr, addr, data, size);

    /*
     * A read or write to a BAR always signals an INTx EOI.  This will
     * do nothing if not pending (including not in INTx mode).  We assume
     * that a BAR access is in response to an interrupt and that BAR
     * accesses will service the interrupt.  Unfortunately, we don't know
     * which access will service the interrupt, so we're potentially
     * getting quite a few host interrupts per guest interrupt.
     */
    vbasedev->ops->vfio_eoi(vbasedev);
}

uint64_t vfio_region_read(void *opaque,
                          hwaddr addr, unsigned size)
{
    VFIORegion *region = opaque;
    VFIODevice *vbasedev = region->vbasedev;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    uint64_t data = 0;

    if (pread(vbasedev->fd, &buf, size, region->fd_offset + addr) != size) {
        error_report("%s(%s:region%d+0x%"HWADDR_PRIx", %d) failed: %m",
                     __func__, vbasedev->name, region->nr,
                     addr, size);
        return (uint64_t)-1;
    }
    switch (size) {
    case 1:
        data = buf.byte;
        break;
    case 2:
        data = le16_to_cpu(buf.word);
        break;
    case 4:
        data = le32_to_cpu(buf.dword);
        break;
    case 8:
        data = le64_to_cpu(buf.qword);
        break;
    default:
        hw_error("vfio: unsupported read size, %u bytes", size);
        break;
    }

    trace_vfio_region_read(vbasedev->name, region->nr, addr, size, data);

    /* Same as write above */
    vbasedev->ops->vfio_eoi(vbasedev);

    return data;
}

const MemoryRegionOps vfio_region_ops = {
    .read = vfio_region_read,
    .write = vfio_region_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

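/*
 * These ops back the slow (non-mmap) path of every VFIO region.  They are
 * wired up below in vfio_region_setup() via
 *
 *     memory_region_init_io(region->mem, obj, &vfio_region_ops,
 *                           region, name, region->size);
 *
 * so any guest access that does not hit an mmap'd subregion is bounced
 * through pread()/pwrite() on the device fd.
 */
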
/*
 * Device state interfaces
 */

bool vfio_mig_active(void)
{
    VFIOGroup *group;
    VFIODevice *vbasedev;

    if (QLIST_EMPTY(&vfio_group_list)) {
        return false;
    }

    QLIST_FOREACH(group, &vfio_group_list, next) {
        QLIST_FOREACH(vbasedev, &group->device_list, next) {
            if (vbasedev->migration_blocker) {
                return false;
            }
        }
    }
    return true;
}

static bool vfio_devices_all_dirty_tracking(VFIOContainer *container)
{
    VFIOGroup *group;
    VFIODevice *vbasedev;
    MigrationState *ms = migrate_get_current();

    if (!migration_is_setup_or_active(ms->state)) {
        return false;
    }

    QLIST_FOREACH(group, &container->group_list, container_next) {
        QLIST_FOREACH(vbasedev, &group->device_list, next) {
            VFIOMigration *migration = vbasedev->migration;

            if (!migration) {
                return false;
            }

            if ((vbasedev->pre_copy_dirty_page_tracking == ON_OFF_AUTO_OFF)
                && (migration->device_state & VFIO_DEVICE_STATE_RUNNING)) {
                return false;
            }
        }
    }
    return true;
}

static bool vfio_devices_all_running_and_saving(VFIOContainer *container)
{
    VFIOGroup *group;
    VFIODevice *vbasedev;
    MigrationState *ms = migrate_get_current();

    if (!migration_is_setup_or_active(ms->state)) {
        return false;
    }

    QLIST_FOREACH(group, &container->group_list, container_next) {
        QLIST_FOREACH(vbasedev, &group->device_list, next) {
            VFIOMigration *migration = vbasedev->migration;

            if (!migration) {
                return false;
            }

            if ((migration->device_state & VFIO_DEVICE_STATE_SAVING) &&
                (migration->device_state & VFIO_DEVICE_STATE_RUNNING)) {
                continue;
            } else {
                return false;
            }
        }
    }
    return true;
}

static int vfio_dma_unmap_bitmap(VFIOContainer *container,
                                 hwaddr iova, ram_addr_t size,
                                 IOMMUTLBEntry *iotlb)
{
    struct vfio_iommu_type1_dma_unmap *unmap;
    struct vfio_bitmap *bitmap;
    uint64_t pages = REAL_HOST_PAGE_ALIGN(size) / qemu_real_host_page_size;
    int ret;

    unmap = g_malloc0(sizeof(*unmap) + sizeof(*bitmap));

    unmap->argsz = sizeof(*unmap) + sizeof(*bitmap);
    unmap->iova = iova;
    unmap->size = size;
    unmap->flags |= VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP;
    bitmap = (struct vfio_bitmap *)&unmap->data;

    /*
     * cpu_physical_memory_set_dirty_lebitmap() supports pages in bitmap of
     * qemu_real_host_page_size to mark those dirty. Hence set bitmap_pgsize
     * to qemu_real_host_page_size.
     */
    bitmap->pgsize = qemu_real_host_page_size;
    bitmap->size = ROUND_UP(pages, sizeof(__u64) * BITS_PER_BYTE) /
                   BITS_PER_BYTE;

    if (bitmap->size > container->max_dirty_bitmap_size) {
        error_report("UNMAP: Size of bitmap too big 0x%"PRIx64,
                     (uint64_t)bitmap->size);
        ret = -E2BIG;
        goto unmap_exit;
    }

    bitmap->data = g_try_malloc0(bitmap->size);
    if (!bitmap->data) {
        ret = -ENOMEM;
        goto unmap_exit;
    }

    ret = ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, unmap);
    if (!ret) {
        cpu_physical_memory_set_dirty_lebitmap((unsigned long *)bitmap->data,
                                               iotlb->translated_addr, pages);
    } else {
        error_report("VFIO_UNMAP_DMA with DIRTY_BITMAP : %m");
    }

    g_free(bitmap->data);
unmap_exit:
    g_free(unmap);
    return ret;
}

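/*
 * A worked example of the bitmap sizing above, assuming a 4 KiB host page
 * size: unmapping 1 GiB covers 262144 pages, and rounding up to a multiple
 * of 64 bits keeps 262144 bits, i.e. a 32 KiB bitmap with one bit per host
 * page.
 */
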
/*
 * DMA - Mapping and unmapping for the "type1" IOMMU interface used on x86
 */
static int vfio_dma_unmap(VFIOContainer *container,
                          hwaddr iova, ram_addr_t size,
                          IOMMUTLBEntry *iotlb)
{
    struct vfio_iommu_type1_dma_unmap unmap = {
        .argsz = sizeof(unmap),
        .flags = 0,
        .iova = iova,
        .size = size,
    };

    if (iotlb && container->dirty_pages_supported &&
        vfio_devices_all_running_and_saving(container)) {
        return vfio_dma_unmap_bitmap(container, iova, size, iotlb);
    }

    while (ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, &unmap)) {
        /*
         * The type1 backend has an off-by-one bug in the kernel (71a7d3d78e3c
         * v4.15) where an overflow in its wrap-around check prevents us from
         * unmapping the last page of the address space.  Test for the error
         * condition and re-try the unmap excluding the last page.  The
         * expectation is that we've never mapped the last page anyway and this
         * unmap request comes via vIOMMU support which also makes it unlikely
         * that this page is used.  This bug was introduced well after type1 v2
         * support was introduced, so we shouldn't need to test for v1.  A fix
         * is queued for kernel v5.0 so this workaround can be removed once
         * affected kernels are sufficiently deprecated.
         */
        if (errno == EINVAL && unmap.size && !(unmap.iova + unmap.size) &&
            container->iommu_type == VFIO_TYPE1v2_IOMMU) {
            trace_vfio_dma_unmap_overflow_workaround();
            unmap.size -= 1ULL << ctz64(container->pgsizes);
            continue;
        }

        error_report("VFIO_UNMAP_DMA failed: %s", strerror(errno));
        return -errno;
    }

    return 0;
}

static int vfio_dma_map(VFIOContainer *container, hwaddr iova,
                        ram_addr_t size, void *vaddr, bool readonly)
{
    struct vfio_iommu_type1_dma_map map = {
        .argsz = sizeof(map),
        .flags = VFIO_DMA_MAP_FLAG_READ,
        .vaddr = (__u64)(uintptr_t)vaddr,
        .iova = iova,
        .size = size,
    };

    if (!readonly) {
        map.flags |= VFIO_DMA_MAP_FLAG_WRITE;
    }

    /*
     * Try the mapping, if it fails with EBUSY, unmap the region and try
     * again.  This shouldn't be necessary, but we sometimes see it in
     * the VGA ROM space.
     */
    if (ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0 ||
        (errno == EBUSY && vfio_dma_unmap(container, iova, size, NULL) == 0 &&
         ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0)) {
        return 0;
    }

    error_report("VFIO_MAP_DMA failed: %s", strerror(errno));
    return -errno;
}

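/*
 * Together these two helpers are the whole type1 data path.  As a sketch
 * (addresses purely illustrative), a RAM section backed by host pointer
 * vaddr becomes DMA-visible at IOVA 0x100000000 with
 *
 *     vfio_dma_map(container, 0x100000000ULL, size, vaddr, false);
 *
 * and is torn down again by the matching
 *
 *     vfio_dma_unmap(container, 0x100000000ULL, size, NULL);
 */
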
static void vfio_host_win_add(VFIOContainer *container,
                              hwaddr min_iova, hwaddr max_iova,
                              uint64_t iova_pgsizes)
{
    VFIOHostDMAWindow *hostwin;

    QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
        if (ranges_overlap(hostwin->min_iova,
                           hostwin->max_iova - hostwin->min_iova + 1,
                           min_iova,
                           max_iova - min_iova + 1)) {
            hw_error("%s: Overlapped IOMMU are not enabled", __func__);
        }
    }

    hostwin = g_malloc0(sizeof(*hostwin));

    hostwin->min_iova = min_iova;
    hostwin->max_iova = max_iova;
    hostwin->iova_pgsizes = iova_pgsizes;
    QLIST_INSERT_HEAD(&container->hostwin_list, hostwin, hostwin_next);
}

static int vfio_host_win_del(VFIOContainer *container, hwaddr min_iova,
                             hwaddr max_iova)
{
    VFIOHostDMAWindow *hostwin;

    QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
        if (hostwin->min_iova == min_iova && hostwin->max_iova == max_iova) {
            QLIST_REMOVE(hostwin, hostwin_next);
            return 0;
        }
    }

    return -1;
}

static bool vfio_listener_skipped_section(MemoryRegionSection *section)
{
    return (!memory_region_is_ram(section->mr) &&
            !memory_region_is_iommu(section->mr)) ||
           memory_region_is_protected(section->mr) ||
           /*
            * Sizing an enabled 64-bit BAR can cause spurious mappings to
            * addresses in the upper part of the 64-bit address space.  These
            * are never accessed by the CPU and beyond the address width of
            * some IOMMU hardware.  TODO: VFIO should tell us the IOMMU width.
            */
           section->offset_within_address_space & (1ULL << 63);
}

/* Called with rcu_read_lock held.  */
static bool vfio_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr,
                               ram_addr_t *ram_addr, bool *read_only)
{
    MemoryRegion *mr;
    hwaddr xlat;
    hwaddr len = iotlb->addr_mask + 1;
    bool writable = iotlb->perm & IOMMU_WO;

    /*
     * The IOMMU TLB entry we have just covers translation through
     * this IOMMU to its immediate target.  We need to translate
     * it the rest of the way through to memory.
     */
    mr = address_space_translate(&address_space_memory,
                                 iotlb->translated_addr,
                                 &xlat, &len, writable,
                                 MEMTXATTRS_UNSPECIFIED);
    if (!memory_region_is_ram(mr)) {
        error_report("iommu map to non memory area %"HWADDR_PRIx"",
                     xlat);
        return false;
    } else if (memory_region_has_ram_discard_manager(mr)) {
        RamDiscardManager *rdm = memory_region_get_ram_discard_manager(mr);
        MemoryRegionSection tmp = {
            .mr = mr,
            .offset_within_region = xlat,
            .size = int128_make64(len),
        };

        /*
         * Malicious VMs can map memory into the IOMMU, which is expected
         * to remain discarded. vfio will pin all pages, populating memory.
         * Disallow that. vmstate priorities make sure any RamDiscardManager
         * were already restored before IOMMUs are restored.
         */
        if (!ram_discard_manager_is_populated(rdm, &tmp)) {
            error_report("iommu map to discarded memory (e.g., unplugged via"
                         " virtio-mem): %"HWADDR_PRIx"",
                         iotlb->translated_addr);
            return false;
        }

        /*
         * Malicious VMs might trigger discarding of IOMMU-mapped memory. The
         * pages will remain pinned inside vfio until unmapped, resulting in a
         * higher memory consumption than expected. If memory would get
         * populated again later, there would be an inconsistency between pages
         * pinned by vfio and pages seen by QEMU. This is the case until
         * unmapped from the IOMMU (e.g., during device reset).
         *
         * With malicious guests, we really only care about pinning more memory
         * than expected. RLIMIT_MEMLOCK set for the user/process can never be
         * exceeded and can be used to mitigate this problem.
         */
        warn_report_once("Using vfio with vIOMMUs and coordinated discarding of"
                         " RAM (e.g., virtio-mem) works, however, malicious"
                         " guests can trigger pinning of more memory than"
                         " intended via an IOMMU. It's possible to mitigate"
                         " by setting/adjusting RLIMIT_MEMLOCK.");
    }

    /*
     * Translation truncates length to the IOMMU page size,
     * check that it did not truncate too much.
     */
    if (len & iotlb->addr_mask) {
        error_report("iommu has granularity incompatible with target AS");
        return false;
    }

    if (vaddr) {
        *vaddr = memory_region_get_ram_ptr(mr) + xlat;
    }

    if (ram_addr) {
        *ram_addr = memory_region_get_ram_addr(mr) + xlat;
    }

    if (read_only) {
        *read_only = !writable || mr->readonly;
    }

    return true;
}

static void vfio_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
{
    VFIOGuestIOMMU *giommu = container_of(n, VFIOGuestIOMMU, n);
    VFIOContainer *container = giommu->container;
    hwaddr iova = iotlb->iova + giommu->iommu_offset;
    void *vaddr;
    int ret;

    trace_vfio_iommu_map_notify(iotlb->perm == IOMMU_NONE ? "UNMAP" : "MAP",
                                iova, iova + iotlb->addr_mask);

    if (iotlb->target_as != &address_space_memory) {
        error_report("Wrong target AS \"%s\", only system memory is allowed",
                     iotlb->target_as->name ? iotlb->target_as->name : "none");
        return;
    }

    rcu_read_lock();

    if ((iotlb->perm & IOMMU_RW) != IOMMU_NONE) {
        bool read_only;

        if (!vfio_get_xlat_addr(iotlb, &vaddr, NULL, &read_only)) {
            goto out;
        }
        /*
         * vaddr is only valid until rcu_read_unlock(). But after
         * vfio_dma_map has set up the mapping the pages will be
         * pinned by the kernel. This makes sure that the RAM backend
         * of vaddr will always be there, even if the memory object is
         * destroyed and its backing memory munmap-ed.
         */
        ret = vfio_dma_map(container, iova,
                           iotlb->addr_mask + 1, vaddr,
                           read_only);
        if (ret) {
            error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
                         "0x%"HWADDR_PRIx", %p) = %d (%m)",
                         container, iova,
                         iotlb->addr_mask + 1, vaddr, ret);
        }
    } else {
        ret = vfio_dma_unmap(container, iova, iotlb->addr_mask + 1, iotlb);
        if (ret) {
            error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
                         "0x%"HWADDR_PRIx") = %d (%m)",
                         container, iova,
                         iotlb->addr_mask + 1, ret);
        }
    }
out:
    rcu_read_unlock();
}

static void vfio_ram_discard_notify_discard(RamDiscardListener *rdl,
                                            MemoryRegionSection *section)
{
    VFIORamDiscardListener *vrdl = container_of(rdl, VFIORamDiscardListener,
                                                listener);
    const hwaddr size = int128_get64(section->size);
    const hwaddr iova = section->offset_within_address_space;
    int ret;

    /* Unmap with a single call. */
    ret = vfio_dma_unmap(vrdl->container, iova, size, NULL);
    if (ret) {
        error_report("%s: vfio_dma_unmap() failed: %s", __func__,
                     strerror(-ret));
    }
}

static int vfio_ram_discard_notify_populate(RamDiscardListener *rdl,
                                            MemoryRegionSection *section)
{
    VFIORamDiscardListener *vrdl = container_of(rdl, VFIORamDiscardListener,
                                                listener);
    const hwaddr end = section->offset_within_region +
                       int128_get64(section->size);
    hwaddr start, next, iova;
    void *vaddr;
    int ret;

    /*
     * Map in (aligned within memory region) minimum granularity, so we can
     * unmap in minimum granularity later.
     */
    for (start = section->offset_within_region; start < end; start = next) {
        next = ROUND_UP(start + 1, vrdl->granularity);
        next = MIN(next, end);

        iova = start - section->offset_within_region +
               section->offset_within_address_space;
        vaddr = memory_region_get_ram_ptr(section->mr) + start;

        ret = vfio_dma_map(vrdl->container, iova, next - start,
                           vaddr, section->readonly);
        if (ret) {
            /* Rollback */
            vfio_ram_discard_notify_discard(rdl, section);
            return ret;
        }
    }
    return 0;
}

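/*
 * Example of the granularity loop above, assuming a hypothetical 2 MiB
 * granularity: populating a 10 MiB chunk issues five 2 MiB vfio_dma_map()
 * calls, so that a later discard of any single 2 MiB block can be handed
 * to vfio_dma_unmap() exactly, since a mapping can only be unmapped as a
 * whole.
 */
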
static void vfio_register_ram_discard_listener(VFIOContainer *container,
                                               MemoryRegionSection *section)
{
    RamDiscardManager *rdm = memory_region_get_ram_discard_manager(section->mr);
    VFIORamDiscardListener *vrdl;

    /* Ignore some corner cases not relevant in practice. */
    g_assert(QEMU_IS_ALIGNED(section->offset_within_region, TARGET_PAGE_SIZE));
    g_assert(QEMU_IS_ALIGNED(section->offset_within_address_space,
                             TARGET_PAGE_SIZE));
    g_assert(QEMU_IS_ALIGNED(int128_get64(section->size), TARGET_PAGE_SIZE));

    vrdl = g_new0(VFIORamDiscardListener, 1);
    vrdl->container = container;
    vrdl->mr = section->mr;
    vrdl->offset_within_address_space = section->offset_within_address_space;
    vrdl->size = int128_get64(section->size);
    vrdl->granularity = ram_discard_manager_get_min_granularity(rdm,
                                                                section->mr);

    g_assert(vrdl->granularity && is_power_of_2(vrdl->granularity));
    g_assert(container->pgsizes &&
             vrdl->granularity >= 1ULL << ctz64(container->pgsizes));

    ram_discard_listener_init(&vrdl->listener,
                              vfio_ram_discard_notify_populate,
                              vfio_ram_discard_notify_discard, true);
    ram_discard_manager_register_listener(rdm, &vrdl->listener, section);
    QLIST_INSERT_HEAD(&container->vrdl_list, vrdl, next);

    /*
     * Sanity-check if we have a theoretically problematic setup where we could
     * exceed the maximum number of possible DMA mappings over time. We assume
     * that each mapped section in the same address space as a RamDiscardManager
     * section consumes exactly one DMA mapping, with the exception of
     * RamDiscardManager sections; i.e., we don't expect to have gIOMMU sections
     * in the same address space as RamDiscardManager sections.
     *
     * We assume that each section in the address space consumes one memslot.
     * We take the number of KVM memory slots as a best guess for the maximum
     * number of sections in the address space we could have over time,
     * also consuming DMA mappings.
     */
    if (container->dma_max_mappings) {
        unsigned int vrdl_count = 0, vrdl_mappings = 0, max_memslots = 512;

#ifdef CONFIG_KVM
        if (kvm_enabled()) {
            max_memslots = kvm_get_max_memslots();
        }
#endif

        QLIST_FOREACH(vrdl, &container->vrdl_list, next) {
            hwaddr start, end;

            start = QEMU_ALIGN_DOWN(vrdl->offset_within_address_space,
                                    vrdl->granularity);
            end = ROUND_UP(vrdl->offset_within_address_space + vrdl->size,
                           vrdl->granularity);
            vrdl_mappings += (end - start) / vrdl->granularity;
            vrdl_count++;
        }

        if (vrdl_mappings + max_memslots - vrdl_count >
            container->dma_max_mappings) {
            warn_report("%s: possibly running out of DMA mappings. E.g., try"
                        " increasing the 'block-size' of virtio-mem devices."
                        " Maximum possible DMA mappings: %d, Maximum possible"
                        " memslots: %d", __func__, container->dma_max_mappings,
                        max_memslots);
        }
    }
}

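/*
 * To make the estimate above concrete, assume a hypothetical setup with
 * 512 memslots and one 16 GiB virtio-mem device with a 2 MiB block-size:
 * that is 8192 discard-granular mappings plus up to 511 other sections,
 * well below the default type1 "dma_entry_limit" of 65535 reported via
 * dma_max_mappings.
 */
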
static void vfio_unregister_ram_discard_listener(VFIOContainer *container,
                                                 MemoryRegionSection *section)
{
    RamDiscardManager *rdm = memory_region_get_ram_discard_manager(section->mr);
    VFIORamDiscardListener *vrdl = NULL;

    QLIST_FOREACH(vrdl, &container->vrdl_list, next) {
        if (vrdl->mr == section->mr &&
            vrdl->offset_within_address_space ==
            section->offset_within_address_space) {
            break;
        }
    }

    if (!vrdl) {
        hw_error("vfio: Trying to unregister missing RAM discard listener");
    }

    ram_discard_manager_unregister_listener(rdm, &vrdl->listener);
    QLIST_REMOVE(vrdl, next);
    g_free(vrdl);
}

static void vfio_listener_region_add(MemoryListener *listener,
                                     MemoryRegionSection *section)
{
    VFIOContainer *container = container_of(listener, VFIOContainer, listener);
    hwaddr iova, end;
    Int128 llend, llsize;
    void *vaddr;
    int ret;
    VFIOHostDMAWindow *hostwin;
    bool hostwin_found;
    Error *err = NULL;

    if (vfio_listener_skipped_section(section)) {
        trace_vfio_listener_region_add_skip(
                section->offset_within_address_space,
                section->offset_within_address_space +
                int128_get64(int128_sub(section->size, int128_one())));
        return;
    }

    if (unlikely((section->offset_within_address_space &
                  ~qemu_real_host_page_mask) !=
                 (section->offset_within_region & ~qemu_real_host_page_mask))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    iova = REAL_HOST_PAGE_ALIGN(section->offset_within_address_space);
    llend = int128_make64(section->offset_within_address_space);
    llend = int128_add(llend, section->size);
    llend = int128_and(llend, int128_exts64(qemu_real_host_page_mask));

    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }
    end = int128_get64(int128_sub(llend, int128_one()));

    if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
        hwaddr pgsize = 0;

        /* For now intersections are not allowed, we may relax this later */
        QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
            if (ranges_overlap(hostwin->min_iova,
                               hostwin->max_iova - hostwin->min_iova + 1,
                               section->offset_within_address_space,
                               int128_get64(section->size))) {
                error_setg(&err,
                    "region [0x%"PRIx64",0x%"PRIx64"] overlaps with existing "
                    "host DMA window [0x%"PRIx64",0x%"PRIx64"]",
                    section->offset_within_address_space,
                    section->offset_within_address_space +
                        int128_get64(section->size) - 1,
                    hostwin->min_iova, hostwin->max_iova);
                goto fail;
            }
        }

        ret = vfio_spapr_create_window(container, section, &pgsize);
        if (ret) {
            error_setg_errno(&err, -ret, "Failed to create SPAPR window");
            goto fail;
        }

        vfio_host_win_add(container, section->offset_within_address_space,
                          section->offset_within_address_space +
                          int128_get64(section->size) - 1, pgsize);
#ifdef CONFIG_KVM
        if (kvm_enabled()) {
            VFIOGroup *group;
            IOMMUMemoryRegion *iommu_mr = IOMMU_MEMORY_REGION(section->mr);
            struct kvm_vfio_spapr_tce param;
            struct kvm_device_attr attr = {
                .group = KVM_DEV_VFIO_GROUP,
                .attr = KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE,
                .addr = (uint64_t)(unsigned long)&param,
            };

            if (!memory_region_iommu_get_attr(iommu_mr, IOMMU_ATTR_SPAPR_TCE_FD,
                                              &param.tablefd)) {
                QLIST_FOREACH(group, &container->group_list, container_next) {
                    param.groupfd = group->fd;
                    if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
                        error_report("vfio: failed to setup fd %d "
                                     "for a group with fd %d: %s",
                                     param.tablefd, param.groupfd,
                                     strerror(errno));
                        return;
                    }
                    trace_vfio_spapr_group_attach(param.groupfd, param.tablefd);
                }
            }
        }
#endif
    }

    hostwin_found = false;
    QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
        if (hostwin->min_iova <= iova && end <= hostwin->max_iova) {
            hostwin_found = true;
            break;
        }
    }

    if (!hostwin_found) {
        error_setg(&err, "Container %p can't map guest IOVA region"
                   " 0x%"HWADDR_PRIx"..0x%"HWADDR_PRIx, container, iova, end);
        goto fail;
    }

    memory_region_ref(section->mr);

    if (memory_region_is_iommu(section->mr)) {
        VFIOGuestIOMMU *giommu;
        IOMMUMemoryRegion *iommu_mr = IOMMU_MEMORY_REGION(section->mr);
        int iommu_idx;

        trace_vfio_listener_region_add_iommu(iova, end);
        /*
         * FIXME: For VFIO iommu types which have KVM acceleration to
         * avoid bouncing all map/unmaps through qemu this way, this
         * would be the right place to wire that up (tell the KVM
         * device emulation the VFIO iommu handles to use).
         */
        giommu = g_malloc0(sizeof(*giommu));
        giommu->iommu = iommu_mr;
        giommu->iommu_offset = section->offset_within_address_space -
                               section->offset_within_region;
        giommu->container = container;
        llend = int128_add(int128_make64(section->offset_within_region),
                           section->size);
        llend = int128_sub(llend, int128_one());
        iommu_idx = memory_region_iommu_attrs_to_index(iommu_mr,
                                                       MEMTXATTRS_UNSPECIFIED);
        iommu_notifier_init(&giommu->n, vfio_iommu_map_notify,
                            IOMMU_NOTIFIER_IOTLB_EVENTS,
                            section->offset_within_region,
                            int128_get64(llend),
                            iommu_idx);

        ret = memory_region_iommu_set_page_size_mask(giommu->iommu,
                                                     container->pgsizes,
                                                     &err);
        if (ret) {
            g_free(giommu);
            goto fail;
        }

        ret = memory_region_register_iommu_notifier(section->mr, &giommu->n,
                                                    &err);
        if (ret) {
            g_free(giommu);
            goto fail;
        }
        QLIST_INSERT_HEAD(&container->giommu_list, giommu, giommu_next);
        memory_region_iommu_replay(giommu->iommu, &giommu->n);

        return;
    }

    /* Here we assume that memory_region_is_ram(section->mr)==true */

    /*
     * For RAM memory regions with a RamDiscardManager, we only want to map the
     * actually populated parts - and update the mapping whenever we're notified
     * about changes.
     */
    if (memory_region_has_ram_discard_manager(section->mr)) {
        vfio_register_ram_discard_listener(container, section);
        return;
    }

    vaddr = memory_region_get_ram_ptr(section->mr) +
            section->offset_within_region +
            (iova - section->offset_within_address_space);

    trace_vfio_listener_region_add_ram(iova, end, vaddr);

    llsize = int128_sub(llend, int128_make64(iova));

    if (memory_region_is_ram_device(section->mr)) {
        hwaddr pgmask = (1ULL << ctz64(hostwin->iova_pgsizes)) - 1;

        if ((iova & pgmask) || (int128_get64(llsize) & pgmask)) {
            trace_vfio_listener_region_add_no_dma_map(
                memory_region_name(section->mr),
                section->offset_within_address_space,
                int128_getlo(section->size),
                pgmask + 1);
            return;
        }
    }

    ret = vfio_dma_map(container, iova, int128_get64(llsize),
                       vaddr, section->readonly);
    if (ret) {
        error_setg(&err, "vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
                   "0x%"HWADDR_PRIx", %p) = %d (%m)",
                   container, iova, int128_get64(llsize), vaddr, ret);
        if (memory_region_is_ram_device(section->mr)) {
            /* Allow unexpected mappings not to be fatal for RAM devices */
            error_report_err(err);
            return;
        }
        goto fail;
    }

    return;

fail:
    if (memory_region_is_ram_device(section->mr)) {
        error_report("failed to vfio_dma_map. pci p2p may not work");
        return;
    }
    /*
     * On the initfn path, store the first error in the container so we
     * can gracefully fail.  Runtime, there's not much we can do other
     * than throw a hardware error.
     */
    if (!container->initialized) {
        if (!container->error) {
            error_propagate_prepend(&container->error, err,
                                    "Region %s: ",
                                    memory_region_name(section->mr));
        } else {
            error_free(err);
        }
    } else {
        error_report_err(err);
        hw_error("vfio: DMA mapping failed, unable to continue");
    }
}

static void vfio_listener_region_del(MemoryListener *listener,
                                     MemoryRegionSection *section)
{
    VFIOContainer *container = container_of(listener, VFIOContainer, listener);
    hwaddr iova, end;
    Int128 llend, llsize;
    int ret;
    bool try_unmap = true;

    if (vfio_listener_skipped_section(section)) {
        trace_vfio_listener_region_del_skip(
                section->offset_within_address_space,
                section->offset_within_address_space +
                int128_get64(int128_sub(section->size, int128_one())));
        return;
    }

    if (unlikely((section->offset_within_address_space &
                  ~qemu_real_host_page_mask) !=
                 (section->offset_within_region & ~qemu_real_host_page_mask))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    if (memory_region_is_iommu(section->mr)) {
        VFIOGuestIOMMU *giommu;

        QLIST_FOREACH(giommu, &container->giommu_list, giommu_next) {
            if (MEMORY_REGION(giommu->iommu) == section->mr &&
                giommu->n.start == section->offset_within_region) {
                memory_region_unregister_iommu_notifier(section->mr,
                                                        &giommu->n);
                QLIST_REMOVE(giommu, giommu_next);
                g_free(giommu);
                break;
            }
        }

        /*
         * FIXME: We assume the one big unmap below is adequate to
         * remove any individual page mappings in the IOMMU which
         * might have been copied into VFIO. This works for a page table
         * based IOMMU where a big unmap flattens a large range of IO-PTEs.
         * That may not be true for all IOMMU types.
         */
    }

    iova = REAL_HOST_PAGE_ALIGN(section->offset_within_address_space);
    llend = int128_make64(section->offset_within_address_space);
    llend = int128_add(llend, section->size);
    llend = int128_and(llend, int128_exts64(qemu_real_host_page_mask));

    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }
    end = int128_get64(int128_sub(llend, int128_one()));

    llsize = int128_sub(llend, int128_make64(iova));

    trace_vfio_listener_region_del(iova, end);

    if (memory_region_is_ram_device(section->mr)) {
        hwaddr pgmask;
        VFIOHostDMAWindow *hostwin;
        bool hostwin_found = false;

        QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
            if (hostwin->min_iova <= iova && end <= hostwin->max_iova) {
                hostwin_found = true;
                break;
            }
        }
        assert(hostwin_found); /* or region_add() would have failed */

        pgmask = (1ULL << ctz64(hostwin->iova_pgsizes)) - 1;
        try_unmap = !((iova & pgmask) || (int128_get64(llsize) & pgmask));
    } else if (memory_region_has_ram_discard_manager(section->mr)) {
        vfio_unregister_ram_discard_listener(container, section);
        /* Unregistering will trigger an unmap. */
        try_unmap = false;
    }

    if (try_unmap) {
        if (int128_eq(llsize, int128_2_64())) {
            /* The unmap ioctl doesn't accept a full 64-bit span. */
            llsize = int128_rshift(llsize, 1);
            ret = vfio_dma_unmap(container, iova, int128_get64(llsize), NULL);
            if (ret) {
                error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
                             "0x%"HWADDR_PRIx") = %d (%m)",
                             container, iova, int128_get64(llsize), ret);
            }
            iova += int128_get64(llsize);
        }
        ret = vfio_dma_unmap(container, iova, int128_get64(llsize), NULL);
        if (ret) {
            error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
                         "0x%"HWADDR_PRIx") = %d (%m)",
                         container, iova, int128_get64(llsize), ret);
        }
    }

    memory_region_unref(section->mr);

    if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
        vfio_spapr_remove_window(container,
                                 section->offset_within_address_space);
        if (vfio_host_win_del(container,
                              section->offset_within_address_space,
                              section->offset_within_address_space +
                              int128_get64(section->size) - 1) < 0) {
            hw_error("%s: Cannot delete missing window at %"HWADDR_PRIx,
                     __func__, section->offset_within_address_space);
        }
    }
}

static void vfio_set_dirty_page_tracking(VFIOContainer *container, bool start)
{
    int ret;
    struct vfio_iommu_type1_dirty_bitmap dirty = {
        .argsz = sizeof(dirty),
    };

    if (start) {
        dirty.flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_START;
    } else {
        dirty.flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP;
    }

    ret = ioctl(container->fd, VFIO_IOMMU_DIRTY_PAGES, &dirty);
    if (ret) {
        error_report("Failed to set dirty tracking flag 0x%x errno: %d",
                     dirty.flags, errno);
    }
}

static void vfio_listener_log_global_start(MemoryListener *listener)
{
    VFIOContainer *container = container_of(listener, VFIOContainer, listener);

    vfio_set_dirty_page_tracking(container, true);
}

static void vfio_listener_log_global_stop(MemoryListener *listener)
{
    VFIOContainer *container = container_of(listener, VFIOContainer, listener);

    vfio_set_dirty_page_tracking(container, false);
}

static int vfio_get_dirty_bitmap(VFIOContainer *container, uint64_t iova,
                                 uint64_t size, ram_addr_t ram_addr)
{
    struct vfio_iommu_type1_dirty_bitmap *dbitmap;
    struct vfio_iommu_type1_dirty_bitmap_get *range;
    uint64_t pages;
    int ret;

    dbitmap = g_malloc0(sizeof(*dbitmap) + sizeof(*range));

    dbitmap->argsz = sizeof(*dbitmap) + sizeof(*range);
    dbitmap->flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP;
    range = (struct vfio_iommu_type1_dirty_bitmap_get *)&dbitmap->data;
    range->iova = iova;
    range->size = size;

    /*
     * cpu_physical_memory_set_dirty_lebitmap() supports pages in bitmap of
     * qemu_real_host_page_size to mark those dirty. Hence set bitmap's pgsize
     * to qemu_real_host_page_size.
     */
    range->bitmap.pgsize = qemu_real_host_page_size;

    pages = REAL_HOST_PAGE_ALIGN(range->size) / qemu_real_host_page_size;
    range->bitmap.size = ROUND_UP(pages, sizeof(__u64) * BITS_PER_BYTE) /
                         BITS_PER_BYTE;
    range->bitmap.data = g_try_malloc0(range->bitmap.size);
    if (!range->bitmap.data) {
        ret = -ENOMEM;
        goto err_out;
    }

    ret = ioctl(container->fd, VFIO_IOMMU_DIRTY_PAGES, dbitmap);
    if (ret) {
        error_report("Failed to get dirty bitmap for iova: 0x%"PRIx64
                     " size: 0x%"PRIx64" err: %d", (uint64_t)range->iova,
                     (uint64_t)range->size, errno);
        goto err_out;
    }

    cpu_physical_memory_set_dirty_lebitmap((unsigned long *)range->bitmap.data,
                                           ram_addr, pages);

    trace_vfio_get_dirty_bitmap(container->fd, range->iova, range->size,
                                range->bitmap.size, ram_addr);
err_out:
    g_free(range->bitmap.data);
    g_free(dbitmap);

    return ret;
}

typedef struct {
    IOMMUNotifier n;
    VFIOGuestIOMMU *giommu;
} vfio_giommu_dirty_notifier;

static void vfio_iommu_map_dirty_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
{
    vfio_giommu_dirty_notifier *gdn = container_of(n,
                                                vfio_giommu_dirty_notifier, n);
    VFIOGuestIOMMU *giommu = gdn->giommu;
    VFIOContainer *container = giommu->container;
    hwaddr iova = iotlb->iova + giommu->iommu_offset;
    ram_addr_t translated_addr;

    trace_vfio_iommu_map_dirty_notify(iova, iova + iotlb->addr_mask);

    if (iotlb->target_as != &address_space_memory) {
        error_report("Wrong target AS \"%s\", only system memory is allowed",
                     iotlb->target_as->name ? iotlb->target_as->name : "none");
        return;
    }

    rcu_read_lock();
    if (vfio_get_xlat_addr(iotlb, NULL, &translated_addr, NULL)) {
        int ret;

        ret = vfio_get_dirty_bitmap(container, iova, iotlb->addr_mask + 1,
                                    translated_addr);
        if (ret) {
            error_report("vfio_iommu_map_dirty_notify(%p, 0x%"HWADDR_PRIx", "
                         "0x%"HWADDR_PRIx") = %d (%m)",
                         container, iova,
                         iotlb->addr_mask + 1, ret);
        }
    }
    rcu_read_unlock();
}

static int vfio_ram_discard_get_dirty_bitmap(MemoryRegionSection *section,
                                             void *opaque)
{
    const hwaddr size = int128_get64(section->size);
    const hwaddr iova = section->offset_within_address_space;
    const ram_addr_t ram_addr = memory_region_get_ram_addr(section->mr) +
                                section->offset_within_region;
    VFIORamDiscardListener *vrdl = opaque;

    /*
     * Sync the whole mapped region (spanning multiple individual mappings)
     * in one go.
     */
    return vfio_get_dirty_bitmap(vrdl->container, iova, size, ram_addr);
}

static int vfio_sync_ram_discard_listener_dirty_bitmap(VFIOContainer *container,
                                                   MemoryRegionSection *section)
{
    RamDiscardManager *rdm = memory_region_get_ram_discard_manager(section->mr);
    VFIORamDiscardListener *vrdl = NULL;

    QLIST_FOREACH(vrdl, &container->vrdl_list, next) {
        if (vrdl->mr == section->mr &&
            vrdl->offset_within_address_space ==
            section->offset_within_address_space) {
            break;
        }
    }

    if (!vrdl) {
        hw_error("vfio: Trying to sync missing RAM discard listener");
    }

    /*
     * We only want/can synchronize the bitmap for actually mapped parts -
     * which correspond to populated parts. Replay all populated parts.
     * The callback expects the VFIORamDiscardListener itself as opaque.
     */
    return ram_discard_manager_replay_populated(rdm, section,
                                               vfio_ram_discard_get_dirty_bitmap,
                                                vrdl);
}

static int vfio_sync_dirty_bitmap(VFIOContainer *container,
                                  MemoryRegionSection *section)
{
    ram_addr_t ram_addr;

    if (memory_region_is_iommu(section->mr)) {
        VFIOGuestIOMMU *giommu;

        QLIST_FOREACH(giommu, &container->giommu_list, giommu_next) {
            if (MEMORY_REGION(giommu->iommu) == section->mr &&
                giommu->n.start == section->offset_within_region) {
                Int128 llend;
                vfio_giommu_dirty_notifier gdn = { .giommu = giommu };
                int idx = memory_region_iommu_attrs_to_index(giommu->iommu,
                                                       MEMTXATTRS_UNSPECIFIED);

                llend = int128_add(int128_make64(section->offset_within_region),
                                   section->size);
                llend = int128_sub(llend, int128_one());

                iommu_notifier_init(&gdn.n,
                                    vfio_iommu_map_dirty_notify,
                                    IOMMU_NOTIFIER_MAP,
                                    section->offset_within_region,
                                    int128_get64(llend),
                                    idx);
                memory_region_iommu_replay(giommu->iommu, &gdn.n);
                break;
            }
        }
        return 0;
    } else if (memory_region_has_ram_discard_manager(section->mr)) {
        return vfio_sync_ram_discard_listener_dirty_bitmap(container, section);
    }

    ram_addr = memory_region_get_ram_addr(section->mr) +
               section->offset_within_region;

    return vfio_get_dirty_bitmap(container,
                   REAL_HOST_PAGE_ALIGN(section->offset_within_address_space),
                   int128_get64(section->size), ram_addr);
}

static void vfio_listener_log_sync(MemoryListener *listener,
                                   MemoryRegionSection *section)
{
    VFIOContainer *container = container_of(listener, VFIOContainer, listener);

    if (vfio_listener_skipped_section(section) ||
        !container->dirty_pages_supported) {
        return;
    }

    if (vfio_devices_all_dirty_tracking(container)) {
        vfio_sync_dirty_bitmap(container, section);
    }
}

static const MemoryListener vfio_memory_listener = {
    .name = "vfio",
    .region_add = vfio_listener_region_add,
    .region_del = vfio_listener_region_del,
    .log_global_start = vfio_listener_log_global_start,
    .log_global_stop = vfio_listener_log_global_stop,
    .log_sync = vfio_listener_log_sync,
};

static void vfio_listener_release(VFIOContainer *container)
{
    memory_listener_unregister(&container->listener);
    if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
        memory_listener_unregister(&container->prereg_listener);
    }
}

static struct vfio_info_cap_header *
vfio_get_cap(void *ptr, uint32_t cap_offset, uint16_t id)
{
    struct vfio_info_cap_header *hdr;

    for (hdr = ptr + cap_offset; hdr != ptr; hdr = ptr + hdr->next) {
        if (hdr->id == id) {
            return hdr;
        }
    }

    return NULL;
}

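/*
 * The capability chain walked above is a singly linked list embedded in
 * the flexible tail of an info structure: each header stores the offset
 * of the next header relative to the start of the structure, and a next
 * offset of 0 (i.e. hdr == ptr) terminates the walk.  Conceptually:
 *
 *     info ... [cap @cap_offset: id, version, next] ... [cap @next: ...]
 */
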
struct vfio_info_cap_header *
vfio_get_region_info_cap(struct vfio_region_info *info, uint16_t id)
{
    if (!(info->flags & VFIO_REGION_INFO_FLAG_CAPS)) {
        return NULL;
    }

    return vfio_get_cap((void *)info, info->cap_offset, id);
}

static struct vfio_info_cap_header *
vfio_get_iommu_type1_info_cap(struct vfio_iommu_type1_info *info, uint16_t id)
{
    if (!(info->flags & VFIO_IOMMU_INFO_CAPS)) {
        return NULL;
    }

    return vfio_get_cap((void *)info, info->cap_offset, id);
}

struct vfio_info_cap_header *
vfio_get_device_info_cap(struct vfio_device_info *info, uint16_t id)
{
    if (!(info->flags & VFIO_DEVICE_FLAGS_CAPS)) {
        return NULL;
    }

    return vfio_get_cap((void *)info, info->cap_offset, id);
}

bool vfio_get_info_dma_avail(struct vfio_iommu_type1_info *info,
                             unsigned int *avail)
{
    struct vfio_info_cap_header *hdr;
    struct vfio_iommu_type1_info_dma_avail *cap;

    /* If the capability cannot be found, assume no DMA limiting */
    hdr = vfio_get_iommu_type1_info_cap(info,
                                        VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL);
    if (hdr == NULL) {
        return false;
    }

    if (avail != NULL) {
        cap = (void *) hdr;
        *avail = cap->avail;
    }

    return true;
}

static int vfio_setup_region_sparse_mmaps(VFIORegion *region,
                                          struct vfio_region_info *info)
{
    struct vfio_info_cap_header *hdr;
    struct vfio_region_info_cap_sparse_mmap *sparse;
    int i, j;

    hdr = vfio_get_region_info_cap(info, VFIO_REGION_INFO_CAP_SPARSE_MMAP);
    if (!hdr) {
        return -ENODEV;
    }

    sparse = container_of(hdr, struct vfio_region_info_cap_sparse_mmap, header);

    trace_vfio_region_sparse_mmap_header(region->vbasedev->name,
                                         region->nr, sparse->nr_areas);

    region->mmaps = g_new0(VFIOMmap, sparse->nr_areas);

    for (i = 0, j = 0; i < sparse->nr_areas; i++) {
        trace_vfio_region_sparse_mmap_entry(i, sparse->areas[i].offset,
                                            sparse->areas[i].offset +
                                            sparse->areas[i].size);

        if (sparse->areas[i].size) {
            region->mmaps[j].offset = sparse->areas[i].offset;
            region->mmaps[j].size = sparse->areas[i].size;
            j++;
        }
    }

    region->nr_mmaps = j;
    region->mmaps = g_realloc(region->mmaps, j * sizeof(VFIOMmap));

    return 0;
}

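/*
 * Sparse mmap lets the kernel carve mmap-able windows out of a region
 * while keeping the rest trapped.  A made-up example: a 16 KiB BAR whose
 * first page must be emulated (say it holds an MSI-X table) could be
 * described as a single area
 *
 *     areas[0] = { .offset = 0x1000, .size = 0x3000 };
 *
 * so only accesses to the first 4 KiB take the slow vfio_region_ops path.
 */
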
int vfio_region_setup(Object *obj, VFIODevice *vbasedev, VFIORegion *region,
                      int index, const char *name)
{
    struct vfio_region_info *info;
    int ret;

    ret = vfio_get_region_info(vbasedev, index, &info);
    if (ret) {
        return ret;
    }

    region->vbasedev = vbasedev;
    region->flags = info->flags;
    region->size = info->size;
    region->fd_offset = info->offset;
    region->nr = index;

    if (region->size) {
        region->mem = g_new0(MemoryRegion, 1);
        memory_region_init_io(region->mem, obj, &vfio_region_ops,
                              region, name, region->size);

        if (!vbasedev->no_mmap &&
            region->flags & VFIO_REGION_INFO_FLAG_MMAP) {

            ret = vfio_setup_region_sparse_mmaps(region, info);

            if (ret) {
                region->nr_mmaps = 1;
                region->mmaps = g_new0(VFIOMmap, region->nr_mmaps);
                region->mmaps[0].offset = 0;
                region->mmaps[0].size = region->size;
            }
        }
    }

    g_free(info);

    trace_vfio_region_setup(vbasedev->name, index, name,
                            region->flags, region->fd_offset, region->size);
    return 0;
}

static void vfio_subregion_unmap(VFIORegion *region, int index)
{
    trace_vfio_region_unmap(memory_region_name(&region->mmaps[index].mem),
                            region->mmaps[index].offset,
                            region->mmaps[index].offset +
                            region->mmaps[index].size - 1);
    memory_region_del_subregion(region->mem, &region->mmaps[index].mem);
    munmap(region->mmaps[index].mmap, region->mmaps[index].size);
    object_unparent(OBJECT(&region->mmaps[index].mem));
    region->mmaps[index].mmap = NULL;
}

int vfio_region_mmap(VFIORegion *region)
{
    int i, prot = 0;
    char *name;

    if (!region->mem) {
        return 0;
    }

    prot |= region->flags & VFIO_REGION_INFO_FLAG_READ ? PROT_READ : 0;
    prot |= region->flags & VFIO_REGION_INFO_FLAG_WRITE ? PROT_WRITE : 0;

    for (i = 0; i < region->nr_mmaps; i++) {
        region->mmaps[i].mmap = mmap(NULL, region->mmaps[i].size, prot,
                                     MAP_SHARED, region->vbasedev->fd,
                                     region->fd_offset +
                                     region->mmaps[i].offset);
        if (region->mmaps[i].mmap == MAP_FAILED) {
            int ret = -errno;

            trace_vfio_region_mmap_fault(memory_region_name(region->mem), i,
                                         region->fd_offset +
                                         region->mmaps[i].offset,
                                         region->fd_offset +
                                         region->mmaps[i].offset +
                                         region->mmaps[i].size - 1, ret);

            region->mmaps[i].mmap = NULL;

            for (i--; i >= 0; i--) {
                vfio_subregion_unmap(region, i);
            }

            return ret;
        }

        name = g_strdup_printf("%s mmaps[%d]",
                               memory_region_name(region->mem), i);
        memory_region_init_ram_device_ptr(&region->mmaps[i].mem,
                                          memory_region_owner(region->mem),
                                          name, region->mmaps[i].size,
                                          region->mmaps[i].mmap);
        g_free(name);
        memory_region_add_subregion(region->mem, region->mmaps[i].offset,
                                    &region->mmaps[i].mem);

        trace_vfio_region_mmap(memory_region_name(&region->mmaps[i].mem),
                               region->mmaps[i].offset,
                               region->mmaps[i].offset +
                               region->mmaps[i].size - 1);
    }

    return 0;
}

void vfio_region_unmap(VFIORegion *region)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            vfio_subregion_unmap(region, i);
        }
    }
}

void vfio_region_exit(VFIORegion *region)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            memory_region_del_subregion(region->mem, &region->mmaps[i].mem);
        }
    }

    trace_vfio_region_exit(region->vbasedev->name, region->nr);
}

void vfio_region_finalize(VFIORegion *region)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            munmap(region->mmaps[i].mmap, region->mmaps[i].size);
            object_unparent(OBJECT(&region->mmaps[i].mem));
        }
    }

    object_unparent(OBJECT(region->mem));

    g_free(region->mem);
    g_free(region->mmaps);

    trace_vfio_region_finalize(region->vbasedev->name, region->nr);

    region->mem = NULL;
    region->mmaps = NULL;
    region->nr_mmaps = 0;
    region->size = 0;
    region->flags = 0;
    region->nr = 0;
}

void vfio_region_mmaps_set_enabled(VFIORegion *region, bool enabled)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            memory_region_set_enabled(&region->mmaps[i].mem, enabled);
        }
    }

    trace_vfio_region_mmaps_set_enabled(memory_region_name(region->mem),
                                        enabled);
}

void vfio_reset_handler(void *opaque)
{
    VFIOGroup *group;
    VFIODevice *vbasedev;

    QLIST_FOREACH(group, &vfio_group_list, next) {
        QLIST_FOREACH(vbasedev, &group->device_list, next) {
            if (vbasedev->dev->realized) {
                vbasedev->ops->vfio_compute_needs_reset(vbasedev);
            }
        }
    }

    QLIST_FOREACH(group, &vfio_group_list, next) {
        QLIST_FOREACH(vbasedev, &group->device_list, next) {
            if (vbasedev->dev->realized && vbasedev->needs_reset) {
                vbasedev->ops->vfio_hot_reset_multi(vbasedev);
            }
        }
    }
}

static void vfio_kvm_device_add_group(VFIOGroup *group)
{
#ifdef CONFIG_KVM
    struct kvm_device_attr attr = {
        .group = KVM_DEV_VFIO_GROUP,
        .attr = KVM_DEV_VFIO_GROUP_ADD,
        .addr = (uint64_t)(unsigned long)&group->fd,
    };

    if (!kvm_enabled()) {
        return;
    }

    if (vfio_kvm_device_fd < 0) {
        struct kvm_create_device cd = {
            .type = KVM_DEV_TYPE_VFIO,
        };

        if (kvm_vm_ioctl(kvm_state, KVM_CREATE_DEVICE, &cd)) {
            error_report("Failed to create KVM VFIO device: %m");
            return;
        }

        vfio_kvm_device_fd = cd.fd;
    }

    if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
        error_report("Failed to add group %d to KVM VFIO device: %m",
                     group->groupid);
    }
#endif
}

static void vfio_kvm_device_del_group(VFIOGroup *group)
{
#ifdef CONFIG_KVM
    struct kvm_device_attr attr = {
        .group = KVM_DEV_VFIO_GROUP,
        .attr = KVM_DEV_VFIO_GROUP_DEL,
        .addr = (uint64_t)(unsigned long)&group->fd,
    };

    if (vfio_kvm_device_fd < 0) {
        return;
    }

    if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
        error_report("Failed to remove group %d from KVM VFIO device: %m",
                     group->groupid);
    }
#endif
}

static VFIOAddressSpace *vfio_get_address_space(AddressSpace *as)
{
    VFIOAddressSpace *space;

    QLIST_FOREACH(space, &vfio_address_spaces, list) {
        if (space->as == as) {
            return space;
        }
    }

    /* No suitable VFIOAddressSpace, create a new one */
    space = g_malloc0(sizeof(*space));
    space->as = as;
    QLIST_INIT(&space->containers);

    QLIST_INSERT_HEAD(&vfio_address_spaces, space, list);

    return space;
}

static void vfio_put_address_space(VFIOAddressSpace *space)
{
    if (QLIST_EMPTY(&space->containers)) {
        QLIST_REMOVE(space, list);
        g_free(space);
    }
}

/*
 * vfio_get_iommu_type - selects the richest iommu_type (v2 first)
 */
static int vfio_get_iommu_type(VFIOContainer *container,
                               Error **errp)
{
    int iommu_types[] = { VFIO_TYPE1v2_IOMMU, VFIO_TYPE1_IOMMU,
                          VFIO_SPAPR_TCE_v2_IOMMU, VFIO_SPAPR_TCE_IOMMU };
    int i;

    for (i = 0; i < ARRAY_SIZE(iommu_types); i++) {
        if (ioctl(container->fd, VFIO_CHECK_EXTENSION, iommu_types[i])) {
            return iommu_types[i];
        }
    }
    error_setg(errp, "No available IOMMU models");
    return -EINVAL;
}

static int vfio_init_container(VFIOContainer *container, int group_fd,
                               Error **errp)
{
    int iommu_type, ret;

    iommu_type = vfio_get_iommu_type(container, errp);
    if (iommu_type < 0) {
        return iommu_type;
    }

    ret = ioctl(group_fd, VFIO_GROUP_SET_CONTAINER, &container->fd);
    if (ret) {
        error_setg_errno(errp, errno, "Failed to set group container");
        return -errno;
    }

    while (ioctl(container->fd, VFIO_SET_IOMMU, iommu_type)) {
        if (iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
            /*
             * On sPAPR, although the IOMMU subdriver always advertises v1
             * and v2, the running platform may not support v2, and there is
             * no way to know until an IOMMU group gets added to the
             * container.  So if it fails with v2, try v1 as a fallback.
             */
            iommu_type = VFIO_SPAPR_TCE_IOMMU;
            continue;
        }
        error_setg_errno(errp, errno, "Failed to set iommu for container");
        return -errno;
    }

    container->iommu_type = iommu_type;
    return 0;
}

static int vfio_get_iommu_info(VFIOContainer *container,
                               struct vfio_iommu_type1_info **info)
{
    size_t argsz = sizeof(struct vfio_iommu_type1_info);

    *info = g_new0(struct vfio_iommu_type1_info, 1);
again:
    (*info)->argsz = argsz;

    if (ioctl(container->fd, VFIO_IOMMU_GET_INFO, *info)) {
        g_free(*info);
        *info = NULL;
        return -errno;
    }

    if ((*info)->argsz > argsz) {
        argsz = (*info)->argsz;
        *info = g_realloc(*info, argsz);
        goto again;
    }

    return 0;
}

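/*
 * The retry loop above is the standard vfio argsz handshake: userspace
 * reports how much it allocated, and the kernel writes back the size it
 * actually needs (capability chains included).  A first call with
 * argsz = sizeof(struct vfio_iommu_type1_info) that returns with, say,
 * argsz = 72 simply means "reallocate to 72 bytes and ask again".
 */
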
static struct vfio_info_cap_header *
vfio_get_iommu_info_cap(struct vfio_iommu_type1_info *info, uint16_t id)
{
    struct vfio_info_cap_header *hdr;
    void *ptr = info;

    if (!(info->flags & VFIO_IOMMU_INFO_CAPS)) {
        return NULL;
    }

    for (hdr = ptr + info->cap_offset; hdr != ptr; hdr = ptr + hdr->next) {
        if (hdr->id == id) {
            return hdr;
        }
    }

    return NULL;
}

static void vfio_get_iommu_info_migration(VFIOContainer *container,
                                          struct vfio_iommu_type1_info *info)
{
    struct vfio_info_cap_header *hdr;
    struct vfio_iommu_type1_info_cap_migration *cap_mig;

    hdr = vfio_get_iommu_info_cap(info, VFIO_IOMMU_TYPE1_INFO_CAP_MIGRATION);
    if (!hdr) {
        return;
    }

    cap_mig = container_of(hdr, struct vfio_iommu_type1_info_cap_migration,
                           header);

    /*
     * cpu_physical_memory_set_dirty_lebitmap() supports pages in bitmap of
     * qemu_real_host_page_size to mark those dirty.
     */
    if (cap_mig->pgsize_bitmap & qemu_real_host_page_size) {
        container->dirty_pages_supported = true;
        container->max_dirty_bitmap_size = cap_mig->max_dirty_bitmap_size;
        container->dirty_pgsizes = cap_mig->pgsize_bitmap;
    }
}

static int vfio_connect_container(VFIOGroup *group, AddressSpace *as,
                                  Error **errp)
{
    VFIOContainer *container;
    int ret, fd;
    VFIOAddressSpace *space;

    space = vfio_get_address_space(as);

    /*
     * VFIO is currently incompatible with discarding of RAM insofar as the
     * madvise to purge (zap) the page from QEMU's address space does not
     * interact with the memory API and therefore leaves stale virtual to
     * physical mappings in the IOMMU if the page was previously pinned.  We
     * therefore set discarding broken for each group added to a container,
     * whether the container is used individually or shared.  This provides
     * us with options to allow devices within a group to opt-in and allow
     * discarding, so long as it is done consistently for a group (for instance
     * if the device is an mdev device where it is known that the host vendor
     * driver will never pin pages outside of the working set of the guest
     * driver, which would thus not be discarding candidates).
     *
     * The first opportunity to induce pinning occurs here, where we attempt to
     * attach the group to existing containers within the AddressSpace.  If any
     * pages are already zapped from the virtual address space, such as from
     * previous discards, new pinning will cause valid mappings to be
     * re-established.  Likewise, when the overall MemoryListener for a new
     * container is registered, a replay of mappings within the AddressSpace
     * will occur, re-establishing any previously zapped pages as well.
     *
     * In particular, virtio-balloon is currently only prevented from
     * discarding new memory: it will not yet set
     * ram_block_discard_set_required() and therefore neither stops us here
     * nor deals with the sudden memory consumption of inflated memory.
     *
     * We do support discarding of memory coordinated via the RamDiscardManager
     * with some IOMMU types.  vfio_ram_block_discard_disable() handles the
     * details once we know which type of IOMMU we are using.
     */

    QLIST_FOREACH(container, &space->containers, next) {
        if (!ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &container->fd)) {
            ret = vfio_ram_block_discard_disable(container, true);
            if (ret) {
                error_setg_errno(errp, -ret,
                                 "Cannot set discarding of RAM broken");
                if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER,
                          &container->fd)) {
                    error_report("vfio: error disconnecting group %d from"
                                 " container", group->groupid);
                }
                return ret;
            }
            group->container = container;
            QLIST_INSERT_HEAD(&container->group_list, group, container_next);
            vfio_kvm_device_add_group(group);
            return 0;
        }
    }

    fd = qemu_open_old("/dev/vfio/vfio", O_RDWR);
    if (fd < 0) {
        error_setg_errno(errp, errno, "failed to open /dev/vfio/vfio");
        ret = -errno;
        goto put_space_exit;
    }

    ret = ioctl(fd, VFIO_GET_API_VERSION);
    if (ret != VFIO_API_VERSION) {
        error_setg(errp, "supported vfio version: %d, "
                   "reported version: %d", VFIO_API_VERSION, ret);
        ret = -EINVAL;
        goto close_fd_exit;
    }

    container = g_malloc0(sizeof(*container));
    container->space = space;
    container->fd = fd;
    container->error = NULL;
    container->dirty_pages_supported = false;
    container->dma_max_mappings = 0;
    QLIST_INIT(&container->giommu_list);
    QLIST_INIT(&container->hostwin_list);
    QLIST_INIT(&container->vrdl_list);

    ret = vfio_init_container(container, group->fd, errp);
    if (ret) {
        goto free_container_exit;
    }

    ret = vfio_ram_block_discard_disable(container, true);
    if (ret) {
        error_setg_errno(errp, -ret, "Cannot set discarding of RAM broken");
        goto free_container_exit;
    }

    switch (container->iommu_type) {
    case VFIO_TYPE1v2_IOMMU:
    case VFIO_TYPE1_IOMMU:
    {
        struct vfio_iommu_type1_info *info;

        /*
         * FIXME: This assumes that a Type1 IOMMU can map any 64-bit
         * IOVA whatsoever.  That's not actually true, but the current
         * kernel interface doesn't tell us what it can map, and the
         * existing Type1 IOMMUs generally support any IOVA we're
         * going to actually try in practice.
         */
        ret = vfio_get_iommu_info(container, &info);
        if (ret) {
            /*
             * vfio_get_iommu_info() frees and clears *info on failure;
             * re-allocate a zeroed struct so the 4k fallback below has a
             * valid buffer to write into.
             */
            info = g_new0(struct vfio_iommu_type1_info, 1);
        }

        if (ret || !(info->flags & VFIO_IOMMU_INFO_PGSIZES)) {
            /* Assume 4k IOVA page size */
            info->iova_pgsizes = 4096;
        }
        vfio_host_win_add(container, 0, (hwaddr)-1, info->iova_pgsizes);
        container->pgsizes = info->iova_pgsizes;

        /* The default in the kernel ("dma_entry_limit") is 65535. */
        container->dma_max_mappings = 65535;
        if (!ret) {
            vfio_get_info_dma_avail(info, &container->dma_max_mappings);
            vfio_get_iommu_info_migration(container, info);
        }
        g_free(info);
        break;
    }
    case VFIO_SPAPR_TCE_v2_IOMMU:
    case VFIO_SPAPR_TCE_IOMMU:
    {
        struct vfio_iommu_spapr_tce_info info;
        bool v2 = container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU;

        /*
         * The host kernel code implementing VFIO_IOMMU_DISABLE is called
         * when the container fd is closed, so we do not call it explicitly
         * in this file.
         */
        if (!v2) {
            ret = ioctl(fd, VFIO_IOMMU_ENABLE);
            if (ret) {
                error_setg_errno(errp, errno, "failed to enable container");
                ret = -errno;
                goto enable_discards_exit;
            }
        } else {
            container->prereg_listener = vfio_prereg_listener;

            memory_listener_register(&container->prereg_listener,
                                     &address_space_memory);
            if (container->error) {
                memory_listener_unregister(&container->prereg_listener);
                ret = -1;
                error_propagate_prepend(errp, container->error,
                    "RAM memory listener initialization failed: ");
                goto enable_discards_exit;
            }
        }

        info.argsz = sizeof(info);
        ret = ioctl(fd, VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info);
        if (ret) {
            error_setg_errno(errp, errno,
                             "VFIO_IOMMU_SPAPR_TCE_GET_INFO failed");
            ret = -errno;
            if (v2) {
                memory_listener_unregister(&container->prereg_listener);
            }
            goto enable_discards_exit;
        }

        if (v2) {
            container->pgsizes = info.ddw.pgsizes;
            /*
             * There is a default window in a newly created container.
             * To make region_add/del simpler, remove this window now and
             * let the iommu_listener callbacks create/remove windows when
             * needed.
             */
            ret = vfio_spapr_remove_window(container, info.dma32_window_start);
            if (ret) {
                error_setg_errno(errp, -ret,
                                 "failed to remove existing window");
                goto enable_discards_exit;
            }
        } else {
            /* The default table uses 4K pages */
            container->pgsizes = 0x1000;
            vfio_host_win_add(container, info.dma32_window_start,
                              info.dma32_window_start +
                              info.dma32_window_size - 1,
                              0x1000);
        }
    }
    }

    vfio_kvm_device_add_group(group);

    QLIST_INIT(&container->group_list);
    QLIST_INSERT_HEAD(&space->containers, container, next);

    group->container = container;
    QLIST_INSERT_HEAD(&container->group_list, group, container_next);

    container->listener = vfio_memory_listener;

    memory_listener_register(&container->listener, container->space->as);

    if (container->error) {
        ret = -1;
        error_propagate_prepend(errp, container->error,
            "memory listener initialization failed: ");
        goto listener_release_exit;
    }

    container->initialized = true;

    return 0;
listener_release_exit:
    QLIST_REMOVE(group, container_next);
    QLIST_REMOVE(container, next);
    vfio_kvm_device_del_group(group);
    vfio_listener_release(container);

enable_discards_exit:
    vfio_ram_block_discard_disable(container, false);

free_container_exit:
    g_free(container);

close_fd_exit:
    close(fd);

put_space_exit:
    vfio_put_address_space(space);

    return ret;
}
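
/*
 * Undo vfio_connect_container(): detach @group and, if it was the last
 * group attached, release the listeners and destroy the container.
 */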
static void vfio_disconnect_container(VFIOGroup *group)
{
    VFIOContainer *container = group->container;

    QLIST_REMOVE(group, container_next);
    group->container = NULL;

    /*
     * Explicitly release the listener before unsetting the container,
     * since unsetting may destroy the backend container if this is the
     * last group.
     */
    if (QLIST_EMPTY(&container->group_list)) {
        vfio_listener_release(container);
    }

    if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER, &container->fd)) {
        error_report("vfio: error disconnecting group %d from container",
                     group->groupid);
    }

    if (QLIST_EMPTY(&container->group_list)) {
        VFIOAddressSpace *space = container->space;
        VFIOGuestIOMMU *giommu, *tmp;

        QLIST_REMOVE(container, next);

        QLIST_FOREACH_SAFE(giommu, &container->giommu_list, giommu_next, tmp) {
            memory_region_unregister_iommu_notifier(
                    MEMORY_REGION(giommu->iommu), &giommu->n);
            QLIST_REMOVE(giommu, giommu_next);
            g_free(giommu);
        }

        trace_vfio_disconnect_container(container->fd);
        close(container->fd);
        g_free(container);

        vfio_put_address_space(space);
    }
}
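
/*
 * Look up or create the VFIOGroup for /dev/vfio/<groupid> and connect it
 * to a container in @as.  A typical consumer (a device realize function,
 * for illustration) pairs this with vfio_get_device() and the matching
 * put calls, roughly:
 *
 *     group = vfio_get_group(groupid, as, errp);
 *     if (!group) {
 *         return;
 *     }
 *     if (vfio_get_device(group, name, vbasedev, errp)) {
 *         vfio_put_group(group);
 *         return;
 *     }
 *     ...
 *     vfio_put_base_device(vbasedev);
 *     vfio_put_group(group);
 */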
VFIOGroup *vfio_get_group(int groupid, AddressSpace *as, Error **errp)
{
    VFIOGroup *group;
    char path[32];
    struct vfio_group_status status = { .argsz = sizeof(status) };

    QLIST_FOREACH(group, &vfio_group_list, next) {
        if (group->groupid == groupid) {
            /* Found it.  Now is it already in the right context? */
            if (group->container->space->as == as) {
                return group;
            } else {
                error_setg(errp, "group %d used in multiple address spaces",
                           group->groupid);
                return NULL;
            }
        }
    }

    group = g_malloc0(sizeof(*group));

    snprintf(path, sizeof(path), "/dev/vfio/%d", groupid);
    group->fd = qemu_open_old(path, O_RDWR);
    if (group->fd < 0) {
        error_setg_errno(errp, errno, "failed to open %s", path);
        goto free_group_exit;
    }

    if (ioctl(group->fd, VFIO_GROUP_GET_STATUS, &status)) {
        error_setg_errno(errp, errno, "failed to get group %d status", groupid);
        goto close_fd_exit;
    }

    if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
        error_setg(errp, "group %d is not viable", groupid);
        error_append_hint(errp,
                          "Please ensure all devices within the iommu_group "
                          "are bound to their vfio bus driver.\n");
        goto close_fd_exit;
    }

    group->groupid = groupid;
    QLIST_INIT(&group->device_list);

    if (vfio_connect_container(group, as, errp)) {
        error_prepend(errp, "failed to setup container for group %d: ",
                      groupid);
        goto close_fd_exit;
    }

    if (QLIST_EMPTY(&vfio_group_list)) {
        qemu_register_reset(vfio_reset_handler, NULL);
    }

    QLIST_INSERT_HEAD(&vfio_group_list, group, next);

    return group;

close_fd_exit:
    close(group->fd);

free_group_exit:
    g_free(group);

    return NULL;
}

void vfio_put_group(VFIOGroup *group)
{
    if (!group || !QLIST_EMPTY(&group->device_list)) {
        return;
    }

    if (!group->ram_block_discard_allowed) {
        vfio_ram_block_discard_disable(group->container, false);
    }
    vfio_kvm_device_del_group(group);
    vfio_disconnect_container(group);
    QLIST_REMOVE(group, next);
    trace_vfio_put_group(group->fd);
    close(group->fd);
    g_free(group);

    if (QLIST_EMPTY(&vfio_group_list)) {
        qemu_unregister_reset(vfio_reset_handler, NULL);
    }
}

int vfio_get_device(VFIOGroup *group, const char *name,
                    VFIODevice *vbasedev, Error **errp)
{
    struct vfio_device_info dev_info = { .argsz = sizeof(dev_info) };
    int ret, fd;

    fd = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, name);
    if (fd < 0) {
        error_setg_errno(errp, errno, "error getting device from group %d",
                         group->groupid);
        error_append_hint(errp,
                      "Verify all devices in group %d are bound to vfio-<bus> "
                      "or pci-stub and not already in use\n", group->groupid);
        return fd;
    }

    ret = ioctl(fd, VFIO_DEVICE_GET_INFO, &dev_info);
    if (ret) {
        error_setg_errno(errp, errno, "error getting device info");
        close(fd);
        return ret;
    }

    /*
     * Set discarding of RAM as not broken for this group if the driver knows
     * the device operates compatibly with discarding.  Setting must be
     * consistent per group, but since compatibility is really only possible
     * with mdev currently, we expect singleton groups.
     */
    if (vbasedev->ram_block_discard_allowed !=
        group->ram_block_discard_allowed) {
        if (!QLIST_EMPTY(&group->device_list)) {
            error_setg(errp, "Inconsistent setting of support for discarding "
                       "RAM (e.g., balloon) within group");
            close(fd);
            return -1;
        }

        if (!group->ram_block_discard_allowed) {
            group->ram_block_discard_allowed = true;
            vfio_ram_block_discard_disable(group->container, false);
        }
    }

    vbasedev->fd = fd;
    vbasedev->group = group;
    QLIST_INSERT_HEAD(&group->device_list, vbasedev, next);

    vbasedev->num_irqs = dev_info.num_irqs;
    vbasedev->num_regions = dev_info.num_regions;
    vbasedev->flags = dev_info.flags;

    trace_vfio_get_device(name, dev_info.flags, dev_info.num_regions,
                          dev_info.num_irqs);

    vbasedev->reset_works = !!(dev_info.flags & VFIO_DEVICE_FLAGS_RESET);
    return 0;
}

void vfio_put_base_device(VFIODevice *vbasedev)
{
    if (!vbasedev->group) {
        return;
    }
    QLIST_REMOVE(vbasedev, next);
    vbasedev->group = NULL;
    trace_vfio_put_base_device(vbasedev->fd);
    close(vbasedev->fd);
}
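
/*
 * Fetch region info for @index, growing the buffer via the same argsz
 * retry dance as vfio_get_iommu_info() so that any capability chain the
 * kernel appends (sparse mmap areas, region types, ...) is captured.
 * On success the caller owns *info and must g_free() it.
 */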
int vfio_get_region_info(VFIODevice *vbasedev, int index,
                         struct vfio_region_info **info)
{
    size_t argsz = sizeof(struct vfio_region_info);

    *info = g_malloc0(argsz);

    (*info)->index = index;
retry:
    (*info)->argsz = argsz;

    if (ioctl(vbasedev->fd, VFIO_DEVICE_GET_REGION_INFO, *info)) {
        g_free(*info);
        *info = NULL;
        return -errno;
    }

    if ((*info)->argsz > argsz) {
        argsz = (*info)->argsz;
        *info = g_realloc(*info, argsz);

        goto retry;
    }

    return 0;
}
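
/*
 * Scan all of the device's regions for one whose VFIO_REGION_INFO_CAP_TYPE
 * capability matches @type/@subtype (e.g. a vendor-specific migration or
 * display region).  On a match, *info is returned to the caller, who must
 * g_free() it; otherwise *info is NULL and -ENODEV is returned.
 */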
int vfio_get_dev_region_info(VFIODevice *vbasedev, uint32_t type,
                             uint32_t subtype, struct vfio_region_info **info)
{
    int i;

    for (i = 0; i < vbasedev->num_regions; i++) {
        struct vfio_info_cap_header *hdr;
        struct vfio_region_info_cap_type *cap_type;

        if (vfio_get_region_info(vbasedev, i, info)) {
            continue;
        }

        hdr = vfio_get_region_info_cap(*info, VFIO_REGION_INFO_CAP_TYPE);
        if (!hdr) {
            g_free(*info);
            continue;
        }

        cap_type = container_of(hdr, struct vfio_region_info_cap_type, header);

        trace_vfio_get_dev_region(vbasedev->name, i,
                                  cap_type->type, cap_type->subtype);

        if (cap_type->type == type && cap_type->subtype == subtype) {
            return 0;
        }

        g_free(*info);
    }

    *info = NULL;
    return -ENODEV;
}

bool vfio_has_region_cap(VFIODevice *vbasedev, int region, uint16_t cap_type)
{
    struct vfio_region_info *info = NULL;
    bool ret = false;

    if (!vfio_get_region_info(vbasedev, region, &info)) {
        if (vfio_get_region_info_cap(info, cap_type)) {
            ret = true;
        }
        g_free(info);
    }

    return ret;
}

/*
 * Interfaces for IBM EEH (Enhanced Error Handling)
 */
static bool vfio_eeh_container_ok(VFIOContainer *container)
{
    /*
     * As of 2016-03-04 (linux-4.5), the host kernel EEH/VFIO
     * implementation is broken if there are multiple groups in a
     * container.  The hardware works in units of Partitionable
     * Endpoints (== IOMMU groups) and the EEH operations naively
     * iterate across all groups in the container, without any logic
     * to make sure the groups have their state synchronized.  For
     * certain operations (ENABLE) that might be ok, until an error
     * occurs, but for others (GET_STATE) it's clearly broken.
     */

    /* XXX Once fixed kernels exist, test for them here */

    if (QLIST_EMPTY(&container->group_list)) {
        return false;
    }

    if (QLIST_NEXT(QLIST_FIRST(&container->group_list), container_next)) {
        return false;
    }

    return true;
}
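
/*
 * Issue a VFIO_EEH_PE_OP ioctl on the container's PE.  A non-negative
 * ioctl return value is passed through to the caller, since some ops
 * (notably VFIO_EEH_PE_GET_STATE) report their result that way.
 */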
static int vfio_eeh_container_op(VFIOContainer *container, uint32_t op)
{
    struct vfio_eeh_pe_op pe_op = {
        .argsz = sizeof(pe_op),
        .op = op,
    };
    int ret;

    if (!vfio_eeh_container_ok(container)) {
        error_report("vfio/eeh: EEH_PE_OP 0x%x: "
                     "kernel requires a container with exactly one group", op);
        return -EPERM;
    }

    ret = ioctl(container->fd, VFIO_EEH_PE_OP, &pe_op);
    if (ret < 0) {
        error_report("vfio/eeh: EEH_PE_OP 0x%x failed: %m", op);
        return -errno;
    }

    return ret;
}

static VFIOContainer *vfio_eeh_as_container(AddressSpace *as)
{
    VFIOAddressSpace *space = vfio_get_address_space(as);
    VFIOContainer *container = NULL;

    if (QLIST_EMPTY(&space->containers)) {
        /* No containers to act on */
        goto out;
    }

    container = QLIST_FIRST(&space->containers);

    if (QLIST_NEXT(container, next)) {
        /*
         * We don't yet have logic to synchronize EEH state across
         * multiple containers.
         */
        container = NULL;
        goto out;
    }

out:
    vfio_put_address_space(space);
    return container;
}

bool vfio_eeh_as_ok(AddressSpace *as)
{
    VFIOContainer *container = vfio_eeh_as_container(as);

    return (container != NULL) && vfio_eeh_container_ok(container);
}

int vfio_eeh_as_op(AddressSpace *as, uint32_t op)
{
    VFIOContainer *container = vfio_eeh_as_container(as);

    if (!container) {
        return -ENODEV;
    }
    return vfio_eeh_container_op(container, op);
}