/*
 * iommufd container backend
 *
 * Copyright (C) 2023 Intel Corporation.
 * Copyright Red Hat, Inc. 2023
 *
 * Authors: Yi Liu <yi.l.liu@intel.com>
 *          Eric Auger <eric.auger@redhat.com>
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */
#include "qemu/osdep.h"
#include "sysemu/iommufd.h"
#include "qapi/error.h"
#include "qemu/module.h"
#include "qom/object_interfaces.h"
#include "qemu/error-report.h"
#include "monitor/monitor.h"
#include "trace.h"
#include "hw/vfio/vfio-common.h"
#include <sys/ioctl.h>
#include <linux/iommufd.h>
25 static void iommufd_backend_init(Object
*obj
)
27 IOMMUFDBackend
*be
= IOMMUFD_BACKEND(obj
);
34 static void iommufd_backend_finalize(Object
*obj
)
36 IOMMUFDBackend
*be
= IOMMUFD_BACKEND(obj
);
44 static void iommufd_backend_set_fd(Object
*obj
, const char *str
, Error
**errp
)
47 IOMMUFDBackend
*be
= IOMMUFD_BACKEND(obj
);
50 fd
= monitor_fd_param(monitor_cur(), str
, errp
);
52 error_prepend(errp
, "Could not parse remote object fd %s:", str
);
57 trace_iommu_backend_set_fd(be
->fd
);
60 static bool iommufd_backend_can_be_deleted(UserCreatable
*uc
)
62 IOMMUFDBackend
*be
= IOMMUFD_BACKEND(uc
);
67 static void iommufd_backend_class_init(ObjectClass
*oc
, void *data
)
69 UserCreatableClass
*ucc
= USER_CREATABLE_CLASS(oc
);
71 ucc
->can_be_deleted
= iommufd_backend_can_be_deleted
;
73 object_class_property_add_str(oc
, "fd", NULL
, iommufd_backend_set_fd
);
76 bool iommufd_backend_connect(IOMMUFDBackend
*be
, Error
**errp
)
80 if (be
->owned
&& !be
->users
) {
81 fd
= qemu_open("/dev/iommu", O_RDWR
, errp
);
89 trace_iommufd_backend_connect(be
->fd
, be
->owned
, be
->users
);
93 void iommufd_backend_disconnect(IOMMUFDBackend
*be
)
99 if (!be
->users
&& be
->owned
) {
104 trace_iommufd_backend_disconnect(be
->fd
, be
->users
);
107 bool iommufd_backend_alloc_ioas(IOMMUFDBackend
*be
, uint32_t *ioas_id
,
111 struct iommu_ioas_alloc alloc_data
= {
112 .size
= sizeof(alloc_data
),
116 if (ioctl(fd
, IOMMU_IOAS_ALLOC
, &alloc_data
)) {
117 error_setg_errno(errp
, errno
, "Failed to allocate ioas");
121 *ioas_id
= alloc_data
.out_ioas_id
;
122 trace_iommufd_backend_alloc_ioas(fd
, *ioas_id
);
127 void iommufd_backend_free_id(IOMMUFDBackend
*be
, uint32_t id
)
129 int ret
, fd
= be
->fd
;
130 struct iommu_destroy des
= {
135 ret
= ioctl(fd
, IOMMU_DESTROY
, &des
);
136 trace_iommufd_backend_free_id(fd
, id
, ret
);
138 error_report("Failed to free id: %u %m", id
);
142 int iommufd_backend_map_dma(IOMMUFDBackend
*be
, uint32_t ioas_id
, hwaddr iova
,
143 ram_addr_t size
, void *vaddr
, bool readonly
)
145 int ret
, fd
= be
->fd
;
146 struct iommu_ioas_map map
= {
148 .flags
= IOMMU_IOAS_MAP_READABLE
|
149 IOMMU_IOAS_MAP_FIXED_IOVA
,
152 .user_va
= (uintptr_t)vaddr
,
158 map
.flags
|= IOMMU_IOAS_MAP_WRITEABLE
;
161 ret
= ioctl(fd
, IOMMU_IOAS_MAP
, &map
);
162 trace_iommufd_backend_map_dma(fd
, ioas_id
, iova
, size
,
163 vaddr
, readonly
, ret
);
167 /* TODO: Not support mapping hardware PCI BAR region for now. */
168 if (errno
== EFAULT
) {
169 warn_report("IOMMU_IOAS_MAP failed: %m, PCI BAR?");
171 error_report("IOMMU_IOAS_MAP failed: %m");
177 int iommufd_backend_unmap_dma(IOMMUFDBackend
*be
, uint32_t ioas_id
,
178 hwaddr iova
, ram_addr_t size
)
180 int ret
, fd
= be
->fd
;
181 struct iommu_ioas_unmap unmap
= {
182 .size
= sizeof(unmap
),
188 ret
= ioctl(fd
, IOMMU_IOAS_UNMAP
, &unmap
);
190 * IOMMUFD takes mapping as some kind of object, unmapping
191 * nonexistent mapping is treated as deleting a nonexistent
192 * object and return ENOENT. This is different from legacy
193 * backend which allows it. vIOMMU may trigger a lot of
194 * redundant unmapping, to avoid flush the log, treat them
195 * as succeess for IOMMUFD just like legacy backend.
197 if (ret
&& errno
== ENOENT
) {
198 trace_iommufd_backend_unmap_dma_non_exist(fd
, ioas_id
, iova
, size
, ret
);
201 trace_iommufd_backend_unmap_dma(fd
, ioas_id
, iova
, size
, ret
);
206 error_report("IOMMU_IOAS_UNMAP failed: %m");
211 bool iommufd_backend_alloc_hwpt(IOMMUFDBackend
*be
, uint32_t dev_id
,
212 uint32_t pt_id
, uint32_t flags
,
213 uint32_t data_type
, uint32_t data_len
,
214 void *data_ptr
, uint32_t *out_hwpt
,
217 int ret
, fd
= be
->fd
;
218 struct iommu_hwpt_alloc alloc_hwpt
= {
219 .size
= sizeof(struct iommu_hwpt_alloc
),
223 .data_type
= data_type
,
224 .data_len
= data_len
,
225 .data_uptr
= (uintptr_t)data_ptr
,
228 ret
= ioctl(fd
, IOMMU_HWPT_ALLOC
, &alloc_hwpt
);
229 trace_iommufd_backend_alloc_hwpt(fd
, dev_id
, pt_id
, flags
, data_type
,
230 data_len
, (uintptr_t)data_ptr
,
231 alloc_hwpt
.out_hwpt_id
, ret
);
233 error_setg_errno(errp
, errno
, "Failed to allocate hwpt");
237 *out_hwpt
= alloc_hwpt
.out_hwpt_id
;
241 bool iommufd_backend_set_dirty_tracking(IOMMUFDBackend
*be
,
242 uint32_t hwpt_id
, bool start
,
246 struct iommu_hwpt_set_dirty_tracking set_dirty
= {
247 .size
= sizeof(set_dirty
),
249 .flags
= start
? IOMMU_HWPT_DIRTY_TRACKING_ENABLE
: 0,
252 ret
= ioctl(be
->fd
, IOMMU_HWPT_SET_DIRTY_TRACKING
, &set_dirty
);
253 trace_iommufd_backend_set_dirty(be
->fd
, hwpt_id
, start
, ret
? errno
: 0);
255 error_setg_errno(errp
, errno
,
256 "IOMMU_HWPT_SET_DIRTY_TRACKING(hwpt_id %u) failed",
264 bool iommufd_backend_get_dirty_bitmap(IOMMUFDBackend
*be
,
266 uint64_t iova
, ram_addr_t size
,
267 uint64_t page_size
, uint64_t *data
,
271 struct iommu_hwpt_get_dirty_bitmap get_dirty_bitmap
= {
272 .size
= sizeof(get_dirty_bitmap
),
276 .page_size
= page_size
,
277 .data
= (uintptr_t)data
,
280 ret
= ioctl(be
->fd
, IOMMU_HWPT_GET_DIRTY_BITMAP
, &get_dirty_bitmap
);
281 trace_iommufd_backend_get_dirty_bitmap(be
->fd
, hwpt_id
, iova
, size
,
282 page_size
, ret
? errno
: 0);
284 error_setg_errno(errp
, errno
,
285 "IOMMU_HWPT_GET_DIRTY_BITMAP (iova: 0x%"HWADDR_PRIx
286 " size: 0x"RAM_ADDR_FMT
") failed", iova
, size
);
293 bool iommufd_backend_get_device_info(IOMMUFDBackend
*be
, uint32_t devid
,
294 uint32_t *type
, void *data
, uint32_t len
,
295 uint64_t *caps
, Error
**errp
)
297 struct iommu_hw_info info
= {
298 .size
= sizeof(info
),
301 .data_uptr
= (uintptr_t)data
,
304 if (ioctl(be
->fd
, IOMMU_GET_HW_INFO
, &info
)) {
305 error_setg_errno(errp
, errno
, "Failed to get hardware info");
310 *type
= info
.out_data_type
;
312 *caps
= info
.out_capabilities
;
317 static int hiod_iommufd_get_cap(HostIOMMUDevice
*hiod
, int cap
, Error
**errp
)
319 HostIOMMUDeviceCaps
*caps
= &hiod
->caps
;
322 case HOST_IOMMU_DEVICE_CAP_IOMMU_TYPE
:
324 case HOST_IOMMU_DEVICE_CAP_AW_BITS
:
325 return vfio_device_get_aw_bits(hiod
->agent
);
327 error_setg(errp
, "%s: unsupported capability %x", hiod
->name
, cap
);
332 static void hiod_iommufd_class_init(ObjectClass
*oc
, void *data
)
334 HostIOMMUDeviceClass
*hioc
= HOST_IOMMU_DEVICE_CLASS(oc
);
336 hioc
->get_cap
= hiod_iommufd_get_cap
;
339 static const TypeInfo types
[] = {
341 .name
= TYPE_IOMMUFD_BACKEND
,
342 .parent
= TYPE_OBJECT
,
343 .instance_size
= sizeof(IOMMUFDBackend
),
344 .instance_init
= iommufd_backend_init
,
345 .instance_finalize
= iommufd_backend_finalize
,
346 .class_size
= sizeof(IOMMUFDBackendClass
),
347 .class_init
= iommufd_backend_class_init
,
348 .interfaces
= (InterfaceInfo
[]) {
349 { TYPE_USER_CREATABLE
},
353 .name
= TYPE_HOST_IOMMU_DEVICE_IOMMUFD
,
354 .parent
= TYPE_HOST_IOMMU_DEVICE
,
355 .class_init
= hiod_iommufd_class_init
,