/*
 * QTest testcase for VirtIO IOMMU
 *
 * Copyright (c) 2021 Red Hat, Inc.
 *
 * Authors:
 *  Eric Auger <eric.auger@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or (at your
 * option) any later version. See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "libqtest-single.h"
#include "qemu/module.h"
#include "libqos/qgraph.h"
#include "libqos/virtio-iommu.h"
#include "hw/virtio/virtio-iommu.h"

#define PCI_SLOT_HP 0x06
#define QVIRTIO_IOMMU_TIMEOUT_US (30 * 1000 * 1000)
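
/* Guest allocator used by the request helpers; the tests set it from t_alloc */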
static QGuestAllocator *alloc;

static void pci_config(void *obj, void *data, QGuestAllocator *t_alloc)
{
    QVirtioIOMMU *v_iommu = obj;
    QVirtioDevice *dev = v_iommu->vdev;
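    /*
     * Config space offsets assumed below (struct virtio_iommu_config):
     * input_range.start/end at bytes 8/16, domain_range.start/end at
     * 24/28, and the bypass byte at offset 36.
     */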
    uint64_t input_range_start = qvirtio_config_readq(dev, 8);
    uint64_t input_range_end = qvirtio_config_readq(dev, 16);
    uint32_t domain_range_start = qvirtio_config_readl(dev, 24);
    uint32_t domain_range_end = qvirtio_config_readl(dev, 28);
    uint8_t bypass = qvirtio_config_readb(dev, 36);

    g_assert_cmpint(input_range_start, ==, 0);
    g_assert_cmphex(input_range_end, >=, UINT32_MAX);
    g_assert_cmpint(domain_range_start, ==, 0);
    g_assert_cmpint(domain_range_end, ==, UINT32_MAX);
    g_assert_cmpint(bypass, ==, 1);
}
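
/*
 * Each request ends with a virtio_iommu_req_tail written back by the device:
 * a status byte followed by three reserved bytes that must read as zero.
 */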
static int read_tail_status(struct virtio_iommu_req_tail *buffer)
{
    int i;

    for (i = 0; i < 3; i++) {
        g_assert_cmpint(buffer->reserved[i], ==, 0);
    }
    return buffer->status;
}

/**
 * send_attach_detach - Send an attach/detach command to the device
 * @type: VIRTIO_IOMMU_T_ATTACH/VIRTIO_IOMMU_T_DETACH
 * @domain: domain the endpoint is attached to
 * @ep: endpoint
 */
static int send_attach_detach(QTestState *qts, QVirtioIOMMU *v_iommu,
                              uint8_t type, uint32_t domain, uint32_t ep)
{
    QVirtioDevice *dev = v_iommu->vdev;
    QVirtQueue *vq = v_iommu->vq;
    uint64_t ro_addr, wr_addr;
    uint32_t free_head;
    struct virtio_iommu_req_attach req = {}; /* same layout as detach */
    size_t ro_size = sizeof(req) - sizeof(struct virtio_iommu_req_tail);
    size_t wr_size = sizeof(struct virtio_iommu_req_tail);
    struct virtio_iommu_req_tail buffer;
    int ret;

    req.head.type = type;
    req.domain = cpu_to_le32(domain);
    req.endpoint = cpu_to_le32(ep);

    ro_addr = guest_alloc(alloc, ro_size);
    wr_addr = guest_alloc(alloc, wr_size);
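
    /*
     * The request is submitted as a two-descriptor chain: a device-readable
     * buffer holding everything up to the tail, chained to a device-writable
     * buffer into which the device stores the virtio_iommu_req_tail.
     */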
    qtest_memwrite(qts, ro_addr, &req, ro_size);
    free_head = qvirtqueue_add(qts, vq, ro_addr, ro_size, false, true);
    qvirtqueue_add(qts, vq, wr_addr, wr_size, true, false);
    qvirtqueue_kick(qts, dev, vq, free_head);
    qvirtio_wait_used_elem(qts, dev, vq, free_head, NULL,
                           QVIRTIO_IOMMU_TIMEOUT_US);
    qtest_memread(qts, wr_addr, &buffer, wr_size);
    ret = read_tail_status(&buffer);
    guest_free(alloc, ro_addr);
    guest_free(alloc, wr_addr);
    return ret;
}

/**
 * send_map - Send a map command to the device
 * @domain: domain the new mapping is attached to
 * @virt_start: iova start
 * @virt_end: iova end
 * @phys_start: base physical address
 * @flags: mapping flags
 */
static int send_map(QTestState *qts, QVirtioIOMMU *v_iommu,
                    uint32_t domain, uint64_t virt_start, uint64_t virt_end,
                    uint64_t phys_start, uint32_t flags)
{
    QVirtioDevice *dev = v_iommu->vdev;
    QVirtQueue *vq = v_iommu->vq;
    uint64_t ro_addr, wr_addr;
    uint32_t free_head;
    struct virtio_iommu_req_map req;
    size_t ro_size = sizeof(req) - sizeof(struct virtio_iommu_req_tail);
    size_t wr_size = sizeof(struct virtio_iommu_req_tail);
    struct virtio_iommu_req_tail buffer;
    int ret;

    req.head.type = VIRTIO_IOMMU_T_MAP;
    req.domain = cpu_to_le32(domain);
    req.virt_start = cpu_to_le64(virt_start);
    req.virt_end = cpu_to_le64(virt_end);
    req.phys_start = cpu_to_le64(phys_start);
    req.flags = cpu_to_le32(flags);

    ro_addr = guest_alloc(alloc, ro_size);
    wr_addr = guest_alloc(alloc, wr_size);

    qtest_memwrite(qts, ro_addr, &req, ro_size);
    free_head = qvirtqueue_add(qts, vq, ro_addr, ro_size, false, true);
    qvirtqueue_add(qts, vq, wr_addr, wr_size, true, false);
    qvirtqueue_kick(qts, dev, vq, free_head);
    qvirtio_wait_used_elem(qts, dev, vq, free_head, NULL,
                           QVIRTIO_IOMMU_TIMEOUT_US);
    qtest_memread(qts, wr_addr, &buffer, wr_size);
    ret = read_tail_status(&buffer);
    guest_free(alloc, ro_addr);
    guest_free(alloc, wr_addr);
    return ret;
}

/**
 * send_unmap - Send an unmap command to the device
 * @domain: domain the mapping to remove belongs to
 * @virt_start: iova start
 * @virt_end: iova end
 */
static int send_unmap(QTestState *qts, QVirtioIOMMU *v_iommu,
                      uint32_t domain, uint64_t virt_start, uint64_t virt_end)
{
    QVirtioDevice *dev = v_iommu->vdev;
    QVirtQueue *vq = v_iommu->vq;
    uint64_t ro_addr, wr_addr;
    uint32_t free_head;
    struct virtio_iommu_req_unmap req;
    size_t ro_size = sizeof(req) - sizeof(struct virtio_iommu_req_tail);
    size_t wr_size = sizeof(struct virtio_iommu_req_tail);
    struct virtio_iommu_req_tail buffer;
    int ret;

    req.head.type = VIRTIO_IOMMU_T_UNMAP;
    req.domain = cpu_to_le32(domain);
    req.virt_start = cpu_to_le64(virt_start);
    req.virt_end = cpu_to_le64(virt_end);

    ro_addr = guest_alloc(alloc, ro_size);
    wr_addr = guest_alloc(alloc, wr_size);

    qtest_memwrite(qts, ro_addr, &req, ro_size);
    free_head = qvirtqueue_add(qts, vq, ro_addr, ro_size, false, true);
    qvirtqueue_add(qts, vq, wr_addr, wr_size, true, false);
    qvirtqueue_kick(qts, dev, vq, free_head);
    qvirtio_wait_used_elem(qts, dev, vq, free_head, NULL,
                           QVIRTIO_IOMMU_TIMEOUT_US);
    qtest_memread(qts, wr_addr, &buffer, wr_size);
    ret = read_tail_status(&buffer);
    guest_free(alloc, ro_addr);
    guest_free(alloc, wr_addr);
    return ret;
}

static void test_attach_detach(void *obj, void *data, QGuestAllocator *t_alloc)
{
    QVirtioIOMMU *v_iommu = obj;
    QTestState *qts = global_qtest;
    int ret;

    alloc = t_alloc;

    /* type, domain, ep */

    /* attach ep0 to domain 0 */
    ret = send_attach_detach(qts, v_iommu, VIRTIO_IOMMU_T_ATTACH, 0, 0);
    g_assert_cmpint(ret, ==, 0);

    /* attach a non-existent endpoint (444) */
    ret = send_attach_detach(qts, v_iommu, VIRTIO_IOMMU_T_ATTACH, 0, 444);
    g_assert_cmpint(ret, ==, VIRTIO_IOMMU_S_NOENT);

    /* detach a non-existent endpoint (1) */
    ret = send_attach_detach(qts, v_iommu, VIRTIO_IOMMU_T_DETACH, 0, 1);
    g_assert_cmpint(ret, ==, VIRTIO_IOMMU_S_NOENT);

    /* move ep0 from domain 0 to domain 1 */
    ret = send_attach_detach(qts, v_iommu, VIRTIO_IOMMU_T_ATTACH, 1, 0);
    g_assert_cmpint(ret, ==, 0);

    /* detach ep0 from domain 0: it now belongs to domain 1, so this fails */
    ret = send_attach_detach(qts, v_iommu, VIRTIO_IOMMU_T_DETACH, 0, 0);
    g_assert_cmpint(ret, ==, VIRTIO_IOMMU_S_INVAL);

    /* detach ep0 from domain 1 */
    ret = send_attach_detach(qts, v_iommu, VIRTIO_IOMMU_T_DETACH, 1, 0);
    g_assert_cmpint(ret, ==, 0);
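
    /*
     * Re-attach ep0 to domain 1 and install two mappings: the final detach
     * must still succeed while the domain holds mappings.
     */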
    ret = send_attach_detach(qts, v_iommu, VIRTIO_IOMMU_T_ATTACH, 1, 0);
    g_assert_cmpint(ret, ==, 0);
    ret = send_map(qts, v_iommu, 1, 0x0, 0xFFF, 0xa1000,
                   VIRTIO_IOMMU_MAP_F_READ);
    g_assert_cmpint(ret, ==, 0);
    ret = send_map(qts, v_iommu, 1, 0x2000, 0x2FFF, 0xb1000,
                   VIRTIO_IOMMU_MAP_F_READ);
    g_assert_cmpint(ret, ==, 0);
    ret = send_attach_detach(qts, v_iommu, VIRTIO_IOMMU_T_DETACH, 1, 0);
    g_assert_cmpint(ret, ==, 0);
}

/* Test map/unmap scenarios documented in the spec */
static void test_map_unmap(void *obj, void *data, QGuestAllocator *t_alloc)
{
    QVirtioIOMMU *v_iommu = obj;
    QTestState *qts = global_qtest;
    int ret;

    alloc = t_alloc;

    /* attach ep0 to domain 1 */
    ret = send_attach_detach(qts, v_iommu, VIRTIO_IOMMU_T_ATTACH, 1, 0);
    g_assert_cmpint(ret, ==, 0);

    /* map into domain 0, which has no endpoint attached */
    ret = send_map(qts, v_iommu, 0, 0, 0xFFF, 0xa1000, VIRTIO_IOMMU_MAP_F_READ);
    g_assert_cmpint(ret, ==, VIRTIO_IOMMU_S_NOENT);

    /* domain, virt start, virt end, phys start, flags */
    ret = send_map(qts, v_iommu, 1, 0x0, 0xFFF, 0xa1000, VIRTIO_IOMMU_MAP_F_READ);
    g_assert_cmpint(ret, ==, 0);

    /* send a new mapping overlapping the previous one */
    ret = send_map(qts, v_iommu, 1, 0, 0xFFFF, 0xb1000, VIRTIO_IOMMU_MAP_F_READ);
    g_assert_cmpint(ret, ==, VIRTIO_IOMMU_S_INVAL);

    /* unmap in a domain that does not exist */
    ret = send_unmap(qts, v_iommu, 4, 0x10, 0xFFF);
    g_assert_cmpint(ret, ==, VIRTIO_IOMMU_S_NOENT);

    /* unmap a range that covers only part of the [0, 0xFFF] mapping */
    ret = send_unmap(qts, v_iommu, 1, 0x10, 0xFFF);
    g_assert_cmpint(ret, ==, VIRTIO_IOMMU_S_RANGE);

    ret = send_unmap(qts, v_iommu, 1, 0, 0x1000);
    g_assert_cmpint(ret, ==, 0); /* unmap everything */

    /* Spec example sequence */
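    /*
     * The numbered cases below follow the UNMAP examples in the virtio-iommu
     * specification: an unmap range removes only the mappings it covers
     * entirely and must not split an existing mapping.
     */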

    /* 1 */
    ret = send_unmap(qts, v_iommu, 1, 0, 4);
    g_assert_cmpint(ret, ==, 0); /* doesn't unmap anything */

    /* 2 */
    ret = send_map(qts, v_iommu, 1, 0, 9, 0xa1000, VIRTIO_IOMMU_MAP_F_READ);
    g_assert_cmpint(ret, ==, 0);
    ret = send_unmap(qts, v_iommu, 1, 0, 9);
    g_assert_cmpint(ret, ==, 0); /* unmaps [0,9] */

    /* 3 */
    ret = send_map(qts, v_iommu, 1, 0, 4, 0xb1000, VIRTIO_IOMMU_MAP_F_READ);
    g_assert_cmpint(ret, ==, 0);
    ret = send_map(qts, v_iommu, 1, 5, 9, 0xb2000, VIRTIO_IOMMU_MAP_F_READ);
    g_assert_cmpint(ret, ==, 0);
    ret = send_unmap(qts, v_iommu, 1, 0, 9);
    g_assert_cmpint(ret, ==, 0); /* unmaps [0,4] and [5,9] */

    /* 4 */
    ret = send_map(qts, v_iommu, 1, 0, 9, 0xc1000, VIRTIO_IOMMU_MAP_F_READ);
    g_assert_cmpint(ret, ==, 0);

    ret = send_unmap(qts, v_iommu, 1, 0, 4);
    g_assert_cmpint(ret, ==, VIRTIO_IOMMU_S_RANGE); /* doesn't unmap anything */

    ret = send_unmap(qts, v_iommu, 1, 0, 10);
    g_assert_cmpint(ret, ==, 0);

    /* 5 */
    ret = send_map(qts, v_iommu, 1, 0, 4, 0xd1000, VIRTIO_IOMMU_MAP_F_READ);
    g_assert_cmpint(ret, ==, 0);
    ret = send_map(qts, v_iommu, 1, 5, 9, 0xd2000, VIRTIO_IOMMU_MAP_F_READ);
    g_assert_cmpint(ret, ==, 0);
    ret = send_unmap(qts, v_iommu, 1, 0, 4);
    g_assert_cmpint(ret, ==, 0); /* unmaps [0,4] */

    ret = send_unmap(qts, v_iommu, 1, 5, 9);
    g_assert_cmpint(ret, ==, 0);

    /* 6 */
    ret = send_map(qts, v_iommu, 1, 0, 4, 0xe2000, VIRTIO_IOMMU_MAP_F_READ);
    g_assert_cmpint(ret, ==, 0);
    ret = send_unmap(qts, v_iommu, 1, 0, 9);
    g_assert_cmpint(ret, ==, 0); /* unmaps [0,4] */

    /* 7 */
    ret = send_map(qts, v_iommu, 1, 0, 4, 0xf2000, VIRTIO_IOMMU_MAP_F_READ);
    g_assert_cmpint(ret, ==, 0);
    ret = send_map(qts, v_iommu, 1, 10, 14, 0xf3000, VIRTIO_IOMMU_MAP_F_READ);
    g_assert_cmpint(ret, ==, 0);
    ret = send_unmap(qts, v_iommu, 1, 0, 14);
    g_assert_cmpint(ret, ==, 0); /* unmaps [0,4] and [10,14] */

    ret = send_map(qts, v_iommu, 1, 10, 14, 0xf3000, VIRTIO_IOMMU_MAP_F_READ);
    g_assert_cmpint(ret, ==, 0);
    ret = send_map(qts, v_iommu, 1, 0, 4, 0xf2000, VIRTIO_IOMMU_MAP_F_READ);
    g_assert_cmpint(ret, ==, 0);
    ret = send_unmap(qts, v_iommu, 1, 0, 4);
    g_assert_cmpint(ret, ==, 0); /* only unmaps [0,4] */
    ret = send_map(qts, v_iommu, 1, 10, 14, 0xf3000, VIRTIO_IOMMU_MAP_F_READ);
    g_assert_cmpint(ret, ==, VIRTIO_IOMMU_S_INVAL); /* [10,14] is still mapped */
}

static void register_virtio_iommu_test(void)
{
    qos_add_test("config", "virtio-iommu", pci_config, NULL);
    qos_add_test("attach_detach", "virtio-iommu", test_attach_detach, NULL);
    qos_add_test("map_unmap", "virtio-iommu", test_map_unmap, NULL);
}

libqos_init(register_virtio_iommu_test);