/*
 * vhost-backend
 *
 * Copyright (c) 2013 Virtual Open Systems Sarl.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "hw/virtio/vhost.h"
#include "hw/virtio/vhost-backend.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "standard-headers/linux/vhost_types.h"

#include "hw/virtio/vhost-vdpa.h"
#ifdef CONFIG_VHOST_KERNEL
#include <linux/vhost.h>
#include <sys/ioctl.h>

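/*
 * All kernel-backend requests boil down to an ioctl on the vhost
 * character-device fd stored in dev->opaque.
 */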
static int vhost_kernel_call(struct vhost_dev *dev, unsigned long int request,
                             void *arg)
{
    int fd = (uintptr_t) dev->opaque;
    int ret;

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_KERNEL);

    ret = ioctl(fd, request, arg);
    return ret < 0 ? -errno : ret;
}

static int vhost_kernel_init(struct vhost_dev *dev, void *opaque, Error **errp)
{
    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_KERNEL);

    dev->opaque = opaque;

    return 0;
}

static int vhost_kernel_cleanup(struct vhost_dev *dev)
{
    int fd = (uintptr_t) dev->opaque;

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_KERNEL);

    return close(fd) < 0 ? -errno : 0;
}

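/*
 * The vhost kernel module limits the number of memory regions it accepts;
 * read the module parameter when present, otherwise assume the historic
 * default of 64 slots.
 */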
static int vhost_kernel_memslots_limit(struct vhost_dev *dev)
{
    int limit = 64;
    char *s;

    if (g_file_get_contents("/sys/module/vhost/parameters/max_mem_regions",
                            &s, NULL, NULL)) {
        uint64_t val = g_ascii_strtoull(s, NULL, 10);
        if (val < INT_MAX && val > 0) {
            g_free(s);
            return val;
        }
        error_report("ignoring invalid max_mem_regions value in vhost module:"
                     " %s", s);
        g_free(s);
    }
    return limit;
}

static int vhost_kernel_net_set_backend(struct vhost_dev *dev,
                                        struct vhost_vring_file *file)
{
    return vhost_kernel_call(dev, VHOST_NET_SET_BACKEND, file);
}

static int vhost_kernel_scsi_set_endpoint(struct vhost_dev *dev,
                                          struct vhost_scsi_target *target)
{
    return vhost_kernel_call(dev, VHOST_SCSI_SET_ENDPOINT, target);
}

static int vhost_kernel_scsi_clear_endpoint(struct vhost_dev *dev,
                                            struct vhost_scsi_target *target)
{
    return vhost_kernel_call(dev, VHOST_SCSI_CLEAR_ENDPOINT, target);
}

static int vhost_kernel_scsi_get_abi_version(struct vhost_dev *dev, int *version)
{
    return vhost_kernel_call(dev, VHOST_SCSI_GET_ABI_VERSION, version);
}

static int vhost_kernel_set_log_base(struct vhost_dev *dev, uint64_t base,
                                     struct vhost_log *log)
{
    return vhost_kernel_call(dev, VHOST_SET_LOG_BASE, &base);
}

static int vhost_kernel_set_mem_table(struct vhost_dev *dev,
                                      struct vhost_memory *mem)
{
    return vhost_kernel_call(dev, VHOST_SET_MEM_TABLE, mem);
}

static int vhost_kernel_set_vring_addr(struct vhost_dev *dev,
                                       struct vhost_vring_addr *addr)
{
    return vhost_kernel_call(dev, VHOST_SET_VRING_ADDR, addr);
}

static int vhost_kernel_set_vring_endian(struct vhost_dev *dev,
                                         struct vhost_vring_state *ring)
{
    return vhost_kernel_call(dev, VHOST_SET_VRING_ENDIAN, ring);
}

static int vhost_kernel_set_vring_num(struct vhost_dev *dev,
                                      struct vhost_vring_state *ring)
{
    return vhost_kernel_call(dev, VHOST_SET_VRING_NUM, ring);
}

static int vhost_kernel_set_vring_base(struct vhost_dev *dev,
                                       struct vhost_vring_state *ring)
{
    return vhost_kernel_call(dev, VHOST_SET_VRING_BASE, ring);
}

static int vhost_kernel_get_vring_base(struct vhost_dev *dev,
                                       struct vhost_vring_state *ring)
{
    return vhost_kernel_call(dev, VHOST_GET_VRING_BASE, ring);
}

static int vhost_kernel_set_vring_kick(struct vhost_dev *dev,
                                       struct vhost_vring_file *file)
{
    return vhost_kernel_call(dev, VHOST_SET_VRING_KICK, file);
}

static int vhost_kernel_set_vring_call(struct vhost_dev *dev,
                                       struct vhost_vring_file *file)
{
    return vhost_kernel_call(dev, VHOST_SET_VRING_CALL, file);
}

static int vhost_kernel_set_vring_err(struct vhost_dev *dev,
                                      struct vhost_vring_file *file)
{
    return vhost_kernel_call(dev, VHOST_SET_VRING_ERR, file);
}

static int vhost_kernel_set_vring_busyloop_timeout(struct vhost_dev *dev,
                                                   struct vhost_vring_state *s)
{
    return vhost_kernel_call(dev, VHOST_SET_VRING_BUSYLOOP_TIMEOUT, s);
}

static int vhost_kernel_new_worker(struct vhost_dev *dev,
                                   struct vhost_worker_state *worker)
{
    return vhost_kernel_call(dev, VHOST_NEW_WORKER, worker);
}

static int vhost_kernel_free_worker(struct vhost_dev *dev,
                                    struct vhost_worker_state *worker)
{
    return vhost_kernel_call(dev, VHOST_FREE_WORKER, worker);
}

static int vhost_kernel_attach_vring_worker(struct vhost_dev *dev,
                                            struct vhost_vring_worker *worker)
{
    return vhost_kernel_call(dev, VHOST_ATTACH_VRING_WORKER, worker);
}

static int vhost_kernel_get_vring_worker(struct vhost_dev *dev,
                                         struct vhost_vring_worker *worker)
{
    return vhost_kernel_call(dev, VHOST_GET_VRING_WORKER, worker);
}

static int vhost_kernel_set_features(struct vhost_dev *dev,
                                     uint64_t features)
{
    return vhost_kernel_call(dev, VHOST_SET_FEATURES, &features);
}

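/*
 * Negotiate backend features: offer only VHOST_BACKEND_F_IOTLB_MSG_V2 and
 * remember what the kernel accepted in dev->backend_cap.  Failures are not
 * fatal; the device simply runs without the capability.
 */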
static int vhost_kernel_set_backend_cap(struct vhost_dev *dev)
{
    uint64_t features;
    uint64_t f = 0x1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2;
    int r;

    if (vhost_kernel_call(dev, VHOST_GET_BACKEND_FEATURES, &features)) {
        return 0;
    }

    features &= f;
    r = vhost_kernel_call(dev, VHOST_SET_BACKEND_FEATURES,
                          &features);
    if (r) {
        return 0;
    }

    dev->backend_cap = features;

    return 0;
}

static int vhost_kernel_get_features(struct vhost_dev *dev,
                                     uint64_t *features)
{
    return vhost_kernel_call(dev, VHOST_GET_FEATURES, features);
}

static int vhost_kernel_set_owner(struct vhost_dev *dev)
{
    return vhost_kernel_call(dev, VHOST_SET_OWNER, NULL);
}

static int vhost_kernel_get_vq_index(struct vhost_dev *dev, int idx)
{
    assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);

    return idx - dev->vq_index;
}

static int vhost_kernel_vsock_set_guest_cid(struct vhost_dev *dev,
                                            uint64_t guest_cid)
{
    return vhost_kernel_call(dev, VHOST_VSOCK_SET_GUEST_CID, &guest_cid);
}

static int vhost_kernel_vsock_set_running(struct vhost_dev *dev, int start)
{
    return vhost_kernel_call(dev, VHOST_VSOCK_SET_RUNNING, &start);
}

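/*
 * fd read handler: drain IOTLB messages coming from the kernel and pass
 * them to the generic handler, using the v2 message layout when
 * VHOST_BACKEND_F_IOTLB_MSG_V2 was negotiated.
 */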
static void vhost_kernel_iotlb_read(void *opaque)
{
    struct vhost_dev *dev = opaque;
    ssize_t len;

    if (dev->backend_cap &
        (0x1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2)) {
        struct vhost_msg_v2 msg;

        while ((len = read((uintptr_t)dev->opaque, &msg, sizeof msg)) > 0) {
            if (len < sizeof msg) {
                error_report("Wrong vhost message len: %d", (int)len);
                break;
            }
            if (msg.type != VHOST_IOTLB_MSG_V2) {
                error_report("Unknown vhost iotlb message type");
                break;
            }

            vhost_backend_handle_iotlb_msg(dev, &msg.iotlb);
        }
    } else {
        struct vhost_msg msg;

        while ((len = read((uintptr_t)dev->opaque, &msg, sizeof msg)) > 0) {
            if (len < sizeof msg) {
                error_report("Wrong vhost message len: %d", (int)len);
                break;
            }
            if (msg.type != VHOST_IOTLB_MSG) {
                error_report("Unknown vhost iotlb message type");
                break;
            }

            vhost_backend_handle_iotlb_msg(dev, &msg.iotlb);
        }
    }
}

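/*
 * Send an IOTLB update/invalidate entry to the kernel by writing a
 * vhost_msg (or vhost_msg_v2) to the device fd.
 */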
static int vhost_kernel_send_device_iotlb_msg(struct vhost_dev *dev,
                                              struct vhost_iotlb_msg *imsg)
{
    if (dev->backend_cap & (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2)) {
        struct vhost_msg_v2 msg = {};

        msg.type = VHOST_IOTLB_MSG_V2;
        msg.iotlb = *imsg;

        if (write((uintptr_t)dev->opaque, &msg, sizeof msg) != sizeof msg) {
            error_report("Failed to update device iotlb");
            return -EFAULT;
        }
    } else {
        struct vhost_msg msg = {};

        msg.type = VHOST_IOTLB_MSG;
        msg.iotlb = *imsg;

        if (write((uintptr_t)dev->opaque, &msg, sizeof msg) != sizeof msg) {
            error_report("Failed to update device iotlb");
            return -EFAULT;
        }
    }

    return 0;
}

static void vhost_kernel_set_iotlb_callback(struct vhost_dev *dev,
                                            int enabled)
{
    if (enabled) {
        qemu_set_fd_handler((uintptr_t)dev->opaque,
                            vhost_kernel_iotlb_read, NULL, dev);
    } else {
        qemu_set_fd_handler((uintptr_t)dev->opaque, NULL, NULL, NULL);
    }
}

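/* Ops table that plugs the kernel vhost backend into the generic vhost code. */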
const VhostOps kernel_ops = {
        .backend_type = VHOST_BACKEND_TYPE_KERNEL,
        .vhost_backend_init = vhost_kernel_init,
        .vhost_backend_cleanup = vhost_kernel_cleanup,
        .vhost_backend_memslots_limit = vhost_kernel_memslots_limit,
        .vhost_net_set_backend = vhost_kernel_net_set_backend,
        .vhost_scsi_set_endpoint = vhost_kernel_scsi_set_endpoint,
        .vhost_scsi_clear_endpoint = vhost_kernel_scsi_clear_endpoint,
        .vhost_scsi_get_abi_version = vhost_kernel_scsi_get_abi_version,
        .vhost_set_log_base = vhost_kernel_set_log_base,
        .vhost_set_mem_table = vhost_kernel_set_mem_table,
        .vhost_set_vring_addr = vhost_kernel_set_vring_addr,
        .vhost_set_vring_endian = vhost_kernel_set_vring_endian,
        .vhost_set_vring_num = vhost_kernel_set_vring_num,
        .vhost_set_vring_base = vhost_kernel_set_vring_base,
        .vhost_get_vring_base = vhost_kernel_get_vring_base,
        .vhost_set_vring_kick = vhost_kernel_set_vring_kick,
        .vhost_set_vring_call = vhost_kernel_set_vring_call,
        .vhost_set_vring_err = vhost_kernel_set_vring_err,
        .vhost_set_vring_busyloop_timeout =
                                vhost_kernel_set_vring_busyloop_timeout,
        .vhost_get_vring_worker = vhost_kernel_get_vring_worker,
        .vhost_attach_vring_worker = vhost_kernel_attach_vring_worker,
        .vhost_new_worker = vhost_kernel_new_worker,
        .vhost_free_worker = vhost_kernel_free_worker,
        .vhost_set_features = vhost_kernel_set_features,
        .vhost_get_features = vhost_kernel_get_features,
        .vhost_set_backend_cap = vhost_kernel_set_backend_cap,
        .vhost_set_owner = vhost_kernel_set_owner,
        .vhost_get_vq_index = vhost_kernel_get_vq_index,
        .vhost_vsock_set_guest_cid = vhost_kernel_vsock_set_guest_cid,
        .vhost_vsock_set_running = vhost_kernel_vsock_set_running,
        .vhost_set_iotlb_callback = vhost_kernel_set_iotlb_callback,
        .vhost_send_device_iotlb_msg = vhost_kernel_send_device_iotlb_msg,
};
#endif

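/*
 * Backend-independent helper: propagate a guest IOMMU mapping
 * (iova -> uaddr, len bytes, perm) to the backend as an IOTLB update.
 */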
int vhost_backend_update_device_iotlb(struct vhost_dev *dev,
                                      uint64_t iova, uint64_t uaddr,
                                      uint64_t len,
                                      IOMMUAccessFlags perm)
{
    struct vhost_iotlb_msg imsg;

    imsg.iova = iova;
    imsg.uaddr = uaddr;
    imsg.size = len;
    imsg.type = VHOST_IOTLB_UPDATE;

    switch (perm) {
    case IOMMU_RO:
        imsg.perm = VHOST_ACCESS_RO;
        break;
    case IOMMU_WO:
        imsg.perm = VHOST_ACCESS_WO;
        break;
    case IOMMU_RW:
        imsg.perm = VHOST_ACCESS_RW;
        break;
    default:
        return -EINVAL;
    }

    if (dev->vhost_ops && dev->vhost_ops->vhost_send_device_iotlb_msg) {
        return dev->vhost_ops->vhost_send_device_iotlb_msg(dev, &imsg);
    }

    return -ENODEV;
}

int vhost_backend_invalidate_device_iotlb(struct vhost_dev *dev,
                                          uint64_t iova, uint64_t len)
{
    struct vhost_iotlb_msg imsg;

    imsg.iova = iova;
    imsg.size = len;
    imsg.type = VHOST_IOTLB_INVALIDATE;

    if (dev->vhost_ops && dev->vhost_ops->vhost_send_device_iotlb_msg) {
        return dev->vhost_ops->vhost_send_device_iotlb_msg(dev, &imsg);
    }

    return -ENODEV;
}

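/*
 * Handle an IOTLB message received from the backend.  Only IOTLB misses are
 * serviced here, by resolving the faulting iova; other types are rejected.
 */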
int vhost_backend_handle_iotlb_msg(struct vhost_dev *dev,
                                   struct vhost_iotlb_msg *imsg)
{
    int ret = 0;

    if (unlikely(!dev->vdev)) {
        error_report("Unexpected IOTLB message when virtio device is stopped");
        return -EINVAL;
    }

    switch (imsg->type) {
    case VHOST_IOTLB_MISS:
        ret = vhost_device_iotlb_miss(dev, imsg->iova,
                                      imsg->perm != VHOST_ACCESS_RO);
        break;
    case VHOST_IOTLB_ACCESS_FAIL:
        /* FIXME: report device iotlb error */
        error_report("Access failure IOTLB message type not supported");
        ret = -ENOTSUP;
        break;
    case VHOST_IOTLB_UPDATE:
    case VHOST_IOTLB_INVALIDATE:
    default:
        error_report("Unexpected IOTLB message type");
        ret = -EINVAL;
        break;
    }

    return ret;
}