// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2009 Red Hat, Inc.
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * test virtio server in host kernel.
 */

#include <linux/compat.h>
#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/file.h>
#include <linux/slab.h>

#include "test.h"
#include "vhost.h"

/* Max number of bytes transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others. */
#define VHOST_TEST_WEIGHT 0x80000

/* Max number of packets transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others with
 * small packets.
 */
#define VHOST_TEST_PKT_WEIGHT 256

enum {
	VHOST_TEST_VQ = 0,
	VHOST_TEST_VQ_MAX = 1,
};

struct vhost_test {
	struct vhost_dev dev;
	struct vhost_virtqueue vqs[VHOST_TEST_VQ_MAX];
};
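
/*
 * The test device exposes a single virtqueue.  The guest posts "out"
 * buffers on it; handle_vq() below consumes each descriptor and
 * immediately marks it used without touching the data, which makes the
 * device useful for benchmarking the raw virtio/vhost ring machinery.
 */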
/* Expects to be always run from workqueue - which acts as
 * read-side critical section for our kind of RCU. */
static void handle_vq(struct vhost_test *n)
{
	struct vhost_virtqueue *vq = &n->vqs[VHOST_TEST_VQ];
	unsigned out, in;
	int head;
	size_t len, total_len = 0;
	void *private;

	mutex_lock(&vq->mutex);
	private = vhost_vq_get_backend(vq);
	if (!private) {
		mutex_unlock(&vq->mutex);
		return;
	}

	vhost_disable_notify(&n->dev, vq);

	for (;;) {
		head = vhost_get_vq_desc(vq, vq->iov,
					 ARRAY_SIZE(vq->iov),
					 &out, &in,
					 NULL, NULL);
		/* On error, stop handling until the next kick. */
		if (unlikely(head < 0))
			break;
		/* Nothing new?  Wait for eventfd to tell us they refilled. */
		if (head == vq->num) {
			if (unlikely(vhost_enable_notify(&n->dev, vq))) {
				vhost_disable_notify(&n->dev, vq);
				continue;
			}
			break;
		}
		if (in) {
			vq_err(vq, "Unexpected descriptor format for TX: "
			       "out %d, in %d\n", out, in);
			break;
		}
		len = iov_length(vq->iov, out);
		/* Sanity check */
		if (!len) {
			vq_err(vq, "Unexpected 0 len for TX\n");
			break;
		}
		vhost_add_used_and_signal(&n->dev, vq, head, 0);
		total_len += len;
		if (unlikely(vhost_exceeds_weight(vq, 0, total_len)))
			break;
	}

	mutex_unlock(&vq->mutex);
}
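
/*
 * Kick handler: invoked by the vhost worker whenever the guest signals
 * the virtqueue's kick eventfd.  It resolves the owning vhost_test
 * device and runs the main handler above.
 */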
static void handle_vq_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_test *n = container_of(vq->dev, struct vhost_test, dev);

	handle_vq(n);
}
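
/*
 * Open allocates one vhost_test instance per file descriptor and
 * registers handle_vq_kick as the kick handler for its single
 * virtqueue before handing the device to the generic vhost core.
 */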
static int vhost_test_open(struct inode *inode, struct file *f)
{
	struct vhost_test *n = kmalloc(sizeof *n, GFP_KERNEL);
	struct vhost_dev *dev;
	struct vhost_virtqueue **vqs;

	if (!n)
		return -ENOMEM;
	vqs = kmalloc_array(VHOST_TEST_VQ_MAX, sizeof(*vqs), GFP_KERNEL);
	if (!vqs) {
		kfree(n);
		return -ENOMEM;
	}

	dev = &n->dev;
	vqs[VHOST_TEST_VQ] = &n->vqs[VHOST_TEST_VQ];
	n->vqs[VHOST_TEST_VQ].handle_kick = handle_vq_kick;
	vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX, UIO_MAXIOV,
		       VHOST_TEST_PKT_WEIGHT, VHOST_TEST_WEIGHT, true, NULL);

	f->private_data = n;

	return 0;
}
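
/*
 * Detach the backend from a virtqueue under its mutex and return the
 * old backend pointer to the caller, so in-flight work sees a NULL
 * backend and stops processing.
 */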
static void *vhost_test_stop_vq(struct vhost_test *n,
				struct vhost_virtqueue *vq)
{
	void *private;

	mutex_lock(&vq->mutex);
	private = vhost_vq_get_backend(vq);
	vhost_vq_set_backend(vq, NULL);
	mutex_unlock(&vq->mutex);
	return private;
}

static void vhost_test_stop(struct vhost_test *n, void **privatep)
{
	*privatep = vhost_test_stop_vq(n, n->vqs + VHOST_TEST_VQ);
}
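
/*
 * Flushing waits for any work already queued on a virtqueue's poll
 * structure to finish running on the vhost worker, which is needed
 * before the backend or the device itself can be torn down safely.
 */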
static void vhost_test_flush_vq(struct vhost_test *n, int index)
{
	vhost_poll_flush(&n->vqs[index].poll);
}

static void vhost_test_flush(struct vhost_test *n)
{
	vhost_test_flush_vq(n, VHOST_TEST_VQ);
}

static int vhost_test_release(struct inode *inode, struct file *f)
{
	struct vhost_test *n = f->private_data;
	void *private;

	vhost_test_stop(n, &private);
	vhost_test_flush(n);
	vhost_dev_stop(&n->dev);
	vhost_dev_cleanup(&n->dev);
	/* We do an extra flush before freeing memory,
	 * since jobs can re-queue themselves. */
	vhost_test_flush(n);
	kfree(n->dev.vqs);
	kfree(n);
	return 0;
}
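
/*
 * VHOST_TEST_RUN with test == 1 installs the device itself as the
 * virtqueue backend so handle_vq starts consuming buffers; test == 0
 * clears the backend again and flushes any work that was still queued.
 */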
static long vhost_test_run(struct vhost_test *n, int test)
{
	void *priv, *oldpriv;
	struct vhost_virtqueue *vq;
	int r, index;

	if (test < 0 || test > 1)
		return -EINVAL;

	mutex_lock(&n->dev.mutex);
	r = vhost_dev_check_owner(&n->dev);
	if (r)
		goto err;

	for (index = 0; index < n->dev.nvqs; ++index) {
		/* Verify that ring has been setup correctly. */
		if (!vhost_vq_access_ok(&n->vqs[index])) {
			r = -EFAULT;
			goto err;
		}
	}

	for (index = 0; index < n->dev.nvqs; ++index) {
		vq = n->vqs + index;
		mutex_lock(&vq->mutex);
		priv = test ? n : NULL;

		/* start polling new socket */
		oldpriv = vhost_vq_get_backend(vq);
		vhost_vq_set_backend(vq, priv);

		r = vhost_vq_init_access(&n->vqs[index]);

		mutex_unlock(&vq->mutex);

		if (r)
			goto err;

		if (oldpriv)
			vhost_test_flush_vq(n, index);
	}

	mutex_unlock(&n->dev.mutex);
	return 0;

err:
	mutex_unlock(&n->dev.mutex);
	return r;
}
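
/*
 * VHOST_RESET_OWNER tears down the current owner's state: the backend
 * is cleared, pending work is flushed, and the device's memory mapping
 * is reset so another process can take ownership.
 */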
static long vhost_test_reset_owner(struct vhost_test *n)
{
	void *priv = NULL;
	long err;
	struct vhost_iotlb *umem;

	mutex_lock(&n->dev.mutex);
	err = vhost_dev_check_owner(&n->dev);
	if (err)
		goto done;
	umem = vhost_dev_reset_owner_prepare();
	if (!umem) {
		err = -ENOMEM;
		goto done;
	}
	vhost_test_stop(n, &priv);
	vhost_test_flush(n);
	vhost_dev_stop(&n->dev);
	vhost_dev_reset_owner(&n->dev, umem);
done:
	mutex_unlock(&n->dev.mutex);
	return err;
}
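
/*
 * Feature negotiation: VHOST_F_LOG_ALL may only be acked while the
 * dirty log memory is accessible, and the acked feature bits are
 * stored on the virtqueue under its own mutex.
 */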
static int vhost_test_set_features(struct vhost_test *n, u64 features)
{
	struct vhost_virtqueue *vq;

	mutex_lock(&n->dev.mutex);
	if ((features & (1 << VHOST_F_LOG_ALL)) &&
	    !vhost_log_access_ok(&n->dev)) {
		mutex_unlock(&n->dev.mutex);
		return -EFAULT;
	}
	vq = &n->vqs[VHOST_TEST_VQ];
	mutex_lock(&vq->mutex);
	vq->acked_features = features;
	mutex_unlock(&vq->mutex);
	mutex_unlock(&n->dev.mutex);
	return 0;
}
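
/*
 * VHOST_TEST_SET_BACKEND: fd == -1 stops polling the kick eventfd and
 * parks the current backend pointer; any other fd restores it and
 * restarts polling.  The backend pointer is kept in a static variable
 * purely so it survives a disable/enable cycle of this test device.
 */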
static long vhost_test_set_backend(struct vhost_test *n, unsigned index, int fd)
{
	static void *backend;

	const bool enable = fd != -1;
	struct vhost_virtqueue *vq;
	int r;

	mutex_lock(&n->dev.mutex);
	r = vhost_dev_check_owner(&n->dev);
	if (r)
		goto err;

	if (index >= VHOST_TEST_VQ_MAX) {
		r = -ENOBUFS;
		goto err;
	}
	vq = &n->vqs[index];
	mutex_lock(&vq->mutex);

	/* Verify that ring has been setup correctly. */
	if (!vhost_vq_access_ok(vq)) {
		r = -EFAULT;
		goto err_vq;
	}
	if (!enable) {
		vhost_poll_stop(&vq->poll);
		backend = vhost_vq_get_backend(vq);
		vhost_vq_set_backend(vq, NULL);
	} else {
		vhost_vq_set_backend(vq, backend);
		r = vhost_vq_init_access(vq);
		if (r == 0)
			r = vhost_poll_start(&vq->poll, vq->kick);
	}

	mutex_unlock(&vq->mutex);

	if (enable)
		vhost_test_flush_vq(n, index);

	mutex_unlock(&n->dev.mutex);
	return 0;

err_vq:
	mutex_unlock(&vq->mutex);
err:
	mutex_unlock(&n->dev.mutex);
	return r;
}
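
/*
 * ioctl dispatch: the test-specific requests (run, set backend) and the
 * feature handshake are handled here; everything else is forwarded to
 * the generic vhost device and vring ioctl handlers.
 */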
static long vhost_test_ioctl(struct file *f, unsigned int ioctl,
			     unsigned long arg)
{
	struct vhost_vring_file backend;
	struct vhost_test *n = f->private_data;
	void __user *argp = (void __user *)arg;
	u64 __user *featurep = argp;
	int test;
	u64 features;
	int r;

	switch (ioctl) {
	case VHOST_TEST_RUN:
		if (copy_from_user(&test, argp, sizeof test))
			return -EFAULT;
		return vhost_test_run(n, test);
	case VHOST_TEST_SET_BACKEND:
		if (copy_from_user(&backend, argp, sizeof backend))
			return -EFAULT;
		return vhost_test_set_backend(n, backend.index, backend.fd);
	case VHOST_GET_FEATURES:
		features = VHOST_FEATURES;
		if (copy_to_user(featurep, &features, sizeof features))
			return -EFAULT;
		return 0;
	case VHOST_SET_FEATURES:
		printk(KERN_ERR "1\n");
		if (copy_from_user(&features, featurep, sizeof features))
			return -EFAULT;
		printk(KERN_ERR "2\n");
		if (features & ~VHOST_FEATURES)
			return -EOPNOTSUPP;
		printk(KERN_ERR "3\n");
		return vhost_test_set_features(n, features);
	case VHOST_RESET_OWNER:
		return vhost_test_reset_owner(n);
	default:
		mutex_lock(&n->dev.mutex);
		r = vhost_dev_ioctl(&n->dev, ioctl, argp);
		if (r == -ENOIOCTLCMD)
			r = vhost_vring_ioctl(&n->dev, ioctl, argp);
		vhost_test_flush(n);
		mutex_unlock(&n->dev.mutex);
		return r;
	}
}
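
/*
 * Rough userspace usage, for orientation only.  This is a minimal
 * sketch, assuming the VHOST_TEST_RUN ioctl from test.h and the
 * standard vhost vring setup ioctls (needs <fcntl.h>, <sys/ioctl.h>,
 * <linux/vhost.h> and the local test.h); the virtio_test tool under
 * tools/virtio/ exercises this path for real.
 *
 *	int fd = open("/dev/vhost-test", O_RDWR);
 *	ioctl(fd, VHOST_SET_OWNER, NULL);
 *	// set up guest memory and the ring:
 *	// VHOST_SET_MEM_TABLE, VHOST_SET_VRING_{NUM,ADDR,BASE},
 *	// VHOST_SET_VRING_KICK / VHOST_SET_VRING_CALL eventfds
 *	int on = 1;
 *	ioctl(fd, VHOST_TEST_RUN, &on);		// start consuming buffers
 *	int off = 0;
 *	ioctl(fd, VHOST_TEST_RUN, &off);	// stop again
 */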
static const struct file_operations vhost_test_fops = {
	.owner          = THIS_MODULE,
	.release        = vhost_test_release,
	.unlocked_ioctl = vhost_test_ioctl,
	.compat_ioctl   = compat_ptr_ioctl,
	.open           = vhost_test_open,
	.llseek         = noop_llseek,
};

static struct miscdevice vhost_test_misc = {
	MISC_DYNAMIC_MINOR,
	"vhost-test",
	&vhost_test_fops,
};
module_misc_device(vhost_test_misc);

MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Michael S. Tsirkin");
MODULE_DESCRIPTION("Host kernel side for virtio simulator");