// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2009 Red Hat, Inc.
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * virtio-net server in host kernel.
 */
#include <linux/compat.h>
#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/virtio_net.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/sched/clock.h>
#include <linux/sched/signal.h>
#include <linux/vmalloc.h>

#include <linux/net.h>
#include <linux/if_packet.h>
#include <linux/if_arp.h>
#include <linux/if_tun.h>
#include <linux/if_macvlan.h>
#include <linux/if_tap.h>
#include <linux/if_vlan.h>
#include <linux/skb_array.h>
#include <linux/skbuff.h>
static int experimental_zcopytx = 0;
module_param(experimental_zcopytx, int, 0444);
MODULE_PARM_DESC(experimental_zcopytx, "Enable Zero Copy TX;"
		 " 1 - Enable; 0 - Disable");
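/* Usage note (illustrative, not part of the original source): zerocopy TX is
 * selected at module load time, e.g. "modprobe vhost_net experimental_zcopytx=1";
 * the 0444 permission exposes the value read-only through sysfs afterwards.
 */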
/* Max number of bytes transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others. */
#define VHOST_NET_WEIGHT 0x80000
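/* 0x80000 bytes is 512 KiB of payload per poll cycle before the worker
 * requeues itself, keeping one busy virtqueue from monopolizing the thread.
 */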
/* Max number of packets transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others with small
 * pkts.
 */
#define VHOST_NET_PKT_WEIGHT 256
/* MAX number of TX used buffers for outstanding zerocopy */
#define VHOST_MAX_PEND 128
#define VHOST_GOODCOPY_LEN 256
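/* Packets shorter than VHOST_GOODCOPY_LEN are simply copied; only larger
 * ones are considered for zerocopy TX (see the zcopy_used test in
 * handle_tx_zerocopy() below).
 */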
/*
 * For transmit, used buffer len is unused; we override it to track buffer
 * status internally; used for zerocopy tx only.
 */
/* Lower device DMA failed */
#define VHOST_DMA_FAILED_LEN	((__force __virtio32)3)
/* Lower device DMA done */
#define VHOST_DMA_DONE_LEN	((__force __virtio32)2)
/* Lower device DMA in progress */
#define VHOST_DMA_IN_PROGRESS	((__force __virtio32)1)
#define VHOST_DMA_CLEAR_LEN	((__force __virtio32)0)

#define VHOST_DMA_IS_DONE(len) ((__force u32)(len) >= (__force u32)VHOST_DMA_DONE_LEN)
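/* Since FAILED_LEN (3) and DONE_LEN (2) both compare >= DONE_LEN, this macro
 * treats a failed DMA as "completed" for reclaim purposes; the failure itself
 * is counted separately via vhost_net_tx_err().
 */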
enum {
	VHOST_NET_FEATURES = VHOST_FEATURES |
			     (1ULL << VHOST_NET_F_VIRTIO_NET_HDR) |
			     (1ULL << VIRTIO_NET_F_MRG_RXBUF) |
			     (1ULL << VIRTIO_F_ACCESS_PLATFORM) |
			     (1ULL << VIRTIO_F_RING_RESET)
};
enum {
	VHOST_NET_BACKEND_FEATURES = (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2)
};
struct vhost_net_ubuf_ref {
	/* refcount follows semantics similar to kref:
	 *    0: object is released
	 *    1: no outstanding ubufs
	 *   >1: outstanding ubufs
	 */
	atomic_t refcount;
	wait_queue_head_t wait;
	struct vhost_virtqueue *vq;
};
#define VHOST_NET_BATCH 64
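/* VHOST_NET_BATCH bounds both the RX ptr_ring consumption chunk and the
 * number of batched used/XDP entries accumulated before they are flushed.
 */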
struct vhost_net_buf {
	void **queue;
	int tail;
	int head;
};

struct vhost_net_virtqueue {
	struct vhost_virtqueue vq;
	size_t vhost_hlen;
	size_t sock_hlen;
	/* vhost zerocopy support fields below: */
	/* last used idx for outstanding DMA zerocopy buffers */
	int upend_idx;
	/* For TX, first used idx for DMA done zerocopy buffers
	 * For RX, number of batched heads
	 */
	int done_idx;
	/* Number of XDP frames batched */
	int batched_xdp;
	/* an array of userspace buffers info */
	struct ubuf_info_msgzc *ubuf_info;
	/* Reference counting for outstanding ubufs.
	 * Protected by vq mutex. Writers must also take device mutex. */
	struct vhost_net_ubuf_ref *ubufs;
	struct ptr_ring *rx_ring;
	struct vhost_net_buf rxq;
	/* Batched XDP buffs */
	struct xdp_buff *xdp;
};
struct vhost_net {
	struct vhost_dev dev;
	struct vhost_net_virtqueue vqs[VHOST_NET_VQ_MAX];
	struct vhost_poll poll[VHOST_NET_VQ_MAX];
	/* Number of TX recently submitted.
	 * Protected by tx vq lock. */
	unsigned tx_packets;
	/* Number of times zerocopy TX recently failed.
	 * Protected by tx vq lock. */
	unsigned tx_zcopy_err;
	/* Flush in progress. Protected by tx vq lock. */
	bool tx_flush;
	/* Private page frag cache */
	struct page_frag_cache pf_cache;
};
static unsigned vhost_net_zcopy_mask __read_mostly;
static void *vhost_net_buf_get_ptr(struct vhost_net_buf *rxq)
{
	if (rxq->tail != rxq->head)
		return rxq->queue[rxq->head];
	else
		return NULL;
}

static int vhost_net_buf_get_size(struct vhost_net_buf *rxq)
{
	return rxq->tail - rxq->head;
}

static int vhost_net_buf_is_empty(struct vhost_net_buf *rxq)
{
	return rxq->tail == rxq->head;
}

static void *vhost_net_buf_consume(struct vhost_net_buf *rxq)
{
	void *ret = vhost_net_buf_get_ptr(rxq);
	++rxq->head;

	return ret;
}

static int vhost_net_buf_produce(struct vhost_net_virtqueue *nvq)
{
	struct vhost_net_buf *rxq = &nvq->rxq;

	rxq->head = 0;
	rxq->tail = ptr_ring_consume_batched(nvq->rx_ring, rxq->queue,
					     VHOST_NET_BATCH);
	return rxq->tail;
}

static void vhost_net_buf_unproduce(struct vhost_net_virtqueue *nvq)
{
	struct vhost_net_buf *rxq = &nvq->rxq;

	if (nvq->rx_ring && !vhost_net_buf_is_empty(rxq)) {
		ptr_ring_unconsume(nvq->rx_ring, rxq->queue + rxq->head,
				   vhost_net_buf_get_size(rxq),
				   tun_ptr_free);
		rxq->head = rxq->tail = 0;
	}
}

static int vhost_net_buf_peek_len(void *ptr)
{
	if (tun_is_xdp_frame(ptr)) {
		struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);

		return xdpf->len;
	}

	return __skb_array_len_with_tag(ptr);
}

static int vhost_net_buf_peek(struct vhost_net_virtqueue *nvq)
{
	struct vhost_net_buf *rxq = &nvq->rxq;

	if (!vhost_net_buf_is_empty(rxq))
		goto out;

	if (!vhost_net_buf_produce(nvq))
		return 0;

out:
	return vhost_net_buf_peek_len(vhost_net_buf_get_ptr(rxq));
}

static void vhost_net_buf_init(struct vhost_net_buf *rxq)
{
	rxq->head = rxq->tail = 0;
}

static void vhost_net_enable_zcopy(int vq)
{
	vhost_net_zcopy_mask |= 0x1 << vq;
}
static struct vhost_net_ubuf_ref *
vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy)
{
	struct vhost_net_ubuf_ref *ubufs;

	/* No zero copy backend? Nothing to count. */
	if (!zcopy)
		return NULL;
	ubufs = kmalloc(sizeof(*ubufs), GFP_KERNEL);
	if (!ubufs)
		return ERR_PTR(-ENOMEM);
	atomic_set(&ubufs->refcount, 1);
	init_waitqueue_head(&ubufs->wait);
	ubufs->vq = vq;
	return ubufs;
}
static int vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs)
{
	int r = atomic_sub_return(1, &ubufs->refcount);
	if (unlikely(!r))
		wake_up(&ubufs->wait);
	return r;
}

static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs)
{
	vhost_net_ubuf_put(ubufs);
	wait_event(ubufs->wait, !atomic_read(&ubufs->refcount));
}

static void vhost_net_ubuf_put_wait_and_free(struct vhost_net_ubuf_ref *ubufs)
{
	vhost_net_ubuf_put_and_wait(ubufs);
	kfree(ubufs);
}
static void vhost_net_clear_ubuf_info(struct vhost_net *n)
{
	int i;

	for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
		kfree(n->vqs[i].ubuf_info);
		n->vqs[i].ubuf_info = NULL;
	}
}

static int vhost_net_set_ubuf_info(struct vhost_net *n)
{
	bool zcopy;
	int i;

	for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
		zcopy = vhost_net_zcopy_mask & (0x1 << i);
		if (!zcopy)
			continue;
		n->vqs[i].ubuf_info =
			kmalloc_array(UIO_MAXIOV,
				      sizeof(*n->vqs[i].ubuf_info),
				      GFP_KERNEL);
		if (!n->vqs[i].ubuf_info)
			goto err;
	}
	return 0;

err:
	vhost_net_clear_ubuf_info(n);
	return -ENOMEM;
}
static void vhost_net_vq_reset(struct vhost_net *n)
{
	int i;

	vhost_net_clear_ubuf_info(n);

	for (i = 0; i < VHOST_NET_VQ_MAX; i++) {
		n->vqs[i].done_idx = 0;
		n->vqs[i].upend_idx = 0;
		n->vqs[i].ubufs = NULL;
		n->vqs[i].vhost_hlen = 0;
		n->vqs[i].sock_hlen = 0;
		vhost_net_buf_init(&n->vqs[i].rxq);
	}
}
static void vhost_net_tx_packet(struct vhost_net *net)
{
	++net->tx_packets;
	if (net->tx_packets < 1024)
		return;
	net->tx_packets = 0;
	net->tx_zcopy_err = 0;
}

static void vhost_net_tx_err(struct vhost_net *net)
{
	++net->tx_zcopy_err;
}
static bool vhost_net_tx_select_zcopy(struct vhost_net *net)
{
	/* TX flush waits for outstanding DMAs to be done.
	 * Don't start new DMAs.
	 */
	return !net->tx_flush &&
		net->tx_packets / 64 >= net->tx_zcopy_err;
}

static bool vhost_sock_zcopy(struct socket *sock)
{
	return unlikely(experimental_zcopytx) &&
		sock_flag(sock->sk, SOCK_ZEROCOPY);
}

static bool vhost_sock_xdp(struct socket *sock)
{
	return sock_flag(sock->sk, SOCK_XDP);
}
/* DMA completions from the lower device may arrive out of order.
 * upend_idx is used to track the end of the used idx, done_idx is used to
 * track the head of the used idx. Once the lower device has completed DMA
 * contiguously, we signal the used idx to the KVM guest.
 */
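/* Illustrative walk-through (not in the original source): the entries between
 * done_idx and upend_idx, taken modulo UIO_MAXIOV, are the in-flight zerocopy
 * buffers. The loop below advances done_idx only across a contiguous prefix of
 * completed entries, so the guest always sees used buffers signalled in order.
 */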
static void vhost_zerocopy_signal_used(struct vhost_net *net,
				       struct vhost_virtqueue *vq)
{
	struct vhost_net_virtqueue *nvq =
		container_of(vq, struct vhost_net_virtqueue, vq);
	int i, add;
	int j = 0;

	for (i = nvq->done_idx; i != nvq->upend_idx; i = (i + 1) % UIO_MAXIOV) {
		if (vq->heads[i].len == VHOST_DMA_FAILED_LEN)
			vhost_net_tx_err(net);
		if (VHOST_DMA_IS_DONE(vq->heads[i].len)) {
			vq->heads[i].len = VHOST_DMA_CLEAR_LEN;
			++j;
		} else
			break;
	}
	while (j) {
		add = min(UIO_MAXIOV - nvq->done_idx, j);
		vhost_add_used_and_signal_n(vq->dev, vq,
					    &vq->heads[nvq->done_idx], add);
		nvq->done_idx = (nvq->done_idx + add) % UIO_MAXIOV;
		j -= add;
	}
}
static void vhost_zerocopy_complete(struct sk_buff *skb,
				    struct ubuf_info *ubuf_base, bool success)
{
	struct ubuf_info_msgzc *ubuf = uarg_to_msgzc(ubuf_base);
	struct vhost_net_ubuf_ref *ubufs = ubuf->ctx;
	struct vhost_virtqueue *vq = ubufs->vq;
	int cnt;

	rcu_read_lock_bh();

	/* set len to mark this desc buffers done DMA */
	vq->heads[ubuf->desc].len = success ?
		VHOST_DMA_DONE_LEN : VHOST_DMA_FAILED_LEN;
	cnt = vhost_net_ubuf_put(ubufs);

	/*
	 * Trigger polling thread if guest stopped submitting new buffers:
	 * in this case, the refcount after decrement will eventually reach 1.
	 * We also trigger polling periodically after each 16 packets
	 * (the value 16 here is more or less arbitrary, it's tuned to trigger
	 * less than 10% of times).
	 */
	if (cnt <= 1 || !(cnt % 16))
		vhost_poll_queue(&vq->poll);

	rcu_read_unlock_bh();
}

static const struct ubuf_info_ops vhost_ubuf_ops = {
	.complete = vhost_zerocopy_complete,
};
static inline unsigned long busy_clock(void)
{
	return local_clock() >> 10;
}
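/* Note: local_clock() returns nanoseconds, so the ">> 10" above yields
 * roughly microseconds, the same unit as the vring busyloop timeout it is
 * compared against.
 */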
static bool vhost_can_busy_poll(unsigned long endtime)
{
	return likely(!need_resched() && !time_after(busy_clock(), endtime) &&
		      !signal_pending(current));
}
static void vhost_net_disable_vq(struct vhost_net *n,
				 struct vhost_virtqueue *vq)
{
	struct vhost_net_virtqueue *nvq =
		container_of(vq, struct vhost_net_virtqueue, vq);
	struct vhost_poll *poll = n->poll + (nvq - n->vqs);
	if (!vhost_vq_get_backend(vq))
		return;
	vhost_poll_stop(poll);
}
static int vhost_net_enable_vq(struct vhost_net *n,
			       struct vhost_virtqueue *vq)
{
	struct vhost_net_virtqueue *nvq =
		container_of(vq, struct vhost_net_virtqueue, vq);
	struct vhost_poll *poll = n->poll + (nvq - n->vqs);
	struct socket *sock;

	sock = vhost_vq_get_backend(vq);
	if (!sock)
		return 0;

	return vhost_poll_start(poll, sock->file);
}
static void vhost_net_signal_used(struct vhost_net_virtqueue *nvq)
{
	struct vhost_virtqueue *vq = &nvq->vq;
	struct vhost_dev *dev = vq->dev;

	if (!nvq->done_idx)
		return;

	vhost_add_used_and_signal_n(dev, vq, vq->heads, nvq->done_idx);
	nvq->done_idx = 0;
}
static void vhost_tx_batch(struct vhost_net *net,
			   struct vhost_net_virtqueue *nvq,
			   struct socket *sock,
			   struct msghdr *msghdr)
{
	struct tun_msg_ctl ctl = {
		.type = TUN_MSG_PTR,
		.num = nvq->batched_xdp,
		.ptr = nvq->xdp,
	};
	int i, err;

	if (nvq->batched_xdp == 0)
		goto signal_used;

	msghdr->msg_control = &ctl;
	msghdr->msg_controllen = sizeof(ctl);
	err = sock->ops->sendmsg(sock, msghdr, 0);
	if (unlikely(err < 0)) {
		vq_err(&nvq->vq, "Fail to batch sending packets\n");

		/* free pages owned by XDP; since this is an unlikely error path,
		 * keep it simple and avoid more complex bulk update for the
		 * used pages
		 */
		for (i = 0; i < nvq->batched_xdp; ++i)
			put_page(virt_to_head_page(nvq->xdp[i].data));
		nvq->batched_xdp = 0;
		nvq->done_idx = 0;
		return;
	}

signal_used:
	vhost_net_signal_used(nvq);
	nvq->batched_xdp = 0;
}
static int sock_has_rx_data(struct socket *sock)
{
	if (unlikely(!sock))
		return 0;

	if (sock->ops->peek_len)
		return sock->ops->peek_len(sock);

	return skb_queue_empty(&sock->sk->sk_receive_queue);
}
static void vhost_net_busy_poll_try_queue(struct vhost_net *net,
					  struct vhost_virtqueue *vq)
{
	if (!vhost_vq_avail_empty(&net->dev, vq)) {
		vhost_poll_queue(&vq->poll);
	} else if (unlikely(vhost_enable_notify(&net->dev, vq))) {
		vhost_disable_notify(&net->dev, vq);
		vhost_poll_queue(&vq->poll);
	}
}
static void vhost_net_busy_poll(struct vhost_net *net,
				struct vhost_virtqueue *rvq,
				struct vhost_virtqueue *tvq,
				bool *busyloop_intr,
				bool poll_rx)
{
	unsigned long busyloop_timeout;
	unsigned long endtime;
	struct socket *sock;
	struct vhost_virtqueue *vq = poll_rx ? tvq : rvq;

	/* Try to hold the vq mutex of the paired virtqueue. We can't
	 * use mutex_lock() here since we could not guarantee a
	 * consistent lock ordering.
	 */
	if (!mutex_trylock(&vq->mutex))
		return;

	vhost_disable_notify(&net->dev, vq);
	sock = vhost_vq_get_backend(rvq);

	busyloop_timeout = poll_rx ? rvq->busyloop_timeout :
				     tvq->busyloop_timeout;

	endtime = busy_clock() + busyloop_timeout;

	while (vhost_can_busy_poll(endtime)) {
		if (vhost_vq_has_work(vq)) {
			*busyloop_intr = true;
			break;
		}

		if ((sock_has_rx_data(sock) &&
		     !vhost_vq_avail_empty(&net->dev, rvq)) ||
		    !vhost_vq_avail_empty(&net->dev, tvq))
			break;

		cpu_relax();
	}

	if (poll_rx || sock_has_rx_data(sock))
		vhost_net_busy_poll_try_queue(net, vq);
	else if (!poll_rx) /* On tx here, sock has no rx data. */
		vhost_enable_notify(&net->dev, rvq);

	mutex_unlock(&vq->mutex);
}
static int vhost_net_tx_get_vq_desc(struct vhost_net *net,
				    struct vhost_net_virtqueue *tnvq,
				    unsigned int *out_num, unsigned int *in_num,
				    struct msghdr *msghdr, bool *busyloop_intr)
{
	struct vhost_net_virtqueue *rnvq = &net->vqs[VHOST_NET_VQ_RX];
	struct vhost_virtqueue *rvq = &rnvq->vq;
	struct vhost_virtqueue *tvq = &tnvq->vq;

	int r = vhost_get_vq_desc(tvq, tvq->iov, ARRAY_SIZE(tvq->iov),
				  out_num, in_num, NULL, NULL);

	if (r == tvq->num && tvq->busyloop_timeout) {
		/* Flush batched packets first */
		if (!vhost_sock_zcopy(vhost_vq_get_backend(tvq)))
			vhost_tx_batch(net, tnvq,
				       vhost_vq_get_backend(tvq),
				       msghdr);

		vhost_net_busy_poll(net, rvq, tvq, busyloop_intr, false);

		r = vhost_get_vq_desc(tvq, tvq->iov, ARRAY_SIZE(tvq->iov),
				      out_num, in_num, NULL, NULL);
	}

	return r;
}
static bool vhost_exceeds_maxpend(struct vhost_net *net)
{
	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
	struct vhost_virtqueue *vq = &nvq->vq;

	return (nvq->upend_idx + UIO_MAXIOV - nvq->done_idx) % UIO_MAXIOV >
	       min_t(unsigned int, VHOST_MAX_PEND, vq->num >> 2);
}
static size_t init_iov_iter(struct vhost_virtqueue *vq, struct iov_iter *iter,
			    size_t hdr_size, int out)
{
	/* Skip header. TODO: support TSO. */
	size_t len = iov_length(vq->iov, out);

	iov_iter_init(iter, ITER_SOURCE, vq->iov, out, len);
	iov_iter_advance(iter, hdr_size);

	return iov_iter_count(iter);
}
static int get_tx_bufs(struct vhost_net *net,
		       struct vhost_net_virtqueue *nvq,
		       struct msghdr *msg,
		       unsigned int *out, unsigned int *in,
		       size_t *len, bool *busyloop_intr)
{
	struct vhost_virtqueue *vq = &nvq->vq;
	int ret;

	ret = vhost_net_tx_get_vq_desc(net, nvq, out, in, msg, busyloop_intr);

	if (ret < 0 || ret == vq->num)
		return ret;

	if (*in) {
		vq_err(vq, "Unexpected descriptor format for TX: out %d, in %d\n",
		       *out, *in);
		return -EFAULT;
	}

	/* Sanity check */
	*len = init_iov_iter(vq, &msg->msg_iter, nvq->vhost_hlen, *out);
	if (*len == 0) {
		vq_err(vq, "Unexpected header len for TX: %zd expected %zd\n",
		       *len, nvq->vhost_hlen);
		return -EFAULT;
	}

	return 0;
}
static bool tx_can_batch(struct vhost_virtqueue *vq, size_t total_len)
{
	return total_len < VHOST_NET_WEIGHT &&
	       !vhost_vq_avail_empty(vq->dev, vq);
}
#define VHOST_NET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)
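/* VHOST_NET_RX_PAD reserves the usual NET_SKB_PAD + NET_IP_ALIGN headroom in
 * front of the packet data, so a tun/tap-built skb (or XDP frame) keeps the
 * alignment the stack expects.
 */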
static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq,
			       struct iov_iter *from)
{
	struct vhost_virtqueue *vq = &nvq->vq;
	struct vhost_net *net = container_of(vq->dev, struct vhost_net,
					     dev);
	struct socket *sock = vhost_vq_get_backend(vq);
	struct virtio_net_hdr *gso;
	struct xdp_buff *xdp = &nvq->xdp[nvq->batched_xdp];
	struct tun_xdp_hdr *hdr;
	size_t len = iov_iter_count(from);
	int headroom = vhost_sock_xdp(sock) ? XDP_PACKET_HEADROOM : 0;
	int buflen = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	int pad = SKB_DATA_ALIGN(VHOST_NET_RX_PAD + headroom + nvq->sock_hlen);
	int sock_hlen = nvq->sock_hlen;

	if (unlikely(len < nvq->sock_hlen))
		return -EFAULT;

	if (SKB_DATA_ALIGN(len + pad) +
	    SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) > PAGE_SIZE)
		return -ENOSPC;

	buflen += SKB_DATA_ALIGN(len + pad);
	buf = page_frag_alloc_align(&net->pf_cache, buflen, GFP_KERNEL,
				    SMP_CACHE_BYTES);

	copied = copy_from_iter(buf + offsetof(struct tun_xdp_hdr, gso),
				sock_hlen, from);
	if (copied != sock_hlen) {
		ret = -EFAULT;
		goto err;
	}

	if ((gso->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
	    vhost16_to_cpu(vq, gso->csum_start) +
	    vhost16_to_cpu(vq, gso->csum_offset) + 2 >
	    vhost16_to_cpu(vq, gso->hdr_len)) {
		gso->hdr_len = cpu_to_vhost16(vq,
			       vhost16_to_cpu(vq, gso->csum_start) +
			       vhost16_to_cpu(vq, gso->csum_offset) + 2);
	}

	if (vhost16_to_cpu(vq, gso->hdr_len) > len) {
		ret = -EINVAL;
		goto err;
	}

	copied = copy_from_iter(buf + pad, len, from);

	xdp_init_buff(xdp, buflen, NULL);
	xdp_prepare_buff(xdp, buf, pad, len, true);
	hdr->buflen = buflen;
static void handle_tx_copy(struct vhost_net *net, struct socket *sock)
{
	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
	struct vhost_virtqueue *vq = &nvq->vq;

	struct msghdr msg = {
		.msg_flags = MSG_DONTWAIT,
	};
	size_t len, total_len = 0;

	bool sock_can_batch = (sock->sk->sk_sndbuf == INT_MAX);

	do {
		bool busyloop_intr = false;

		if (nvq->done_idx == VHOST_NET_BATCH)
			vhost_tx_batch(net, nvq, sock, &msg);

		head = get_tx_bufs(net, nvq, &msg, &out, &in, &len,
				   &busyloop_intr);
		/* On error, stop handling until the next kick. */
		if (unlikely(head < 0))
			break;
		/* Nothing new? Wait for eventfd to tell us they refilled. */
		if (head == vq->num) {
			if (unlikely(busyloop_intr)) {
				vhost_poll_queue(&vq->poll);
			} else if (unlikely(vhost_enable_notify(&net->dev,
								vq))) {
				vhost_disable_notify(&net->dev, vq);
				continue;
			}
			break;
		}

		/* For simplicity, TX batching is only enabled if
		 * sndbuf is unlimited.
		 */
		if (sock_can_batch) {
			err = vhost_net_build_xdp(nvq, &msg.msg_iter);

			} else if (unlikely(err != -ENOSPC)) {
				vhost_tx_batch(net, nvq, sock, &msg);
				vhost_discard_vq_desc(vq, 1);
				vhost_net_enable_vq(net, vq);
				break;
			}

			/* We can't build XDP buff, go for single
			 * packet path but let's flush batched
			 * packets.
			 */
			vhost_tx_batch(net, nvq, sock, &msg);
			msg.msg_control = NULL;
		}

		if (tx_can_batch(vq, total_len))
			msg.msg_flags |= MSG_MORE;
		else
			msg.msg_flags &= ~MSG_MORE;

		err = sock->ops->sendmsg(sock, &msg, len);
		if (unlikely(err < 0)) {
			if (err == -EAGAIN || err == -ENOMEM || err == -ENOBUFS) {
				vhost_discard_vq_desc(vq, 1);
				vhost_net_enable_vq(net, vq);
				break;
			}
			pr_debug("Fail to send packet: err %d", err);
		} else if (unlikely(err != len))
			pr_debug("Truncated TX packet: len %d != %zd\n",
				 err, len);

		vq->heads[nvq->done_idx].id = cpu_to_vhost32(vq, head);
		vq->heads[nvq->done_idx].len = 0;
	} while (likely(!vhost_exceeds_weight(vq, ++sent_pkts, total_len)));

	vhost_tx_batch(net, nvq, sock, &msg);
}
static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
{
	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
	struct vhost_virtqueue *vq = &nvq->vq;

	struct msghdr msg = {
		.msg_flags = MSG_DONTWAIT,
	};
	struct tun_msg_ctl ctl;
	size_t len, total_len = 0;

	struct vhost_net_ubuf_ref *ubufs;
	struct ubuf_info_msgzc *ubuf;

	do {
		/* Release DMAs done buffers first */
		vhost_zerocopy_signal_used(net, vq);

		busyloop_intr = false;
		head = get_tx_bufs(net, nvq, &msg, &out, &in, &len,
				   &busyloop_intr);
		/* On error, stop handling until the next kick. */
		if (unlikely(head < 0))
			break;
		/* Nothing new? Wait for eventfd to tell us they refilled. */
		if (head == vq->num) {
			if (unlikely(busyloop_intr)) {
				vhost_poll_queue(&vq->poll);
			} else if (unlikely(vhost_enable_notify(&net->dev, vq))) {
				vhost_disable_notify(&net->dev, vq);
				continue;
			}
			break;
		}

		zcopy_used = len >= VHOST_GOODCOPY_LEN
			     && !vhost_exceeds_maxpend(net)
			     && vhost_net_tx_select_zcopy(net);

		/* use msg_control to pass vhost zerocopy ubuf info to skb */
		if (zcopy_used) {
			ubuf = nvq->ubuf_info + nvq->upend_idx;
			vq->heads[nvq->upend_idx].id = cpu_to_vhost32(vq, head);
			vq->heads[nvq->upend_idx].len = VHOST_DMA_IN_PROGRESS;
			ubuf->ctx = nvq->ubufs;
			ubuf->desc = nvq->upend_idx;
			ubuf->ubuf.ops = &vhost_ubuf_ops;
			ubuf->ubuf.flags = SKBFL_ZEROCOPY_FRAG;
			refcount_set(&ubuf->ubuf.refcnt, 1);
			msg.msg_control = &ctl;
			ctl.type = TUN_MSG_UBUF;
			ctl.ptr = &ubuf->ubuf;
			msg.msg_controllen = sizeof(ctl);
			ubufs = nvq->ubufs;
			atomic_inc(&ubufs->refcount);
			nvq->upend_idx = (nvq->upend_idx + 1) % UIO_MAXIOV;
		} else {
			msg.msg_control = NULL;
			ubufs = NULL;
		}

		if (tx_can_batch(vq, total_len) &&
		    likely(!vhost_exceeds_maxpend(net))) {
			msg.msg_flags |= MSG_MORE;
		} else {
			msg.msg_flags &= ~MSG_MORE;
		}

		err = sock->ops->sendmsg(sock, &msg, len);
		if (unlikely(err < 0)) {
			bool retry = err == -EAGAIN || err == -ENOMEM || err == -ENOBUFS;

			if (zcopy_used) {
				if (vq->heads[ubuf->desc].len == VHOST_DMA_IN_PROGRESS)
					vhost_net_ubuf_put(ubufs);
				if (retry)
					nvq->upend_idx = ((unsigned)nvq->upend_idx - 1)
						% UIO_MAXIOV;
				else
					vq->heads[ubuf->desc].len = VHOST_DMA_DONE_LEN;
			}
			if (retry) {
				vhost_discard_vq_desc(vq, 1);
				vhost_net_enable_vq(net, vq);
				break;
			}
			pr_debug("Fail to send packet: err %d", err);
		} else if (unlikely(err != len))
			pr_debug("Truncated TX packet: "
				 " len %d != %zd\n", err, len);
		if (!zcopy_used)
			vhost_add_used_and_signal(&net->dev, vq, head, 0);
		else
			vhost_zerocopy_signal_used(net, vq);
		vhost_net_tx_packet(net);
	} while (likely(!vhost_exceeds_weight(vq, ++sent_pkts, total_len)));
}
/* Expects to be always run from workqueue - which acts as
 * read-side critical section for our kind of RCU. */
static void handle_tx(struct vhost_net *net)
{
	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
	struct vhost_virtqueue *vq = &nvq->vq;
	struct socket *sock;

	mutex_lock_nested(&vq->mutex, VHOST_NET_VQ_TX);
	sock = vhost_vq_get_backend(vq);
	if (!sock)
		goto out;

	if (!vq_meta_prefetch(vq))
		goto out;

	vhost_disable_notify(&net->dev, vq);
	vhost_net_disable_vq(net, vq);

	if (vhost_sock_zcopy(sock))
		handle_tx_zerocopy(net, sock);
	else
		handle_tx_copy(net, sock);

out:
	mutex_unlock(&vq->mutex);
}
static int peek_head_len(struct vhost_net_virtqueue *rvq, struct sock *sk)
{
	struct sk_buff *head;
	int len = 0;
	unsigned long flags;

	if (rvq->rx_ring)
		return vhost_net_buf_peek(rvq);

	spin_lock_irqsave(&sk->sk_receive_queue.lock, flags);
	head = skb_peek(&sk->sk_receive_queue);
	if (likely(head)) {
		len = head->len;
		if (skb_vlan_tag_present(head))
			len += VLAN_HLEN;
	}

	spin_unlock_irqrestore(&sk->sk_receive_queue.lock, flags);
	return len;
}
static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk,
				      bool *busyloop_intr)
{
	struct vhost_net_virtqueue *rnvq = &net->vqs[VHOST_NET_VQ_RX];
	struct vhost_net_virtqueue *tnvq = &net->vqs[VHOST_NET_VQ_TX];
	struct vhost_virtqueue *rvq = &rnvq->vq;
	struct vhost_virtqueue *tvq = &tnvq->vq;
	int len = peek_head_len(rnvq, sk);

	if (!len && rvq->busyloop_timeout) {
		/* Flush batched heads first */
		vhost_net_signal_used(rnvq);
		/* Both tx vq and rx socket were polled here */
		vhost_net_busy_poll(net, rvq, tvq, busyloop_intr, true);

		len = peek_head_len(rnvq, sk);
	}

	return len;
}
/* This is a multi-buffer version of vhost_get_vq_desc(), that works if
 *	vq has read descriptors only.
 * @vq		- the relevant virtqueue
 * @datalen	- data length we'll be reading
 * @iovcount	- returned count of io vectors we fill
 * @log		- vhost log
 * @log_num	- log offset
 * @quota	- headcount quota, 1 for big buffer
 *	returns number of buffer heads allocated, negative on error
 */
static int get_rx_bufs(struct vhost_virtqueue *vq,
		       struct vring_used_elem *heads,
		       int datalen,
		       unsigned *iovcount,
		       struct vhost_log *log,
		       unsigned *log_num,
		       unsigned int quota)
{
	unsigned int out, in;

	/* len is always initialized before use since we are always called with
	 * datalen > 0.
	 */
	while (datalen > 0 && headcount < quota) {
		if (unlikely(seg >= UIO_MAXIOV)) {
			r = -ENOBUFS;
			goto err;
		}
		r = vhost_get_vq_desc(vq, vq->iov + seg,
				      ARRAY_SIZE(vq->iov) - seg, &out,
				      &in, log, log_num);
		if (unlikely(r < 0))
			goto err;

		if (unlikely(out || in <= 0)) {
			vq_err(vq, "unexpected descriptor format for RX: "
				   "out %d, in %d\n", out, in);
			r = -EINVAL;
			goto err;
		}
		if (unlikely(log)) {

		heads[headcount].id = cpu_to_vhost32(vq, d);
		len = iov_length(vq->iov + seg, in);
		heads[headcount].len = cpu_to_vhost32(vq, len);

	heads[headcount - 1].len = cpu_to_vhost32(vq, len + datalen);

	/* Detect overrun */
	if (unlikely(datalen > 0)) {
		r = UIO_MAXIOV + 1;
		goto err;
	}
	return headcount;
err:
	vhost_discard_vq_desc(vq, headcount);
	return r;
}
/* Expects to be always run from workqueue - which acts as
 * read-side critical section for our kind of RCU. */
static void handle_rx(struct vhost_net *net)
{
	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_RX];
	struct vhost_virtqueue *vq = &nvq->vq;

	struct vhost_log *vq_log;
	struct msghdr msg = {
		.msg_control = NULL, /* FIXME: get and handle RX aux data. */
		.msg_controllen = 0,
		.msg_flags = MSG_DONTWAIT,
	};
	struct virtio_net_hdr hdr = {
		.gso_type = VIRTIO_NET_HDR_GSO_NONE
	};
	size_t total_len = 0;

	size_t vhost_hlen, sock_hlen;
	size_t vhost_len, sock_len;
	bool busyloop_intr = false;
	struct socket *sock;
	struct iov_iter fixup;
	__virtio16 num_buffers;

	mutex_lock_nested(&vq->mutex, VHOST_NET_VQ_RX);
	sock = vhost_vq_get_backend(vq);
	if (!sock)
		goto out;

	if (!vq_meta_prefetch(vq))
		goto out;

	vhost_disable_notify(&net->dev, vq);
	vhost_net_disable_vq(net, vq);

	vhost_hlen = nvq->vhost_hlen;
	sock_hlen = nvq->sock_hlen;

	vq_log = unlikely(vhost_has_feature(vq, VHOST_F_LOG_ALL)) ?
		vq->log : NULL;
	mergeable = vhost_has_feature(vq, VIRTIO_NET_F_MRG_RXBUF);

	do {
		sock_len = vhost_net_rx_peek_head_len(net, sock->sk,
						      &busyloop_intr);

		sock_len += sock_hlen;
		vhost_len = sock_len + vhost_hlen;
		headcount = get_rx_bufs(vq, vq->heads + nvq->done_idx,
					vhost_len, &in, vq_log, &log,
					likely(mergeable) ? UIO_MAXIOV : 1);
		/* On error, stop handling until the next kick. */
		if (unlikely(headcount < 0))
			goto out;
		/* OK, now we need to know about added descriptors. */
		if (!headcount) {
			if (unlikely(busyloop_intr)) {
				vhost_poll_queue(&vq->poll);
			} else if (unlikely(vhost_enable_notify(&net->dev, vq))) {
				/* They have slipped one in as we were
				 * doing that: check again. */
				vhost_disable_notify(&net->dev, vq);
				continue;
			}
			/* Nothing new? Wait for eventfd to tell us
			 * they refilled. */
			goto out;
		}
		busyloop_intr = false;
		msg.msg_control = vhost_net_buf_consume(&nvq->rxq);
		/* On overrun, truncate and discard */
		if (unlikely(headcount > UIO_MAXIOV)) {
			iov_iter_init(&msg.msg_iter, ITER_DEST, vq->iov, 1, 1);
			err = sock->ops->recvmsg(sock, &msg,
						 1, MSG_DONTWAIT | MSG_TRUNC);
			pr_debug("Discarded rx packet: len %zd\n", sock_len);
			continue;
		}
		/* We don't need to be notified again. */
		iov_iter_init(&msg.msg_iter, ITER_DEST, vq->iov, in, vhost_len);
		fixup = msg.msg_iter;
		if (unlikely((vhost_hlen))) {
			/* We will supply the header ourselves
			 * TODO: support TSO.
			 */
			iov_iter_advance(&msg.msg_iter, vhost_hlen);
		}
		err = sock->ops->recvmsg(sock, &msg,
					 sock_len, MSG_DONTWAIT | MSG_TRUNC);
		/* Userspace might have consumed the packet meanwhile:
		 * it's not supposed to do this usually, but might be hard
		 * to prevent. Discard data we got (if any) and keep going. */
		if (unlikely(err != sock_len)) {
			pr_debug("Discarded rx packet: "
				 " len %d, expected %zd\n", err, sock_len);
			vhost_discard_vq_desc(vq, headcount);
			continue;
		}
		/* Supply virtio_net_hdr if VHOST_NET_F_VIRTIO_NET_HDR */
		if (unlikely(vhost_hlen)) {
			if (copy_to_iter(&hdr, sizeof(hdr),
					 &fixup) != sizeof(hdr)) {
				vq_err(vq, "Unable to write vnet_hdr "
					   "at addr %p\n", vq->iov->iov_base);
				goto out;
			}
		} else {
			/* Header came from socket; we'll need to patch
			 * ->num_buffers over if VIRTIO_NET_F_MRG_RXBUF
			 */
			iov_iter_advance(&fixup, sizeof(hdr));
		}
		/* TODO: Should check and handle checksum. */

		num_buffers = cpu_to_vhost16(vq, headcount);
		if (likely(mergeable) &&
		    copy_to_iter(&num_buffers, sizeof num_buffers,
				 &fixup) != sizeof num_buffers) {
			vq_err(vq, "Failed num_buffers write");
			vhost_discard_vq_desc(vq, headcount);
			goto out;
		}
		nvq->done_idx += headcount;
		if (nvq->done_idx > VHOST_NET_BATCH)
			vhost_net_signal_used(nvq);
		if (unlikely(vq_log))
			vhost_log_write(vq, vq_log, log, vhost_len,
					vq->iov, in);
		total_len += vhost_len;
	} while (likely(!vhost_exceeds_weight(vq, ++recv_pkts, total_len)));

	if (unlikely(busyloop_intr))
		vhost_poll_queue(&vq->poll);
	else
		vhost_net_enable_vq(net, vq);
out:
	vhost_net_signal_used(nvq);
	mutex_unlock(&vq->mutex);
}
static void handle_tx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev);

	handle_tx(net);
}

static void handle_rx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev);

	handle_rx(net);
}
static void handle_tx_net(struct vhost_work *work)
{
	struct vhost_net *net = container_of(work, struct vhost_net,
					     poll[VHOST_NET_VQ_TX].work);

	handle_tx(net);
}

static void handle_rx_net(struct vhost_work *work)
{
	struct vhost_net *net = container_of(work, struct vhost_net,
					     poll[VHOST_NET_VQ_RX].work);

	handle_rx(net);
}
static int vhost_net_open(struct inode *inode, struct file *f)
{
	struct vhost_net *n;
	struct vhost_dev *dev;
	struct vhost_virtqueue **vqs;
	void **queue;
	struct xdp_buff *xdp;
	int i;

	n = kvmalloc(sizeof *n, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!n)
		return -ENOMEM;
	vqs = kmalloc_array(VHOST_NET_VQ_MAX, sizeof(*vqs), GFP_KERNEL);

	queue = kmalloc_array(VHOST_NET_BATCH, sizeof(void *),
			      GFP_KERNEL);

	n->vqs[VHOST_NET_VQ_RX].rxq.queue = queue;

	xdp = kmalloc_array(VHOST_NET_BATCH, sizeof(*xdp), GFP_KERNEL);

	n->vqs[VHOST_NET_VQ_TX].xdp = xdp;

	dev = &n->dev;
	vqs[VHOST_NET_VQ_TX] = &n->vqs[VHOST_NET_VQ_TX].vq;
	vqs[VHOST_NET_VQ_RX] = &n->vqs[VHOST_NET_VQ_RX].vq;
	n->vqs[VHOST_NET_VQ_TX].vq.handle_kick = handle_tx_kick;
	n->vqs[VHOST_NET_VQ_RX].vq.handle_kick = handle_rx_kick;
	for (i = 0; i < VHOST_NET_VQ_MAX; i++) {
		n->vqs[i].ubufs = NULL;
		n->vqs[i].ubuf_info = NULL;
		n->vqs[i].upend_idx = 0;
		n->vqs[i].done_idx = 0;
		n->vqs[i].batched_xdp = 0;
		n->vqs[i].vhost_hlen = 0;
		n->vqs[i].sock_hlen = 0;
		n->vqs[i].rx_ring = NULL;
		vhost_net_buf_init(&n->vqs[i].rxq);
	}
	vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX,
		       UIO_MAXIOV + VHOST_NET_BATCH,
		       VHOST_NET_PKT_WEIGHT, VHOST_NET_WEIGHT, true,
		       NULL);

	vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, EPOLLOUT, dev,
			vqs[VHOST_NET_VQ_TX]);
	vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, EPOLLIN, dev,
			vqs[VHOST_NET_VQ_RX]);

	f->private_data = n;
	page_frag_cache_init(&n->pf_cache);

	return 0;
}
static struct socket *vhost_net_stop_vq(struct vhost_net *n,
					struct vhost_virtqueue *vq)
{
	struct socket *sock;
	struct vhost_net_virtqueue *nvq =
		container_of(vq, struct vhost_net_virtqueue, vq);

	mutex_lock(&vq->mutex);
	sock = vhost_vq_get_backend(vq);
	vhost_net_disable_vq(n, vq);
	vhost_vq_set_backend(vq, NULL);
	vhost_net_buf_unproduce(nvq);
	nvq->rx_ring = NULL;
	mutex_unlock(&vq->mutex);
	return sock;
}
static void vhost_net_stop(struct vhost_net *n, struct socket **tx_sock,
			   struct socket **rx_sock)
{
	*tx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_TX].vq);
	*rx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_RX].vq);
}
static void vhost_net_flush(struct vhost_net *n)
{
	vhost_dev_flush(&n->dev);
	if (n->vqs[VHOST_NET_VQ_TX].ubufs) {
		mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
		n->tx_flush = true;
		mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
		/* Wait for all lower device DMAs done. */
		vhost_net_ubuf_put_and_wait(n->vqs[VHOST_NET_VQ_TX].ubufs);
		mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
		n->tx_flush = false;
		atomic_set(&n->vqs[VHOST_NET_VQ_TX].ubufs->refcount, 1);
		mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
	}
}
static int vhost_net_release(struct inode *inode, struct file *f)
{
	struct vhost_net *n = f->private_data;
	struct socket *tx_sock;
	struct socket *rx_sock;

	vhost_net_stop(n, &tx_sock, &rx_sock);

	vhost_dev_stop(&n->dev);
	vhost_dev_cleanup(&n->dev);
	vhost_net_vq_reset(n);
	if (tx_sock)
		sockfd_put(tx_sock);
	if (rx_sock)
		sockfd_put(rx_sock);
	/* Make sure no callbacks are outstanding */
	synchronize_rcu();
	/* We do an extra flush before freeing memory,
	 * since jobs can re-queue themselves. */
	vhost_net_flush(n);
	kfree(n->vqs[VHOST_NET_VQ_RX].rxq.queue);
	kfree(n->vqs[VHOST_NET_VQ_TX].xdp);
	page_frag_cache_drain(&n->pf_cache);
	kvfree(n);
	return 0;
}
static struct socket *get_raw_socket(int fd)
{
	int r;
	struct socket *sock = sockfd_lookup(fd, &r);

	if (!sock)
		return ERR_PTR(-ENOTSOCK);

	/* Parameter checking */
	if (sock->sk->sk_type != SOCK_RAW) {
		r = -ESOCKTNOSUPPORT;
		goto err;
	}

	if (sock->sk->sk_family != AF_PACKET) {
static struct ptr_ring *get_tap_ptr_ring(struct file *file)
{
	struct ptr_ring *ring;
	ring = tun_get_tx_ring(file);
	if (!IS_ERR(ring))
		goto out;
	ring = tap_get_ptr_ring(file);
	if (!IS_ERR(ring))
		goto out;
	ring = NULL;
out:
	return ring;
}
static struct socket *get_tap_socket(int fd)
{
	struct file *file = fget(fd);
	struct socket *sock;

	if (!file)
		return ERR_PTR(-EBADF);
	sock = tun_get_socket(file);
	if (!IS_ERR(sock))
		return sock;
	sock = tap_get_socket(file);
	if (IS_ERR(sock))
		fput(file);
	return sock;
}
static struct socket *get_socket(int fd)
{
	struct socket *sock;

	/* special case to disable backend */
	if (fd == -1)
		return NULL;
	sock = get_raw_socket(fd);
	if (!IS_ERR(sock))
		return sock;
	sock = get_tap_socket(fd);
	if (!IS_ERR(sock))
		return sock;
	return ERR_PTR(-ENOTSOCK);
}
static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
{
	struct socket *sock, *oldsock;
	struct vhost_virtqueue *vq;
	struct vhost_net_virtqueue *nvq;
	struct vhost_net_ubuf_ref *ubufs, *oldubufs = NULL;

	mutex_lock(&n->dev.mutex);
	r = vhost_dev_check_owner(&n->dev);

	if (index >= VHOST_NET_VQ_MAX) {

	vq = &n->vqs[index].vq;
	nvq = &n->vqs[index];
	mutex_lock(&vq->mutex);

	vhost_clear_msg(&n->dev);

	/* Verify that ring has been setup correctly. */
	if (!vhost_vq_access_ok(vq)) {

	sock = get_socket(fd);

	/* start polling new socket */
	oldsock = vhost_vq_get_backend(vq);
	if (sock != oldsock) {
		ubufs = vhost_net_ubuf_alloc(vq,
					     sock && vhost_sock_zcopy(sock));
		if (IS_ERR(ubufs)) {

		vhost_net_disable_vq(n, vq);
		vhost_vq_set_backend(vq, sock);
		vhost_net_buf_unproduce(nvq);
		r = vhost_vq_init_access(vq);

		r = vhost_net_enable_vq(n, vq);

		if (index == VHOST_NET_VQ_RX) {
			if (sock)
				nvq->rx_ring = get_tap_ptr_ring(sock->file);
			else
				nvq->rx_ring = NULL;
		}

		oldubufs = nvq->ubufs;

		n->tx_zcopy_err = 0;
		n->tx_flush = false;
	}

	mutex_unlock(&vq->mutex);

	if (oldubufs) {
		vhost_net_ubuf_put_wait_and_free(oldubufs);
		mutex_lock(&vq->mutex);
		vhost_zerocopy_signal_used(n, vq);
		mutex_unlock(&vq->mutex);
	}

	if (oldsock) {
		vhost_dev_flush(&n->dev);
		sockfd_put(oldsock);
	}

	mutex_unlock(&n->dev.mutex);
	return 0;

err_used:
	vhost_vq_set_backend(vq, oldsock);
	vhost_net_enable_vq(n, vq);
	if (ubufs)
		vhost_net_ubuf_put_wait_and_free(ubufs);
err_ubufs:
	if (sock)
		sockfd_put(sock);
err_vq:
	mutex_unlock(&vq->mutex);
err:
	mutex_unlock(&n->dev.mutex);
	return r;
}
static long vhost_net_reset_owner(struct vhost_net *n)
{
	struct socket *tx_sock = NULL;
	struct socket *rx_sock = NULL;

	struct vhost_iotlb *umem;

	mutex_lock(&n->dev.mutex);
	err = vhost_dev_check_owner(&n->dev);

	umem = vhost_dev_reset_owner_prepare();

	vhost_net_stop(n, &tx_sock, &rx_sock);

	vhost_dev_stop(&n->dev);
	vhost_dev_reset_owner(&n->dev, umem);
	vhost_net_vq_reset(n);

	mutex_unlock(&n->dev.mutex);

	sockfd_put(tx_sock);

	sockfd_put(rx_sock);
static int vhost_net_set_features(struct vhost_net *n, u64 features)
{
	size_t vhost_hlen, sock_hlen, hdr_len;
	int i;

	hdr_len = (features & ((1ULL << VIRTIO_NET_F_MRG_RXBUF) |
			       (1ULL << VIRTIO_F_VERSION_1))) ?
			sizeof(struct virtio_net_hdr_mrg_rxbuf) :
			sizeof(struct virtio_net_hdr);
	if (features & (1 << VHOST_NET_F_VIRTIO_NET_HDR)) {
		/* vhost provides vnet_hdr */
		vhost_hlen = hdr_len;
		sock_hlen = 0;
	} else {
		/* socket provides vnet_hdr */
		vhost_hlen = 0;
		sock_hlen = hdr_len;
	}
	mutex_lock(&n->dev.mutex);
	if ((features & (1 << VHOST_F_LOG_ALL)) &&
	    !vhost_log_access_ok(&n->dev))
		goto out_unlock;

	if ((features & (1ULL << VIRTIO_F_ACCESS_PLATFORM))) {
		if (vhost_init_device_iotlb(&n->dev))
			goto out_unlock;
	}

	for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
		mutex_lock(&n->vqs[i].vq.mutex);
		n->vqs[i].vq.acked_features = features;
		n->vqs[i].vhost_hlen = vhost_hlen;
		n->vqs[i].sock_hlen = sock_hlen;
		mutex_unlock(&n->vqs[i].vq.mutex);
	}
	mutex_unlock(&n->dev.mutex);
	return 0;

out_unlock:
	mutex_unlock(&n->dev.mutex);
	return -EFAULT;
}
static long vhost_net_set_owner(struct vhost_net *n)
{
	int r;

	mutex_lock(&n->dev.mutex);
	if (vhost_dev_has_owner(&n->dev)) {
		r = -EBUSY;
		goto out;
	}
	r = vhost_net_set_ubuf_info(n);
	if (r)
		goto out;
	r = vhost_dev_set_owner(&n->dev);
	if (r)
		vhost_net_clear_ubuf_info(n);
	vhost_net_flush(n);
out:
	mutex_unlock(&n->dev.mutex);
	return r;
}
static long vhost_net_ioctl(struct file *f, unsigned int ioctl,
			    unsigned long arg)
{
	struct vhost_net *n = f->private_data;
	void __user *argp = (void __user *)arg;
	u64 __user *featurep = argp;
	struct vhost_vring_file backend;
	u64 features;
	int r;

	switch (ioctl) {
	case VHOST_NET_SET_BACKEND:
		if (copy_from_user(&backend, argp, sizeof backend))
			return -EFAULT;
		return vhost_net_set_backend(n, backend.index, backend.fd);
	case VHOST_GET_FEATURES:
		features = VHOST_NET_FEATURES;
		if (copy_to_user(featurep, &features, sizeof features))
			return -EFAULT;
		return 0;
	case VHOST_SET_FEATURES:
		if (copy_from_user(&features, featurep, sizeof features))
			return -EFAULT;
		if (features & ~VHOST_NET_FEATURES)
			return -EOPNOTSUPP;
		return vhost_net_set_features(n, features);
	case VHOST_GET_BACKEND_FEATURES:
		features = VHOST_NET_BACKEND_FEATURES;
		if (copy_to_user(featurep, &features, sizeof(features)))
			return -EFAULT;
		return 0;
	case VHOST_SET_BACKEND_FEATURES:
		if (copy_from_user(&features, featurep, sizeof(features)))
			return -EFAULT;
		if (features & ~VHOST_NET_BACKEND_FEATURES)
			return -EOPNOTSUPP;
		vhost_set_backend_features(&n->dev, features);
		return 0;
	case VHOST_RESET_OWNER:
		return vhost_net_reset_owner(n);
	case VHOST_SET_OWNER:
		return vhost_net_set_owner(n);
	default:
		mutex_lock(&n->dev.mutex);
		r = vhost_dev_ioctl(&n->dev, ioctl, argp);
		if (r == -ENOIOCTLCMD)
			r = vhost_vring_ioctl(&n->dev, ioctl, argp);

		mutex_unlock(&n->dev.mutex);
		return r;
	}
}
static ssize_t vhost_net_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct vhost_net *n = file->private_data;
	struct vhost_dev *dev = &n->dev;
	int noblock = file->f_flags & O_NONBLOCK;

	return vhost_chr_read_iter(dev, to, noblock);
}
static ssize_t vhost_net_chr_write_iter(struct kiocb *iocb,
					struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct vhost_net *n = file->private_data;
	struct vhost_dev *dev = &n->dev;

	return vhost_chr_write_iter(dev, from);
}
static __poll_t vhost_net_chr_poll(struct file *file, poll_table *wait)
{
	struct vhost_net *n = file->private_data;
	struct vhost_dev *dev = &n->dev;

	return vhost_chr_poll(file, dev, wait);
}
static const struct file_operations vhost_net_fops = {
	.owner          = THIS_MODULE,
	.release        = vhost_net_release,
	.read_iter      = vhost_net_chr_read_iter,
	.write_iter     = vhost_net_chr_write_iter,
	.poll           = vhost_net_chr_poll,
	.unlocked_ioctl = vhost_net_ioctl,
	.compat_ioctl   = compat_ptr_ioctl,
	.open           = vhost_net_open,
	.llseek		= noop_llseek,
};
static struct miscdevice vhost_net_misc = {
	.minor = VHOST_NET_MINOR,
	.name = "vhost-net",
	.fops = &vhost_net_fops,
};
static int __init vhost_net_init(void)
{
	if (experimental_zcopytx)
		vhost_net_enable_zcopy(VHOST_NET_VQ_TX);
	return misc_register(&vhost_net_misc);
}
module_init(vhost_net_init);
static void __exit vhost_net_exit(void)
{
	misc_deregister(&vhost_net_misc);
}
module_exit(vhost_net_exit);
MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Michael S. Tsirkin");
MODULE_DESCRIPTION("Host kernel accelerator for virtio net");
MODULE_ALIAS_MISCDEV(VHOST_NET_MINOR);
MODULE_ALIAS("devname:vhost-net");