// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2009 Red Hat, Inc.
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * virtio-net server in host kernel.
 */
#include <linux/compat.h>
#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/virtio_net.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/sched/clock.h>
#include <linux/sched/signal.h>
#include <linux/vmalloc.h>

#include <linux/net.h>
#include <linux/if_packet.h>
#include <linux/if_arp.h>
#include <linux/if_tun.h>
#include <linux/if_macvlan.h>
#include <linux/if_tap.h>
#include <linux/if_vlan.h>
#include <linux/skb_array.h>
#include <linux/skbuff.h>
static int experimental_zcopytx = 0;
module_param(experimental_zcopytx, int, 0444);
MODULE_PARM_DESC(experimental_zcopytx, "Enable Zero Copy TX;"
		 " 1 - Enable; 0 - Disable");
/* Max number of bytes transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others. */
#define VHOST_NET_WEIGHT 0x80000

/* Max number of packets transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others with small
 * pkts.
 */
#define VHOST_NET_PKT_WEIGHT 256

/* MAX number of TX used buffers for outstanding zerocopy */
#define VHOST_MAX_PEND 128
#define VHOST_GOODCOPY_LEN 256
/*
 * For transmit, used buffer len is unused; we override it to track buffer
 * status internally; used for zerocopy tx only.
 */
/* Lower device DMA failed */
#define VHOST_DMA_FAILED_LEN	((__force __virtio32)3)
/* Lower device DMA done */
#define VHOST_DMA_DONE_LEN	((__force __virtio32)2)
/* Lower device DMA in progress */
#define VHOST_DMA_IN_PROGRESS	((__force __virtio32)1)
#define VHOST_DMA_CLEAR_LEN	((__force __virtio32)0)

#define VHOST_DMA_IS_DONE(len) ((__force u32)(len) >= (__force u32)VHOST_DMA_DONE_LEN)
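
/* Note on the encoding above: DONE (2) and FAILED (3) both satisfy
 * VHOST_DMA_IS_DONE(), so a buffer whose lower-device DMA failed is still
 * reclaimed by vhost_zerocopy_signal_used(); the failure only bumps the
 * zerocopy error counter used to throttle future zerocopy attempts.
 */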
enum {
	VHOST_NET_FEATURES = VHOST_FEATURES |
			     (1ULL << VHOST_NET_F_VIRTIO_NET_HDR) |
			     (1ULL << VIRTIO_NET_F_MRG_RXBUF) |
			     (1ULL << VIRTIO_F_IOMMU_PLATFORM)
};

enum {
	VHOST_NET_BACKEND_FEATURES = (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2)
};

enum {
	VHOST_NET_VQ_RX = 0,
	VHOST_NET_VQ_TX = 1,
	VHOST_NET_VQ_MAX = 2,
};
struct vhost_net_ubuf_ref {
	/* refcount follows semantics similar to kref:
	 *  0: object is released
	 *  1: no outstanding ubufs
	 * >1: outstanding ubufs
	 */
	atomic_t refcount;
	wait_queue_head_t wait;
	struct vhost_virtqueue *vq;
};

#define VHOST_NET_BATCH 64
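/* VHOST_NET_BATCH bounds both the number of pointers consumed from the tap
 * ptr_ring in one go (vhost_net_buf_produce) and how many used heads or XDP
 * frames are accumulated before vhost_net_signal_used()/vhost_tx_batch()
 * flush them, amortizing used-ring updates and guest notifications.
 */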
struct vhost_net_buf {
	void **queue;
	int tail;
	int head;
};

struct vhost_net_virtqueue {
	struct vhost_virtqueue vq;
	size_t vhost_hlen;
	size_t sock_hlen;
	/* vhost zerocopy support fields below: */
	/* last used idx for outstanding DMA zerocopy buffers */
	int upend_idx;
	/* For TX, first used idx for DMA done zerocopy buffers
	 * For RX, number of batched heads
	 */
	int done_idx;
	/* Number of XDP frames batched */
	int batched_xdp;
	/* an array of userspace buffers info */
	struct ubuf_info *ubuf_info;
	/* Reference counting for outstanding ubufs.
	 * Protected by vq mutex. Writers must also take device mutex. */
	struct vhost_net_ubuf_ref *ubufs;
	struct ptr_ring *rx_ring;
	struct vhost_net_buf rxq;
	/* Batched XDP buffs */
	struct xdp_buff *xdp;
};
struct vhost_net {
	struct vhost_dev dev;
	struct vhost_net_virtqueue vqs[VHOST_NET_VQ_MAX];
	struct vhost_poll poll[VHOST_NET_VQ_MAX];
	/* Number of TX recently submitted.
	 * Protected by tx vq lock. */
	unsigned tx_packets;
	/* Number of times zerocopy TX recently failed.
	 * Protected by tx vq lock. */
	unsigned tx_zcopy_err;
	/* Flush in progress. Protected by tx vq lock. */
	bool tx_flush;
	/* Private page frag */
	struct page_frag page_frag;
	/* Refcount bias of page frag */
	int refcnt_bias;
};

static unsigned vhost_net_zcopy_mask __read_mostly;
static void *vhost_net_buf_get_ptr(struct vhost_net_buf *rxq)
{
	if (rxq->tail != rxq->head)
		return rxq->queue[rxq->head];
	else
		return NULL;
}

static int vhost_net_buf_get_size(struct vhost_net_buf *rxq)
{
	return rxq->tail - rxq->head;
}

static int vhost_net_buf_is_empty(struct vhost_net_buf *rxq)
{
	return rxq->tail == rxq->head;
}

static void *vhost_net_buf_consume(struct vhost_net_buf *rxq)
{
	void *ret = vhost_net_buf_get_ptr(rxq);

	++rxq->head;
	return ret;
}
static int vhost_net_buf_produce(struct vhost_net_virtqueue *nvq)
{
	struct vhost_net_buf *rxq = &nvq->rxq;

	rxq->head = 0;
	rxq->tail = ptr_ring_consume_batched(nvq->rx_ring, rxq->queue,
					     VHOST_NET_BATCH);
	return rxq->tail;
}

static void vhost_net_buf_unproduce(struct vhost_net_virtqueue *nvq)
{
	struct vhost_net_buf *rxq = &nvq->rxq;

	if (nvq->rx_ring && !vhost_net_buf_is_empty(rxq)) {
		ptr_ring_unconsume(nvq->rx_ring, rxq->queue + rxq->head,
				   vhost_net_buf_get_size(rxq),
				   tun_ptr_free);
		rxq->head = rxq->tail = 0;
	}
}
static int vhost_net_buf_peek_len(void *ptr)
{
	if (tun_is_xdp_frame(ptr)) {
		struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);

		return xdpf->len;
	}

	return __skb_array_len_with_tag(ptr);
}

static int vhost_net_buf_peek(struct vhost_net_virtqueue *nvq)
{
	struct vhost_net_buf *rxq = &nvq->rxq;

	if (!vhost_net_buf_is_empty(rxq))
		goto out;

	if (!vhost_net_buf_produce(nvq))
		return 0;

out:
	return vhost_net_buf_peek_len(vhost_net_buf_get_ptr(rxq));
}
static void vhost_net_buf_init(struct vhost_net_buf *rxq)
{
	rxq->head = rxq->tail = 0;
}

static void vhost_net_enable_zcopy(int vq)
{
	vhost_net_zcopy_mask |= 0x1 << vq;
}
static struct vhost_net_ubuf_ref *
vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy)
{
	struct vhost_net_ubuf_ref *ubufs;

	/* No zero copy backend? Nothing to count. */
	if (!zcopy)
		return NULL;
	ubufs = kmalloc(sizeof(*ubufs), GFP_KERNEL);
	if (!ubufs)
		return ERR_PTR(-ENOMEM);
	atomic_set(&ubufs->refcount, 1);
	init_waitqueue_head(&ubufs->wait);
	ubufs->vq = vq;
	return ubufs;
}
static int vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs)
{
	int r = atomic_sub_return(1, &ubufs->refcount);
	if (unlikely(!r))
		wake_up(&ubufs->wait);
	return r;
}

static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs)
{
	vhost_net_ubuf_put(ubufs);
	wait_event(ubufs->wait, !atomic_read(&ubufs->refcount));
}

static void vhost_net_ubuf_put_wait_and_free(struct vhost_net_ubuf_ref *ubufs)
{
	vhost_net_ubuf_put_and_wait(ubufs);
	kfree(ubufs);
}
static void vhost_net_clear_ubuf_info(struct vhost_net *n)
{
	int i;

	for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
		kfree(n->vqs[i].ubuf_info);
		n->vqs[i].ubuf_info = NULL;
	}
}

static int vhost_net_set_ubuf_info(struct vhost_net *n)
{
	bool zcopy;
	int i;

	for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
		zcopy = vhost_net_zcopy_mask & (0x1 << i);
		if (!zcopy)
			continue;
		n->vqs[i].ubuf_info =
			kmalloc_array(UIO_MAXIOV,
				      sizeof(*n->vqs[i].ubuf_info),
				      GFP_KERNEL);
		if (!n->vqs[i].ubuf_info)
			goto err;
	}
	return 0;

err:
	vhost_net_clear_ubuf_info(n);
	return -ENOMEM;
}
static void vhost_net_vq_reset(struct vhost_net *n)
{
	int i;

	vhost_net_clear_ubuf_info(n);

	for (i = 0; i < VHOST_NET_VQ_MAX; i++) {
		n->vqs[i].done_idx = 0;
		n->vqs[i].upend_idx = 0;
		n->vqs[i].ubufs = NULL;
		n->vqs[i].vhost_hlen = 0;
		n->vqs[i].sock_hlen = 0;
		vhost_net_buf_init(&n->vqs[i].rxq);
	}
}
static void vhost_net_tx_packet(struct vhost_net *net)
{
	++net->tx_packets;
	if (net->tx_packets < 1024)
		return;
	net->tx_packets = 0;
	net->tx_zcopy_err = 0;
}

static void vhost_net_tx_err(struct vhost_net *net)
{
	++net->tx_zcopy_err;
}

static bool vhost_net_tx_select_zcopy(struct vhost_net *net)
{
	/* TX flush waits for outstanding DMAs to be done.
	 * Don't start new DMAs.
	 */
	return !net->tx_flush &&
		net->tx_packets / 64 >= net->tx_zcopy_err;
}
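
/* Rough behaviour of the heuristic above: the counters are reset every 1024
 * packets, so zerocopy stays selected only while recent failures remain
 * below roughly 1/64th of recent transmissions; a flush in progress also
 * forces the copy path so that no new DMAs are started.
 */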
static bool vhost_sock_zcopy(struct socket *sock)
{
	return unlikely(experimental_zcopytx) &&
		sock_flag(sock->sk, SOCK_ZEROCOPY);
}

static bool vhost_sock_xdp(struct socket *sock)
{
	return sock_flag(sock->sk, SOCK_XDP);
}
/* In case of DMA done not in order in lower device driver for some reason.
 * upend_idx is used to track end of used idx, done_idx is used to track head
 * of used idx. Once lower device DMA done contiguously, we will signal KVM
 * guest used idx.
 */
static void vhost_zerocopy_signal_used(struct vhost_net *net,
				       struct vhost_virtqueue *vq)
{
	struct vhost_net_virtqueue *nvq =
		container_of(vq, struct vhost_net_virtqueue, vq);
	int i, add;
	int j = 0;

	for (i = nvq->done_idx; i != nvq->upend_idx; i = (i + 1) % UIO_MAXIOV) {
		if (vq->heads[i].len == VHOST_DMA_FAILED_LEN)
			vhost_net_tx_err(net);
		if (VHOST_DMA_IS_DONE(vq->heads[i].len)) {
			vq->heads[i].len = VHOST_DMA_CLEAR_LEN;
			++j;
		} else
			break;
	}
	while (j) {
		add = min(UIO_MAXIOV - nvq->done_idx, j);
		vhost_add_used_and_signal_n(vq->dev, vq,
					    &vq->heads[nvq->done_idx], add);
		nvq->done_idx = (nvq->done_idx + add) % UIO_MAXIOV;
		j -= add;
	}
}
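
/* vq->heads[] is used here as a circular buffer of UIO_MAXIOV slots:
 * done_idx..upend_idx (mod UIO_MAXIOV) are zerocopy buffers still owned by
 * the lower device. Completions may arrive out of order, so only the
 * contiguous run of DONE/FAILED entries starting at done_idx is returned to
 * the guest, in at most two chunks when the run wraps around.
 */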
static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success)
{
	struct vhost_net_ubuf_ref *ubufs = ubuf->ctx;
	struct vhost_virtqueue *vq = ubufs->vq;
	int cnt;

	rcu_read_lock_bh();

	/* set len to mark this desc buffers done DMA */
	vq->heads[ubuf->desc].len = success ?
		VHOST_DMA_DONE_LEN : VHOST_DMA_FAILED_LEN;
	cnt = vhost_net_ubuf_put(ubufs);

	/*
	 * Trigger polling thread if guest stopped submitting new buffers:
	 * in this case, the refcount after decrement will eventually reach 1.
	 * We also trigger polling periodically after each 16 packets
	 * (the value 16 here is more or less arbitrary, it's tuned to trigger
	 * less than 10% of times).
	 */
	if (cnt <= 1 || !(cnt % 16))
		vhost_poll_queue(&vq->poll);

	rcu_read_unlock_bh();
}
static inline unsigned long busy_clock(void)
{
	return local_clock() >> 10;
}

static bool vhost_can_busy_poll(unsigned long endtime)
{
	return likely(!need_resched() && !time_after(busy_clock(), endtime) &&
		      !signal_pending(current));
}
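
/* local_clock() returns nanoseconds, so the >> 10 above divides by 1024,
 * giving an approximately-microsecond clock that is cheap to compare against
 * the microsecond busyloop_timeout configured on the virtqueue.
 */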
static void vhost_net_disable_vq(struct vhost_net *n,
				 struct vhost_virtqueue *vq)
{
	struct vhost_net_virtqueue *nvq =
		container_of(vq, struct vhost_net_virtqueue, vq);
	struct vhost_poll *poll = n->poll + (nvq - n->vqs);
	if (!vhost_vq_get_backend(vq))
		return;
	vhost_poll_stop(poll);
}

static int vhost_net_enable_vq(struct vhost_net *n,
			       struct vhost_virtqueue *vq)
{
	struct vhost_net_virtqueue *nvq =
		container_of(vq, struct vhost_net_virtqueue, vq);
	struct vhost_poll *poll = n->poll + (nvq - n->vqs);
	struct socket *sock;

	sock = vhost_vq_get_backend(vq);
	if (!sock)
		return 0;

	return vhost_poll_start(poll, sock->file);
}
static void vhost_net_signal_used(struct vhost_net_virtqueue *nvq)
{
	struct vhost_virtqueue *vq = &nvq->vq;
	struct vhost_dev *dev = vq->dev;

	if (!nvq->done_idx)
		return;

	vhost_add_used_and_signal_n(dev, vq, vq->heads, nvq->done_idx);
	nvq->done_idx = 0;
}

static void vhost_tx_batch(struct vhost_net *net,
			   struct vhost_net_virtqueue *nvq,
			   struct socket *sock,
			   struct msghdr *msghdr)
{
	struct tun_msg_ctl ctl = {
		.type = TUN_MSG_PTR,
		.num = nvq->batched_xdp,
		.ptr = nvq->xdp,
	};
	int err;

	if (nvq->batched_xdp == 0)
		goto signal_used;

	msghdr->msg_control = &ctl;
	err = sock->ops->sendmsg(sock, msghdr, 0);
	if (unlikely(err < 0)) {
		vq_err(&nvq->vq, "Fail to batch sending packets\n");
		return;
	}

signal_used:
	vhost_net_signal_used(nvq);
	nvq->batched_xdp = 0;
}
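
/* Batched TX path: handle_tx_copy() stages up to VHOST_NET_BATCH packets as
 * XDP buffers in nvq->xdp[]; vhost_tx_batch() then hands the whole array to
 * the backend in a single sendmsg() via tun_msg_ctl (TUN_MSG_PTR) before
 * flushing the corresponding used heads.
 */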
static int sock_has_rx_data(struct socket *sock)
{
	if (unlikely(!sock))
		return 0;

	if (sock->ops->peek_len)
		return sock->ops->peek_len(sock);

	return !skb_queue_empty(&sock->sk->sk_receive_queue);
}
static void vhost_net_busy_poll_try_queue(struct vhost_net *net,
					  struct vhost_virtqueue *vq)
{
	if (!vhost_vq_avail_empty(&net->dev, vq)) {
		vhost_poll_queue(&vq->poll);
	} else if (unlikely(vhost_enable_notify(&net->dev, vq))) {
		vhost_disable_notify(&net->dev, vq);
		vhost_poll_queue(&vq->poll);
	}
}
static void vhost_net_busy_poll(struct vhost_net *net,
				struct vhost_virtqueue *rvq,
				struct vhost_virtqueue *tvq,
				bool *busyloop_intr,
				bool poll_rx)
{
	unsigned long busyloop_timeout;
	unsigned long endtime;
	struct socket *sock;
	struct vhost_virtqueue *vq = poll_rx ? tvq : rvq;

	/* Try to hold the vq mutex of the paired virtqueue. We can't
	 * use mutex_lock() here since we could not guarantee a
	 * consistent lock ordering.
	 */
	if (!mutex_trylock(&vq->mutex))
		return;

	vhost_disable_notify(&net->dev, vq);
	sock = vhost_vq_get_backend(rvq);

	busyloop_timeout = poll_rx ? rvq->busyloop_timeout :
				     tvq->busyloop_timeout;

	preempt_disable();
	endtime = busy_clock() + busyloop_timeout;

	while (vhost_can_busy_poll(endtime)) {
		if (vhost_has_work(&net->dev)) {
			*busyloop_intr = true;
			break;
		}

		if ((sock_has_rx_data(sock) &&
		     !vhost_vq_avail_empty(&net->dev, rvq)) ||
		    !vhost_vq_avail_empty(&net->dev, tvq))
			break;

		cpu_relax();
	}

	preempt_enable();

	if (poll_rx || sock_has_rx_data(sock))
		vhost_net_busy_poll_try_queue(net, vq);
	else if (!poll_rx) /* On tx here, sock has no rx data. */
		vhost_enable_notify(&net->dev, rvq);

	mutex_unlock(&vq->mutex);
}
static int vhost_net_tx_get_vq_desc(struct vhost_net *net,
				    struct vhost_net_virtqueue *tnvq,
				    unsigned int *out_num, unsigned int *in_num,
				    struct msghdr *msghdr, bool *busyloop_intr)
{
	struct vhost_net_virtqueue *rnvq = &net->vqs[VHOST_NET_VQ_RX];
	struct vhost_virtqueue *rvq = &rnvq->vq;
	struct vhost_virtqueue *tvq = &tnvq->vq;

	int r = vhost_get_vq_desc(tvq, tvq->iov, ARRAY_SIZE(tvq->iov),
				  out_num, in_num, NULL, NULL);

	if (r == tvq->num && tvq->busyloop_timeout) {
		/* Flush batched packets first */
		if (!vhost_sock_zcopy(vhost_vq_get_backend(tvq)))
			vhost_tx_batch(net, tnvq,
				       vhost_vq_get_backend(tvq),
				       msghdr);

		vhost_net_busy_poll(net, rvq, tvq, busyloop_intr, false);

		r = vhost_get_vq_desc(tvq, tvq->iov, ARRAY_SIZE(tvq->iov),
				      out_num, in_num, NULL, NULL);
	}

	return r;
}
static bool vhost_exceeds_maxpend(struct vhost_net *net)
{
	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
	struct vhost_virtqueue *vq = &nvq->vq;

	return (nvq->upend_idx + UIO_MAXIOV - nvq->done_idx) % UIO_MAXIOV >
	       min_t(unsigned int, VHOST_MAX_PEND, vq->num >> 2);
}
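
/* Worked example of the modular arithmetic above: with UIO_MAXIOV == 1024,
 * done_idx == 1020 and upend_idx == 4 give (4 + 1024 - 1020) % 1024 == 8
 * outstanding zerocopy buffers. Zerocopy is avoided once that count exceeds
 * min(VHOST_MAX_PEND, vq->num / 4).
 */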
static size_t init_iov_iter(struct vhost_virtqueue *vq, struct iov_iter *iter,
			    size_t hdr_size, int out)
{
	/* Skip header. TODO: support TSO. */
	size_t len = iov_length(vq->iov, out);

	iov_iter_init(iter, WRITE, vq->iov, out, len);
	iov_iter_advance(iter, hdr_size);

	return iov_iter_count(iter);
}
static int get_tx_bufs(struct vhost_net *net,
		       struct vhost_net_virtqueue *nvq,
		       struct msghdr *msg,
		       unsigned int *out, unsigned int *in,
		       size_t *len, bool *busyloop_intr)
{
	struct vhost_virtqueue *vq = &nvq->vq;
	int ret;

	ret = vhost_net_tx_get_vq_desc(net, nvq, out, in, msg, busyloop_intr);

	if (ret < 0 || ret == vq->num)
		return ret;

	if (*in) {
		vq_err(vq, "Unexpected descriptor format for TX: out %d, in %d\n",
			*out, *in);
		return -EFAULT;
	}

	/* Sanity check */
	*len = init_iov_iter(vq, &msg->msg_iter, nvq->vhost_hlen, *out);
	if (*len == 0) {
		vq_err(vq, "Unexpected header len for TX: %zd expected %zd\n",
			*len, nvq->vhost_hlen);
		return -EFAULT;
	}

	return ret;
}
static bool tx_can_batch(struct vhost_virtqueue *vq, size_t total_len)
{
	return total_len < VHOST_NET_WEIGHT &&
	       !vhost_vq_avail_empty(vq->dev, vq);
}

#define SKB_FRAG_PAGE_ORDER     get_order(32768)
static bool vhost_net_page_frag_refill(struct vhost_net *net, unsigned int sz,
				       struct page_frag *pfrag, gfp_t gfp)
{
	if (pfrag->page) {
		if (pfrag->offset + sz <= pfrag->size)
			return true;
		__page_frag_cache_drain(pfrag->page, net->refcnt_bias);
	}

	pfrag->offset = 0;
	net->refcnt_bias = 0;
	if (SKB_FRAG_PAGE_ORDER) {
		/* Avoid direct reclaim but allow kswapd to wake */
		pfrag->page = alloc_pages((gfp & ~__GFP_DIRECT_RECLAIM) |
					  __GFP_COMP | __GFP_NOWARN |
					  __GFP_NORETRY,
					  SKB_FRAG_PAGE_ORDER);
		if (likely(pfrag->page)) {
			pfrag->size = PAGE_SIZE << SKB_FRAG_PAGE_ORDER;
			goto done;
		}
	}
	pfrag->page = alloc_page(gfp);
	if (likely(pfrag->page)) {
		pfrag->size = PAGE_SIZE;
		goto done;
	}
	return false;

done:
	net->refcnt_bias = USHRT_MAX;
	page_ref_add(pfrag->page, USHRT_MAX - 1);
	return true;
}
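
/* refcnt_bias trick: when a fresh page is installed, its reference count is
 * raised to USHRT_MAX up front and net->refcnt_bias remembers how many of
 * those references are still owned here. Each buffer built from the frag
 * then hands one pre-taken reference to the packet (--net->refcnt_bias)
 * without an atomic op; the unused surplus is dropped via
 * __page_frag_cache_drain() when the page is replaced or the device closes.
 */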
#define VHOST_NET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)

static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq,
			       struct iov_iter *from)
{
	struct vhost_virtqueue *vq = &nvq->vq;
	struct vhost_net *net = container_of(vq->dev, struct vhost_net,
					     dev);
	struct socket *sock = vhost_vq_get_backend(vq);
	struct page_frag *alloc_frag = &net->page_frag;
	struct virtio_net_hdr *gso;
	struct xdp_buff *xdp = &nvq->xdp[nvq->batched_xdp];
	struct tun_xdp_hdr *hdr;
	size_t len = iov_iter_count(from);
	int headroom = vhost_sock_xdp(sock) ? XDP_PACKET_HEADROOM : 0;
	int buflen = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	int pad = SKB_DATA_ALIGN(VHOST_NET_RX_PAD + headroom + nvq->sock_hlen);
	int sock_hlen = nvq->sock_hlen;
	void *buf;
	int copied;

	if (unlikely(len < nvq->sock_hlen))
		return -EFAULT;

	if (SKB_DATA_ALIGN(len + pad) +
	    SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) > PAGE_SIZE)
		return -ENOSPC;

	buflen += SKB_DATA_ALIGN(len + pad);
	alloc_frag->offset = ALIGN((u64)alloc_frag->offset, SMP_CACHE_BYTES);
	if (unlikely(!vhost_net_page_frag_refill(net, buflen,
						 alloc_frag, GFP_KERNEL)))
		return -ENOMEM;

	buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
	copied = copy_page_from_iter(alloc_frag->page,
				     alloc_frag->offset +
				     offsetof(struct tun_xdp_hdr, gso),
				     sock_hlen, from);
	if (copied != sock_hlen)
		return -EFAULT;

	hdr = buf;
	gso = &hdr->gso;

	if ((gso->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
	    vhost16_to_cpu(vq, gso->csum_start) +
	    vhost16_to_cpu(vq, gso->csum_offset) + 2 >
	    vhost16_to_cpu(vq, gso->hdr_len)) {
		gso->hdr_len = cpu_to_vhost16(vq,
			       vhost16_to_cpu(vq, gso->csum_start) +
			       vhost16_to_cpu(vq, gso->csum_offset) + 2);

		if (vhost16_to_cpu(vq, gso->hdr_len) > len)
			return -EINVAL;
	}

	len -= sock_hlen;
	copied = copy_page_from_iter(alloc_frag->page,
				     alloc_frag->offset + pad,
				     len, from);
	if (copied != len)
		return -EFAULT;

	xdp->data_hard_start = buf;
	xdp->data = buf + pad;
	xdp->data_end = xdp->data + len;
	hdr->buflen = buflen;

	--net->refcnt_bias;
	alloc_frag->offset += buflen;

	++nvq->batched_xdp;

	return 0;
}
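
/* Rough layout of the buffer assembled above, starting at buf: the
 * tun_xdp_hdr (with the vnet header copied into its gso field), padding up
 * to SKB_DATA_ALIGN(VHOST_NET_RX_PAD + headroom + sock_hlen), then the
 * packet data, then tailroom for skb_shared_info. The whole thing must fit
 * in one page (the -ENOSPC check), which lets the tap backend run XDP and
 * build an skb around the frag without another copy.
 */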
static void handle_tx_copy(struct vhost_net *net, struct socket *sock)
{
	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
	struct vhost_virtqueue *vq = &nvq->vq;
	unsigned out, in;
	int head;
	struct msghdr msg = {
		.msg_name = NULL,
		.msg_namelen = 0,
		.msg_control = NULL,
		.msg_controllen = 0,
		.msg_flags = MSG_DONTWAIT,
	};
	size_t len, total_len = 0;
	int err;
	int sent_pkts = 0;
	bool sock_can_batch = (sock->sk->sk_sndbuf == INT_MAX);

	do {
		bool busyloop_intr = false;

		if (nvq->done_idx == VHOST_NET_BATCH)
			vhost_tx_batch(net, nvq, sock, &msg);

		head = get_tx_bufs(net, nvq, &msg, &out, &in, &len,
				   &busyloop_intr);
		/* On error, stop handling until the next kick. */
		if (unlikely(head < 0))
			break;
		/* Nothing new?  Wait for eventfd to tell us they refilled. */
		if (head == vq->num) {
			if (unlikely(busyloop_intr)) {
				vhost_poll_queue(&vq->poll);
			} else if (unlikely(vhost_enable_notify(&net->dev,
								vq))) {
				vhost_disable_notify(&net->dev, vq);
				continue;
			}
			break;
		}

		total_len += len;

		/* For simplicity, TX batching is only enabled if
		 * sndbuf is unlimited.
		 */
		if (sock_can_batch) {
			err = vhost_net_build_xdp(nvq, &msg.msg_iter);
			if (!err) {
				goto done;
			} else if (unlikely(err != -ENOSPC)) {
				vhost_tx_batch(net, nvq, sock, &msg);
				vhost_discard_vq_desc(vq, 1);
				vhost_net_enable_vq(net, vq);
				break;
			}

			/* We can't build XDP buff, go for single
			 * packet path but let's flush batched
			 * packets.
			 */
			vhost_tx_batch(net, nvq, sock, &msg);
			msg.msg_control = NULL;
		} else {
			if (tx_can_batch(vq, total_len))
				msg.msg_flags |= MSG_MORE;
			else
				msg.msg_flags &= ~MSG_MORE;
		}

		/* TODO: Check specific error and bomb out unless ENOBUFS? */
		err = sock->ops->sendmsg(sock, &msg, len);
		if (unlikely(err < 0)) {
			vhost_discard_vq_desc(vq, 1);
			vhost_net_enable_vq(net, vq);
			break;
		}
		if (err != len)
			pr_debug("Truncated TX packet: len %d != %zd\n",
				 err, len);
done:
		vq->heads[nvq->done_idx].id = cpu_to_vhost32(vq, head);
		vq->heads[nvq->done_idx].len = 0;
		++nvq->done_idx;
	} while (likely(!vhost_exceeds_weight(vq, ++sent_pkts, total_len)));

	vhost_tx_batch(net, nvq, sock, &msg);
}
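
/* Copy-path batching summary: when sndbuf is effectively unlimited
 * (sk_sndbuf == INT_MAX) descriptors are staged as XDP buffers and flushed
 * VHOST_NET_BATCH at a time; if a packet cannot be staged (-ENOSPC) the
 * pending batch is flushed and that packet falls back to a plain sendmsg().
 * With a finite sndbuf, every packet goes through sendmsg() and MSG_MORE
 * only hints that more data is likely to follow.
 */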
static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
{
	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
	struct vhost_virtqueue *vq = &nvq->vq;
	unsigned out, in;
	int head;
	struct msghdr msg = {
		.msg_name = NULL,
		.msg_namelen = 0,
		.msg_control = NULL,
		.msg_controllen = 0,
		.msg_flags = MSG_DONTWAIT,
	};
	struct tun_msg_ctl ctl;
	size_t len, total_len = 0;
	int err;
	struct vhost_net_ubuf_ref *uninitialized_var(ubufs);
	bool zcopy_used;
	int sent_pkts = 0;

	do {
		bool busyloop_intr;

		/* Release DMAs done buffers first */
		vhost_zerocopy_signal_used(net, vq);

		busyloop_intr = false;
		head = get_tx_bufs(net, nvq, &msg, &out, &in, &len,
				   &busyloop_intr);
		/* On error, stop handling until the next kick. */
		if (unlikely(head < 0))
			break;
		/* Nothing new?  Wait for eventfd to tell us they refilled. */
		if (head == vq->num) {
			if (unlikely(busyloop_intr)) {
				vhost_poll_queue(&vq->poll);
			} else if (unlikely(vhost_enable_notify(&net->dev, vq))) {
				vhost_disable_notify(&net->dev, vq);
				continue;
			}
			break;
		}

		zcopy_used = len >= VHOST_GOODCOPY_LEN
			     && !vhost_exceeds_maxpend(net)
			     && vhost_net_tx_select_zcopy(net);

		/* use msg_control to pass vhost zerocopy ubuf info to skb */
		if (zcopy_used) {
			struct ubuf_info *ubuf;
			ubuf = nvq->ubuf_info + nvq->upend_idx;

			vq->heads[nvq->upend_idx].id = cpu_to_vhost32(vq, head);
			vq->heads[nvq->upend_idx].len = VHOST_DMA_IN_PROGRESS;
			ubuf->callback = vhost_zerocopy_callback;
			ubuf->ctx = nvq->ubufs;
			ubuf->desc = nvq->upend_idx;
			refcount_set(&ubuf->refcnt, 1);
			msg.msg_control = &ctl;
			ctl.type = TUN_MSG_UBUF;
			ctl.ptr = ubuf;
			msg.msg_controllen = sizeof(ctl);
			ubufs = nvq->ubufs;
			atomic_inc(&ubufs->refcount);
			nvq->upend_idx = (nvq->upend_idx + 1) % UIO_MAXIOV;
		} else {
			msg.msg_control = NULL;
			ubufs = NULL;
		}
		total_len += len;
		if (tx_can_batch(vq, total_len) &&
		    likely(!vhost_exceeds_maxpend(net))) {
			msg.msg_flags |= MSG_MORE;
		} else {
			msg.msg_flags &= ~MSG_MORE;
		}

		/* TODO: Check specific error and bomb out unless ENOBUFS? */
		err = sock->ops->sendmsg(sock, &msg, len);
		if (unlikely(err < 0)) {
			if (zcopy_used) {
				vhost_net_ubuf_put(ubufs);
				nvq->upend_idx = ((unsigned)nvq->upend_idx - 1)
					% UIO_MAXIOV;
			}
			vhost_discard_vq_desc(vq, 1);
			vhost_net_enable_vq(net, vq);
			break;
		}
		if (err != len)
			pr_debug("Truncated TX packet: "
				 " len %d != %zd\n", err, len);
		if (!zcopy_used)
			vhost_add_used_and_signal(&net->dev, vq, head, 0);
		else
			vhost_zerocopy_signal_used(net, vq);
		vhost_net_tx_packet(net);
	} while (likely(!vhost_exceeds_weight(vq, ++sent_pkts, total_len)));
}
/* Expects to be always run from workqueue - which acts as
 * read-side critical section for our kind of RCU. */
static void handle_tx(struct vhost_net *net)
{
	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
	struct vhost_virtqueue *vq = &nvq->vq;
	struct socket *sock;

	mutex_lock_nested(&vq->mutex, VHOST_NET_VQ_TX);
	sock = vhost_vq_get_backend(vq);
	if (!sock)
		goto out;

	if (!vq_meta_prefetch(vq))
		goto out;

	vhost_disable_notify(&net->dev, vq);
	vhost_net_disable_vq(net, vq);

	if (vhost_sock_zcopy(sock))
		handle_tx_zerocopy(net, sock);
	else
		handle_tx_copy(net, sock);

out:
	mutex_unlock(&vq->mutex);
}
static int peek_head_len(struct vhost_net_virtqueue *rvq, struct sock *sk)
{
	struct sk_buff *head;
	int len = 0;
	unsigned long flags;

	if (rvq->rx_ring)
		return vhost_net_buf_peek(rvq);

	spin_lock_irqsave(&sk->sk_receive_queue.lock, flags);
	head = skb_peek(&sk->sk_receive_queue);
	if (likely(head)) {
		len = head->len;
		if (skb_vlan_tag_present(head))
			len += VLAN_HLEN;
	}

	spin_unlock_irqrestore(&sk->sk_receive_queue.lock, flags);
	return len;
}
static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk,
				      bool *busyloop_intr)
{
	struct vhost_net_virtqueue *rnvq = &net->vqs[VHOST_NET_VQ_RX];
	struct vhost_net_virtqueue *tnvq = &net->vqs[VHOST_NET_VQ_TX];
	struct vhost_virtqueue *rvq = &rnvq->vq;
	struct vhost_virtqueue *tvq = &tnvq->vq;
	int len = peek_head_len(rnvq, sk);

	if (!len && rvq->busyloop_timeout) {
		/* Flush batched heads first */
		vhost_net_signal_used(rnvq);
		/* Both tx vq and rx socket were polled here */
		vhost_net_busy_poll(net, rvq, tvq, busyloop_intr, true);

		len = peek_head_len(rnvq, sk);
	}

	return len;
}
/* This is a multi-buffer version of vhost_get_desc, that works if
 *	vq has read descriptors only.
 * @vq		- the relevant virtqueue
 * @datalen	- data length we'll be reading
 * @iovcount	- returned count of io vectors we fill
 * @log		- vhost log
 * @log_num	- log offset
 * @quota       - headcount quota, 1 for big buffer
 *	returns number of buffer heads allocated, negative on error
 */
static int get_rx_bufs(struct vhost_virtqueue *vq,
		       struct vring_used_elem *heads,
		       int datalen,
		       unsigned *iovcount,
		       struct vhost_log *log,
		       unsigned *log_num,
		       unsigned int quota)
{
	unsigned int out, in;
	int seg = 0;
	int headcount = 0;
	unsigned d;
	int r, nlogs = 0;
	/* len is always initialized before use since we are always called with
	 * datalen > 0.
	 */
	u32 uninitialized_var(len);

	while (datalen > 0 && headcount < quota) {
		if (unlikely(seg >= UIO_MAXIOV)) {
			r = -ENOBUFS;
			goto err;
		}
		r = vhost_get_vq_desc(vq, vq->iov + seg,
				      ARRAY_SIZE(vq->iov) - seg, &out,
				      &in, log, log_num);
		if (unlikely(r < 0))
			goto err;

		d = r;
		if (d == vq->num) {
			r = 0;
			goto err;
		}
		if (unlikely(out || in <= 0)) {
			vq_err(vq, "unexpected descriptor format for RX: "
				"out %d, in %d\n", out, in);
			r = -EINVAL;
			goto err;
		}
		if (unlikely(log)) {
			nlogs += *log_num;
			log += *log_num;
		}
		heads[headcount].id = cpu_to_vhost32(vq, d);
		len = iov_length(vq->iov + seg, in);
		heads[headcount].len = cpu_to_vhost32(vq, len);
		datalen -= len;
		++headcount;
		seg += in;
	}
	heads[headcount - 1].len = cpu_to_vhost32(vq, len + datalen);
	*iovcount = seg;
	if (unlikely(log))
		*log_num = nlogs;

	/* Detect overrun */
	if (unlikely(datalen > 0)) {
		r = UIO_MAXIOV + 1;
		goto err;
	}
	return headcount;
err:
	vhost_discard_vq_desc(vq, headcount);
	return r;
}
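
/* Mergeable RX buffers: get_rx_bufs() keeps taking descriptor heads until
 * datalen is covered (or the quota is hit), records each head's id/len, and
 * then trims the last head's reported length by the leftover datalen (zero
 * or negative on success). A positive leftover means the quota of heads
 * could not hold the packet; UIO_MAXIOV + 1 is returned so handle_rx() can
 * truncate and discard it.
 */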
/* Expects to be always run from workqueue - which acts as
 * read-side critical section for our kind of RCU. */
static void handle_rx(struct vhost_net *net)
{
	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_RX];
	struct vhost_virtqueue *vq = &nvq->vq;
	unsigned uninitialized_var(in), log;
	struct vhost_log *vq_log;
	struct msghdr msg = {
		.msg_name = NULL,
		.msg_namelen = 0,
		.msg_control = NULL, /* FIXME: get and handle RX aux data. */
		.msg_controllen = 0,
		.msg_flags = MSG_DONTWAIT,
	};
	struct virtio_net_hdr hdr = {
		.flags = 0,
		.gso_type = VIRTIO_NET_HDR_GSO_NONE
	};
	size_t total_len = 0;
	int err, mergeable;
	s16 headcount;
	size_t vhost_hlen, sock_hlen;
	size_t vhost_len, sock_len;
	bool busyloop_intr = false;
	struct socket *sock;
	struct iov_iter fixup;
	__virtio16 num_buffers;
	int recv_pkts = 0;

	mutex_lock_nested(&vq->mutex, VHOST_NET_VQ_RX);
	sock = vhost_vq_get_backend(vq);
	if (!sock)
		goto out;

	if (!vq_meta_prefetch(vq))
		goto out;

	vhost_disable_notify(&net->dev, vq);
	vhost_net_disable_vq(net, vq);

	vhost_hlen = nvq->vhost_hlen;
	sock_hlen = nvq->sock_hlen;

	vq_log = unlikely(vhost_has_feature(vq, VHOST_F_LOG_ALL)) ?
		vq->log : NULL;
	mergeable = vhost_has_feature(vq, VIRTIO_NET_F_MRG_RXBUF);

	do {
		sock_len = vhost_net_rx_peek_head_len(net, sock->sk,
						      &busyloop_intr);
		if (!sock_len)
			break;
		sock_len += sock_hlen;
		vhost_len = sock_len + vhost_hlen;
		headcount = get_rx_bufs(vq, vq->heads + nvq->done_idx,
					vhost_len, &in, vq_log, &log,
					likely(mergeable) ? UIO_MAXIOV : 1);
		/* On error, stop handling until the next kick. */
		if (unlikely(headcount < 0))
			goto out;
		/* OK, now we need to know about added descriptors. */
		if (!headcount) {
			if (unlikely(busyloop_intr)) {
				vhost_poll_queue(&vq->poll);
			} else if (unlikely(vhost_enable_notify(&net->dev, vq))) {
				/* They have slipped one in as we were
				 * doing that: check again. */
				vhost_disable_notify(&net->dev, vq);
				continue;
			}
			/* Nothing new?  Wait for eventfd to tell us
			 * they refilled. */
			goto out;
		}
		busyloop_intr = false;
		if (nvq->rx_ring)
			msg.msg_control = vhost_net_buf_consume(&nvq->rxq);
		/* On overrun, truncate and discard */
		if (unlikely(headcount > UIO_MAXIOV)) {
			iov_iter_init(&msg.msg_iter, READ, vq->iov, 1, 1);
			err = sock->ops->recvmsg(sock, &msg,
						 1, MSG_DONTWAIT | MSG_TRUNC);
			pr_debug("Discarded rx packet: len %zd\n", sock_len);
			continue;
		}
		/* We don't need to be notified again. */
		iov_iter_init(&msg.msg_iter, READ, vq->iov, in, vhost_len);
		fixup = msg.msg_iter;
		if (unlikely((vhost_hlen))) {
			/* We will supply the header ourselves
			 * TODO: support TSO.
			 */
			iov_iter_advance(&msg.msg_iter, vhost_hlen);
		}
		err = sock->ops->recvmsg(sock, &msg,
					 sock_len, MSG_DONTWAIT | MSG_TRUNC);
		/* Userspace might have consumed the packet meanwhile:
		 * it's not supposed to do this usually, but might be hard
		 * to prevent. Discard data we got (if any) and keep going. */
		if (unlikely(err != sock_len)) {
			pr_debug("Discarded rx packet: "
				 " len %d, expected %zd\n", err, sock_len);
			vhost_discard_vq_desc(vq, headcount);
			continue;
		}
		/* Supply virtio_net_hdr if VHOST_NET_F_VIRTIO_NET_HDR */
		if (unlikely(vhost_hlen)) {
			if (copy_to_iter(&hdr, sizeof(hdr),
					 &fixup) != sizeof(hdr)) {
				vq_err(vq, "Unable to write vnet_hdr "
				       "at addr %p\n", vq->iov->iov_base);
				goto out;
			}
		} else {
			/* Header came from socket; we'll need to patch
			 * ->num_buffers over if VIRTIO_NET_F_MRG_RXBUF
			 */
			iov_iter_advance(&fixup, sizeof(hdr));
		}
		/* TODO: Should check and handle checksum. */

		num_buffers = cpu_to_vhost16(vq, headcount);
		if (likely(mergeable) &&
		    copy_to_iter(&num_buffers, sizeof num_buffers,
				 &fixup) != sizeof num_buffers) {
			vq_err(vq, "Failed num_buffers write");
			vhost_discard_vq_desc(vq, headcount);
			goto out;
		}
		nvq->done_idx += headcount;
		if (nvq->done_idx > VHOST_NET_BATCH)
			vhost_net_signal_used(nvq);
		if (unlikely(vq_log))
			vhost_log_write(vq, vq_log, log, vhost_len,
					vq->iov, in);
		total_len += vhost_len;
	} while (likely(!vhost_exceeds_weight(vq, ++recv_pkts, total_len)));

	if (unlikely(busyloop_intr))
		vhost_poll_queue(&vq->poll);
	else
		vhost_net_enable_vq(net, vq);
out:
	vhost_net_signal_used(nvq);
	mutex_unlock(&vq->mutex);
}
static void handle_tx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev);

	handle_tx(net);
}

static void handle_rx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev);

	handle_rx(net);
}

static void handle_tx_net(struct vhost_work *work)
{
	struct vhost_net *net = container_of(work, struct vhost_net,
					     poll[VHOST_NET_VQ_TX].work);
	handle_tx(net);
}

static void handle_rx_net(struct vhost_work *work)
{
	struct vhost_net *net = container_of(work, struct vhost_net,
					     poll[VHOST_NET_VQ_RX].work);
	handle_rx(net);
}
static int vhost_net_open(struct inode *inode, struct file *f)
{
	struct vhost_net *n;
	struct vhost_dev *dev;
	struct vhost_virtqueue **vqs;
	void **queue;
	struct xdp_buff *xdp;
	int i;

	n = kvmalloc(sizeof *n, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!n)
		return -ENOMEM;
	vqs = kmalloc_array(VHOST_NET_VQ_MAX, sizeof(*vqs), GFP_KERNEL);
	if (!vqs) {
		kvfree(n);
		return -ENOMEM;
	}

	queue = kmalloc_array(VHOST_NET_BATCH, sizeof(void *),
			      GFP_KERNEL);
	if (!queue) {
		kfree(vqs);
		kvfree(n);
		return -ENOMEM;
	}
	n->vqs[VHOST_NET_VQ_RX].rxq.queue = queue;

	xdp = kmalloc_array(VHOST_NET_BATCH, sizeof(*xdp), GFP_KERNEL);
	if (!xdp) {
		kfree(vqs);
		kvfree(n);
		kfree(queue);
		return -ENOMEM;
	}
	n->vqs[VHOST_NET_VQ_TX].xdp = xdp;

	dev = &n->dev;
	vqs[VHOST_NET_VQ_TX] = &n->vqs[VHOST_NET_VQ_TX].vq;
	vqs[VHOST_NET_VQ_RX] = &n->vqs[VHOST_NET_VQ_RX].vq;
	n->vqs[VHOST_NET_VQ_TX].vq.handle_kick = handle_tx_kick;
	n->vqs[VHOST_NET_VQ_RX].vq.handle_kick = handle_rx_kick;
	for (i = 0; i < VHOST_NET_VQ_MAX; i++) {
		n->vqs[i].ubufs = NULL;
		n->vqs[i].ubuf_info = NULL;
		n->vqs[i].upend_idx = 0;
		n->vqs[i].done_idx = 0;
		n->vqs[i].batched_xdp = 0;
		n->vqs[i].vhost_hlen = 0;
		n->vqs[i].sock_hlen = 0;
		n->vqs[i].rx_ring = NULL;
		vhost_net_buf_init(&n->vqs[i].rxq);
	}
	vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX,
		       UIO_MAXIOV + VHOST_NET_BATCH,
		       VHOST_NET_PKT_WEIGHT, VHOST_NET_WEIGHT,
		       NULL);

	vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, EPOLLOUT, dev);
	vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, EPOLLIN, dev);

	f->private_data = n;
	n->page_frag.page = NULL;
	n->refcnt_bias = 0;

	return 0;
}
static struct socket *vhost_net_stop_vq(struct vhost_net *n,
					struct vhost_virtqueue *vq)
{
	struct socket *sock;
	struct vhost_net_virtqueue *nvq =
		container_of(vq, struct vhost_net_virtqueue, vq);

	mutex_lock(&vq->mutex);
	sock = vhost_vq_get_backend(vq);
	vhost_net_disable_vq(n, vq);
	vhost_vq_set_backend(vq, NULL);
	vhost_net_buf_unproduce(nvq);
	nvq->rx_ring = NULL;
	mutex_unlock(&vq->mutex);
	return sock;
}

static void vhost_net_stop(struct vhost_net *n, struct socket **tx_sock,
			   struct socket **rx_sock)
{
	*tx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_TX].vq);
	*rx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_RX].vq);
}
static void vhost_net_flush_vq(struct vhost_net *n, int index)
{
	vhost_poll_flush(n->poll + index);
	vhost_poll_flush(&n->vqs[index].vq.poll);
}

static void vhost_net_flush(struct vhost_net *n)
{
	vhost_net_flush_vq(n, VHOST_NET_VQ_TX);
	vhost_net_flush_vq(n, VHOST_NET_VQ_RX);
	if (n->vqs[VHOST_NET_VQ_TX].ubufs) {
		mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
		n->tx_flush = true;
		mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
		/* Wait for all lower device DMAs done. */
		vhost_net_ubuf_put_and_wait(n->vqs[VHOST_NET_VQ_TX].ubufs);
		mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
		n->tx_flush = false;
		atomic_set(&n->vqs[VHOST_NET_VQ_TX].ubufs->refcount, 1);
		mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
	}
}
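
/* Zerocopy flush protocol: tx_flush makes vhost_net_tx_select_zcopy() refuse
 * to start new DMAs, vhost_net_ubuf_put_and_wait() then waits for the ubuf
 * refcount to drop to 0 (all outstanding completions delivered), after which
 * tx_flush is cleared and the refcount is re-armed to 1.
 */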
static int vhost_net_release(struct inode *inode, struct file *f)
{
	struct vhost_net *n = f->private_data;
	struct socket *tx_sock;
	struct socket *rx_sock;

	vhost_net_stop(n, &tx_sock, &rx_sock);
	vhost_net_flush(n);
	vhost_dev_stop(&n->dev);
	vhost_dev_cleanup(&n->dev);
	vhost_net_vq_reset(n);
	if (tx_sock)
		sockfd_put(tx_sock);
	if (rx_sock)
		sockfd_put(rx_sock);
	/* Make sure no callbacks are outstanding */
	synchronize_rcu();
	/* We do an extra flush before freeing memory,
	 * since jobs can re-queue themselves. */
	vhost_net_flush(n);
	kfree(n->vqs[VHOST_NET_VQ_RX].rxq.queue);
	kfree(n->vqs[VHOST_NET_VQ_TX].xdp);
	kfree(n->dev.vqs);
	if (n->page_frag.page)
		__page_frag_cache_drain(n->page_frag.page, n->refcnt_bias);
	kvfree(n);
	return 0;
}
static struct socket *get_raw_socket(int fd)
{
	int r;
	struct socket *sock = sockfd_lookup(fd, &r);

	if (!sock)
		return ERR_PTR(-ENOTSOCK);

	/* Parameter checking */
	if (sock->sk->sk_type != SOCK_RAW) {
		r = -ESOCKTNOSUPPORT;
		goto err;
	}

	if (sock->sk->sk_family != AF_PACKET) {
		r = -EPFNOSUPPORT;
		goto err;
	}
	return sock;
err:
	sockfd_put(sock);
	return ERR_PTR(r);
}

static struct ptr_ring *get_tap_ptr_ring(int fd)
{
	struct ptr_ring *ring;
	struct file *file = fget(fd);

	if (!file)
		return NULL;
	ring = tun_get_tx_ring(file);
	if (!IS_ERR(ring))
		goto out;
	ring = tap_get_ptr_ring(file);
	if (!IS_ERR(ring))
		goto out;
	ring = NULL;
out:
	fput(file);
	return ring;
}

static struct socket *get_tap_socket(int fd)
{
	struct file *file = fget(fd);
	struct socket *sock;

	if (!file)
		return ERR_PTR(-EBADF);
	sock = tun_get_socket(file);
	if (!IS_ERR(sock))
		return sock;
	sock = tap_get_socket(file);
	if (IS_ERR(sock))
		fput(file);
	return sock;
}

static struct socket *get_socket(int fd)
{
	struct socket *sock;

	/* special case to disable backend */
	if (fd == -1)
		return NULL;
	sock = get_raw_socket(fd);
	if (!IS_ERR(sock))
		return sock;
	sock = get_tap_socket(fd);
	if (!IS_ERR(sock))
		return sock;
	return ERR_PTR(-ENOTSOCK);
}
static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
{
	struct socket *sock, *oldsock;
	struct vhost_virtqueue *vq;
	struct vhost_net_virtqueue *nvq;
	struct vhost_net_ubuf_ref *ubufs, *oldubufs = NULL;
	int r;

	mutex_lock(&n->dev.mutex);
	r = vhost_dev_check_owner(&n->dev);
	if (r)
		goto err;

	if (index >= VHOST_NET_VQ_MAX) {
		r = -ENOBUFS;
		goto err;
	}
	vq = &n->vqs[index].vq;
	nvq = &n->vqs[index];
	mutex_lock(&vq->mutex);

	/* Verify that ring has been setup correctly. */
	if (!vhost_vq_access_ok(vq)) {
		r = -EFAULT;
		goto err_vq;
	}
	sock = get_socket(fd);
	if (IS_ERR(sock)) {
		r = PTR_ERR(sock);
		goto err_vq;
	}

	/* start polling new socket */
	oldsock = vhost_vq_get_backend(vq);
	if (sock != oldsock) {
		ubufs = vhost_net_ubuf_alloc(vq,
					     sock && vhost_sock_zcopy(sock));
		if (IS_ERR(ubufs)) {
			r = PTR_ERR(ubufs);
			goto err_ubufs;
		}

		vhost_net_disable_vq(n, vq);
		vhost_vq_set_backend(vq, sock);
		vhost_net_buf_unproduce(nvq);
		r = vhost_vq_init_access(vq);
		if (r)
			goto err_used;
		r = vhost_net_enable_vq(n, vq);
		if (r)
			goto err_used;
		if (index == VHOST_NET_VQ_RX)
			nvq->rx_ring = get_tap_ptr_ring(fd);

		oldubufs = nvq->ubufs;
		nvq->ubufs = ubufs;

		n->tx_packets = 0;
		n->tx_zcopy_err = 0;
		n->tx_flush = false;
	}

	mutex_unlock(&vq->mutex);

	if (oldubufs) {
		vhost_net_ubuf_put_wait_and_free(oldubufs);
		mutex_lock(&vq->mutex);
		vhost_zerocopy_signal_used(n, vq);
		mutex_unlock(&vq->mutex);
	}

	if (oldsock) {
		vhost_net_flush_vq(n, index);
		sockfd_put(oldsock);
	}

	mutex_unlock(&n->dev.mutex);
	return 0;

err_used:
	vhost_vq_set_backend(vq, oldsock);
	vhost_net_enable_vq(n, vq);
	if (ubufs)
		vhost_net_ubuf_put_wait_and_free(ubufs);
err_ubufs:
	if (sock)
		sockfd_put(sock);
err_vq:
	mutex_unlock(&vq->mutex);
err:
	mutex_unlock(&n->dev.mutex);
	return r;
}
static long vhost_net_reset_owner(struct vhost_net *n)
{
	struct socket *tx_sock = NULL;
	struct socket *rx_sock = NULL;
	long err;
	struct vhost_iotlb *umem;

	mutex_lock(&n->dev.mutex);
	err = vhost_dev_check_owner(&n->dev);
	if (err)
		goto done;
	umem = vhost_dev_reset_owner_prepare();
	if (!umem) {
		err = -ENOMEM;
		goto done;
	}
	vhost_net_stop(n, &tx_sock, &rx_sock);
	vhost_net_flush(n);
	vhost_dev_stop(&n->dev);
	vhost_dev_reset_owner(&n->dev, umem);
	vhost_net_vq_reset(n);
done:
	mutex_unlock(&n->dev.mutex);
	if (tx_sock)
		sockfd_put(tx_sock);
	if (rx_sock)
		sockfd_put(rx_sock);
	return err;
}
static int vhost_net_set_backend_features(struct vhost_net *n, u64 features)
{
	int i;

	mutex_lock(&n->dev.mutex);
	for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
		mutex_lock(&n->vqs[i].vq.mutex);
		n->vqs[i].vq.acked_backend_features = features;
		mutex_unlock(&n->vqs[i].vq.mutex);
	}
	mutex_unlock(&n->dev.mutex);

	return 0;
}
static int vhost_net_set_features(struct vhost_net *n, u64 features)
{
	size_t vhost_hlen, sock_hlen, hdr_len;
	int i;

	hdr_len = (features & ((1ULL << VIRTIO_NET_F_MRG_RXBUF) |
			       (1ULL << VIRTIO_F_VERSION_1))) ?
			sizeof(struct virtio_net_hdr_mrg_rxbuf) :
			sizeof(struct virtio_net_hdr);
	if (features & (1 << VHOST_NET_F_VIRTIO_NET_HDR)) {
		/* vhost provides vnet_hdr */
		vhost_hlen = hdr_len;
		sock_hlen = 0;
	} else {
		/* socket provides vnet_hdr */
		vhost_hlen = 0;
		sock_hlen = hdr_len;
	}
	mutex_lock(&n->dev.mutex);
	if ((features & (1 << VHOST_F_LOG_ALL)) &&
	    !vhost_log_access_ok(&n->dev))
		goto out_unlock;

	if ((features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))) {
		if (vhost_init_device_iotlb(&n->dev, true))
			goto out_unlock;
	}

	for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
		mutex_lock(&n->vqs[i].vq.mutex);
		n->vqs[i].vq.acked_features = features;
		n->vqs[i].vhost_hlen = vhost_hlen;
		n->vqs[i].sock_hlen = sock_hlen;
		mutex_unlock(&n->vqs[i].vq.mutex);
	}
	mutex_unlock(&n->dev.mutex);
	return 0;

out_unlock:
	mutex_unlock(&n->dev.mutex);
	return -EFAULT;
}
static long vhost_net_set_owner(struct vhost_net *n)
{
	int r;

	mutex_lock(&n->dev.mutex);
	if (vhost_dev_has_owner(&n->dev)) {
		r = -EBUSY;
		goto out;
	}
	r = vhost_net_set_ubuf_info(n);
	if (r)
		goto out;
	r = vhost_dev_set_owner(&n->dev);
	if (r)
		vhost_net_clear_ubuf_info(n);
	vhost_net_flush(n);
out:
	mutex_unlock(&n->dev.mutex);
	return r;
}
static long vhost_net_ioctl(struct file *f, unsigned int ioctl,
			    unsigned long arg)
{
	struct vhost_net *n = f->private_data;
	void __user *argp = (void __user *)arg;
	u64 __user *featurep = argp;
	struct vhost_vring_file backend;
	u64 features;
	int r;

	switch (ioctl) {
	case VHOST_NET_SET_BACKEND:
		if (copy_from_user(&backend, argp, sizeof backend))
			return -EFAULT;
		return vhost_net_set_backend(n, backend.index, backend.fd);
	case VHOST_GET_FEATURES:
		features = VHOST_NET_FEATURES;
		if (copy_to_user(featurep, &features, sizeof features))
			return -EFAULT;
		return 0;
	case VHOST_SET_FEATURES:
		if (copy_from_user(&features, featurep, sizeof features))
			return -EFAULT;
		if (features & ~VHOST_NET_FEATURES)
			return -EOPNOTSUPP;
		return vhost_net_set_features(n, features);
	case VHOST_GET_BACKEND_FEATURES:
		features = VHOST_NET_BACKEND_FEATURES;
		if (copy_to_user(featurep, &features, sizeof(features)))
			return -EFAULT;
		return 0;
	case VHOST_SET_BACKEND_FEATURES:
		if (copy_from_user(&features, featurep, sizeof(features)))
			return -EFAULT;
		if (features & ~VHOST_NET_BACKEND_FEATURES)
			return -EOPNOTSUPP;
		return vhost_net_set_backend_features(n, features);
	case VHOST_RESET_OWNER:
		return vhost_net_reset_owner(n);
	case VHOST_SET_OWNER:
		return vhost_net_set_owner(n);
	default:
		mutex_lock(&n->dev.mutex);
		r = vhost_dev_ioctl(&n->dev, ioctl, argp);
		if (r == -ENOIOCTLCMD)
			r = vhost_vring_ioctl(&n->dev, ioctl, argp);
		else
			vhost_net_flush(n);
		mutex_unlock(&n->dev.mutex);
		return r;
	}
}
static ssize_t vhost_net_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct vhost_net *n = file->private_data;
	struct vhost_dev *dev = &n->dev;
	int noblock = file->f_flags & O_NONBLOCK;

	return vhost_chr_read_iter(dev, to, noblock);
}

static ssize_t vhost_net_chr_write_iter(struct kiocb *iocb,
					struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct vhost_net *n = file->private_data;
	struct vhost_dev *dev = &n->dev;

	return vhost_chr_write_iter(dev, from);
}

static __poll_t vhost_net_chr_poll(struct file *file, poll_table *wait)
{
	struct vhost_net *n = file->private_data;
	struct vhost_dev *dev = &n->dev;

	return vhost_chr_poll(file, dev, wait);
}
static const struct file_operations vhost_net_fops = {
	.owner          = THIS_MODULE,
	.release        = vhost_net_release,
	.read_iter      = vhost_net_chr_read_iter,
	.write_iter     = vhost_net_chr_write_iter,
	.poll           = vhost_net_chr_poll,
	.unlocked_ioctl = vhost_net_ioctl,
	.compat_ioctl   = compat_ptr_ioctl,
	.open           = vhost_net_open,
	.llseek		= noop_llseek,
};

static struct miscdevice vhost_net_misc = {
	.minor = VHOST_NET_MINOR,
	.name = "vhost-net",
	.fops = &vhost_net_fops,
};
static int vhost_net_init(void)
{
	if (experimental_zcopytx)
		vhost_net_enable_zcopy(VHOST_NET_VQ_TX);
	return misc_register(&vhost_net_misc);
}
module_init(vhost_net_init);

static void vhost_net_exit(void)
{
	misc_deregister(&vhost_net_misc);
}
module_exit(vhost_net_exit);

MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Michael S. Tsirkin");
MODULE_DESCRIPTION("Host kernel accelerator for virtio net");
MODULE_ALIAS_MISCDEV(VHOST_NET_MINOR);
MODULE_ALIAS("devname:vhost-net");