/* Copyright (C) 2009 Red Hat, Inc.
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 *
 * virtio-net server in host kernel.
 */

#include <linux/compat.h>
#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/virtio_net.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/sched/clock.h>
#include <linux/sched/signal.h>
#include <linux/vmalloc.h>

#include <linux/net.h>
#include <linux/if_packet.h>
#include <linux/if_arp.h>
#include <linux/if_tun.h>
#include <linux/if_macvlan.h>
#include <linux/if_tap.h>
#include <linux/if_vlan.h>
#include <linux/skb_array.h>
#include <linux/skbuff.h>

#include <net/sock.h>
#include <net/xdp.h>

#include "vhost.h"
static int experimental_zcopytx = 1;
module_param(experimental_zcopytx, int, 0444);
MODULE_PARM_DESC(experimental_zcopytx, "Enable Zero Copy TX;"
		                       " 1 -Enable; 0 - Disable");

/* Max number of bytes transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others. */
#define VHOST_NET_WEIGHT 0x80000

/* Max number of packets transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others with small
 * pkts.
 */
#define VHOST_NET_PKT_WEIGHT 256

/* MAX number of TX used buffers for outstanding zerocopy */
#define VHOST_MAX_PEND 128
#define VHOST_GOODCOPY_LEN 256

/*
 * For transmit, used buffer len is unused; we override it to track buffer
 * status internally; used for zerocopy tx only.
 */
/* Lower device DMA failed */
#define VHOST_DMA_FAILED_LEN	((__force __virtio32)3)
/* Lower device DMA done */
#define VHOST_DMA_DONE_LEN	((__force __virtio32)2)
/* Lower device DMA in progress */
#define VHOST_DMA_IN_PROGRESS	((__force __virtio32)1)
#define VHOST_DMA_CLEAR_LEN	((__force __virtio32)0)

#define VHOST_DMA_IS_DONE(len) ((__force u32)(len) >= (__force u32)VHOST_DMA_DONE_LEN)
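
/* A used-ring len value >= VHOST_DMA_DONE_LEN means the lower device has
 * finished with the buffer (successfully or not), which is why the macro
 * above compares with >= rather than ==.
 */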
enum {
	VHOST_NET_FEATURES = VHOST_FEATURES |
			     (1ULL << VHOST_NET_F_VIRTIO_NET_HDR) |
			     (1ULL << VIRTIO_NET_F_MRG_RXBUF) |
			     (1ULL << VIRTIO_F_IOMMU_PLATFORM)
};

enum {
	VHOST_NET_BACKEND_FEATURES = (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2)
};

enum {
	VHOST_NET_VQ_RX = 0,
	VHOST_NET_VQ_TX = 1,
	VHOST_NET_VQ_MAX = 2,
};

struct vhost_net_ubuf_ref {
	/* refcount follows semantics similar to kref:
	 *  0: object is released
	 *  1: no outstanding ubufs
	 * >1: outstanding ubufs
	 */
	atomic_t refcount;
	wait_queue_head_t wait;
	struct vhost_virtqueue *vq;
};

#define VHOST_NET_BATCH 64
struct vhost_net_buf {
	void **queue;
	int tail;
	int head;
};

struct vhost_net_virtqueue {
	struct vhost_virtqueue vq;
	size_t vhost_hlen;
	size_t sock_hlen;
	/* vhost zerocopy support fields below: */
	/* last used idx for outstanding DMA zerocopy buffers */
	int upend_idx;
	/* For TX, first used idx for DMA done zerocopy buffers
	 * For RX, number of batched heads
	 */
	int done_idx;
	/* Number of XDP frames batched */
	int batched_xdp;
	/* an array of userspace buffers info */
	struct ubuf_info *ubuf_info;
	/* Reference counting for outstanding ubufs.
	 * Protected by vq mutex. Writers must also take device mutex. */
	struct vhost_net_ubuf_ref *ubufs;
	struct ptr_ring *rx_ring;
	struct vhost_net_buf rxq;
	/* Batched XDP buffs */
	struct xdp_buff *xdp;
};

struct vhost_net {
	struct vhost_dev dev;
	struct vhost_net_virtqueue vqs[VHOST_NET_VQ_MAX];
	struct vhost_poll poll[VHOST_NET_VQ_MAX];
	/* Number of TX recently submitted.
	 * Protected by tx vq lock. */
	unsigned tx_packets;
	/* Number of times zerocopy TX recently failed.
	 * Protected by tx vq lock. */
	unsigned tx_zcopy_err;
	/* Flush in progress. Protected by tx vq lock. */
	bool tx_flush;
};

static unsigned vhost_net_zcopy_mask __read_mostly;
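
/* The vhost_net_buf helpers below manage a small window [head, tail) over
 * rxq->queue, an array of pointers bulk-consumed from the backend ptr_ring,
 * so RX length peeking does not hit the ring for every packet.
 */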
static void *vhost_net_buf_get_ptr(struct vhost_net_buf *rxq)
{
	if (rxq->tail != rxq->head)
		return rxq->queue[rxq->head];
	else
		return NULL;
}

static int vhost_net_buf_get_size(struct vhost_net_buf *rxq)
{
	return rxq->tail - rxq->head;
}

static int vhost_net_buf_is_empty(struct vhost_net_buf *rxq)
{
	return rxq->tail == rxq->head;
}

static void *vhost_net_buf_consume(struct vhost_net_buf *rxq)
{
	void *ret = vhost_net_buf_get_ptr(rxq);

	++rxq->head;
	return ret;
}

static int vhost_net_buf_produce(struct vhost_net_virtqueue *nvq)
{
	struct vhost_net_buf *rxq = &nvq->rxq;

	rxq->head = 0;
	rxq->tail = ptr_ring_consume_batched(nvq->rx_ring, rxq->queue,
					     VHOST_NET_BATCH);
	return rxq->tail;
}

static void vhost_net_buf_unproduce(struct vhost_net_virtqueue *nvq)
{
	struct vhost_net_buf *rxq = &nvq->rxq;

	if (nvq->rx_ring && !vhost_net_buf_is_empty(rxq)) {
		ptr_ring_unconsume(nvq->rx_ring, rxq->queue + rxq->head,
				   vhost_net_buf_get_size(rxq),
				   tun_ptr_free);
		rxq->head = rxq->tail = 0;
	}
}

static int vhost_net_buf_peek_len(void *ptr)
{
	if (tun_is_xdp_frame(ptr)) {
		struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);

		return xdpf->len;
	}

	return __skb_array_len_with_tag(ptr);
}

static int vhost_net_buf_peek(struct vhost_net_virtqueue *nvq)
{
	struct vhost_net_buf *rxq = &nvq->rxq;

	if (!vhost_net_buf_is_empty(rxq))
		goto out;

	if (!vhost_net_buf_produce(nvq))
		return 0;

out:
	return vhost_net_buf_peek_len(vhost_net_buf_get_ptr(rxq));
}

static void vhost_net_buf_init(struct vhost_net_buf *rxq)
{
	rxq->head = rxq->tail = 0;
}

static void vhost_net_enable_zcopy(int vq)
{
	vhost_net_zcopy_mask |= 0x1 << vq;
}
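
/* ubuf refcount lifecycle: allocated at 1, incremented for every zerocopy
 * packet in flight and dropped from the zerocopy completion callback;
 * vhost_net_ubuf_put_and_wait() waits for it to drain before teardown.
 */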
static struct vhost_net_ubuf_ref *
vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy)
{
	struct vhost_net_ubuf_ref *ubufs;
	/* No zero copy backend? Nothing to count. */
	if (!zcopy)
		return NULL;
	ubufs = kmalloc(sizeof(*ubufs), GFP_KERNEL);
	if (!ubufs)
		return ERR_PTR(-ENOMEM);
	atomic_set(&ubufs->refcount, 1);
	init_waitqueue_head(&ubufs->wait);
	ubufs->vq = vq;
	return ubufs;
}

static int vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs)
{
	int r = atomic_sub_return(1, &ubufs->refcount);
	if (unlikely(!r))
		wake_up(&ubufs->wait);
	return r;
}

static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs)
{
	vhost_net_ubuf_put(ubufs);
	wait_event(ubufs->wait, !atomic_read(&ubufs->refcount));
}

static void vhost_net_ubuf_put_wait_and_free(struct vhost_net_ubuf_ref *ubufs)
{
	vhost_net_ubuf_put_and_wait(ubufs);
	kfree(ubufs);
}

static void vhost_net_clear_ubuf_info(struct vhost_net *n)
{
	int i;

	for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
		kfree(n->vqs[i].ubuf_info);
		n->vqs[i].ubuf_info = NULL;
	}
}

static int vhost_net_set_ubuf_info(struct vhost_net *n)
{
	bool zcopy;
	int i;

	for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
		zcopy = vhost_net_zcopy_mask & (0x1 << i);
		if (!zcopy)
			continue;
		n->vqs[i].ubuf_info =
			kmalloc_array(UIO_MAXIOV,
				      sizeof(*n->vqs[i].ubuf_info),
				      GFP_KERNEL);
		if (!n->vqs[i].ubuf_info)
			goto err;
	}
	return 0;

err:
	vhost_net_clear_ubuf_info(n);
	return -ENOMEM;
}
static void vhost_net_vq_reset(struct vhost_net *n)
{
	int i;

	vhost_net_clear_ubuf_info(n);

	for (i = 0; i < VHOST_NET_VQ_MAX; i++) {
		n->vqs[i].done_idx = 0;
		n->vqs[i].upend_idx = 0;
		n->vqs[i].ubufs = NULL;
		n->vqs[i].vhost_hlen = 0;
		n->vqs[i].sock_hlen = 0;
		vhost_net_buf_init(&n->vqs[i].rxq);
	}
}

static void vhost_net_tx_packet(struct vhost_net *net)
{
	++net->tx_packets;
	if (net->tx_packets < 1024)
		return;
	net->tx_packets = 0;
	net->tx_zcopy_err = 0;
}

static void vhost_net_tx_err(struct vhost_net *net)
{
	++net->tx_zcopy_err;
}

static bool vhost_net_tx_select_zcopy(struct vhost_net *net)
{
	/* TX flush waits for outstanding DMAs to be done.
	 * Don't start new DMAs.
	 */
	return !net->tx_flush &&
		net->tx_packets / 64 >= net->tx_zcopy_err;
}

static bool vhost_sock_zcopy(struct socket *sock)
{
	return unlikely(experimental_zcopytx) &&
		sock_flag(sock->sk, SOCK_ZEROCOPY);
}

static bool vhost_sock_xdp(struct socket *sock)
{
	return sock_flag(sock->sk, SOCK_XDP);
}
/* In case of DMA done not in order in lower device driver for some reason.
 * upend_idx is used to track end of used idx, done_idx is used to track head
 * of used idx. Once lower device DMA done contiguously, we will signal KVM
 * guest used idx.
 */
static void vhost_zerocopy_signal_used(struct vhost_net *net,
				       struct vhost_virtqueue *vq)
{
	struct vhost_net_virtqueue *nvq =
		container_of(vq, struct vhost_net_virtqueue, vq);
	int i, add;
	int j = 0;

	for (i = nvq->done_idx; i != nvq->upend_idx; i = (i + 1) % UIO_MAXIOV) {
		if (vq->heads[i].len == VHOST_DMA_FAILED_LEN)
			vhost_net_tx_err(net);
		if (VHOST_DMA_IS_DONE(vq->heads[i].len)) {
			vq->heads[i].len = VHOST_DMA_CLEAR_LEN;
			++j;
		} else
			break;
	}
	while (j) {
		add = min(UIO_MAXIOV - nvq->done_idx, j);
		vhost_add_used_and_signal_n(vq->dev, vq,
					    &vq->heads[nvq->done_idx], add);
		nvq->done_idx = (nvq->done_idx + add) % UIO_MAXIOV;
		j -= add;
	}
}

static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success)
{
	struct vhost_net_ubuf_ref *ubufs = ubuf->ctx;
	struct vhost_virtqueue *vq = ubufs->vq;
	int cnt;

	rcu_read_lock_bh();

	/* set len to mark this desc buffers done DMA */
	vq->heads[ubuf->desc].len = success ?
		VHOST_DMA_DONE_LEN : VHOST_DMA_FAILED_LEN;
	cnt = vhost_net_ubuf_put(ubufs);

	/*
	 * Trigger polling thread if guest stopped submitting new buffers:
	 * in this case, the refcount after decrement will eventually reach 1.
	 * We also trigger polling periodically after each 16 packets
	 * (the value 16 here is more or less arbitrary, it's tuned to trigger
	 * less than 10% of times).
	 */
	if (cnt <= 1 || !(cnt % 16))
		vhost_poll_queue(&vq->poll);

	rcu_read_unlock_bh();
}
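
/* local_clock() is in nanoseconds; shifting right by 10 divides by 1024,
 * giving roughly microsecond granularity for the busy-poll deadline below.
 */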
static inline unsigned long busy_clock(void)
{
	return local_clock() >> 10;
}

static bool vhost_can_busy_poll(unsigned long endtime)
{
	return likely(!need_resched() && !time_after(busy_clock(), endtime) &&
		      !signal_pending(current));
}

static void vhost_net_disable_vq(struct vhost_net *n,
				 struct vhost_virtqueue *vq)
{
	struct vhost_net_virtqueue *nvq =
		container_of(vq, struct vhost_net_virtqueue, vq);
	struct vhost_poll *poll = n->poll + (nvq - n->vqs);
	if (!vq->private_data)
		return;
	vhost_poll_stop(poll);
}

static int vhost_net_enable_vq(struct vhost_net *n,
			       struct vhost_virtqueue *vq)
{
	struct vhost_net_virtqueue *nvq =
		container_of(vq, struct vhost_net_virtqueue, vq);
	struct vhost_poll *poll = n->poll + (nvq - n->vqs);
	struct socket *sock;

	sock = vq->private_data;
	if (!sock)
		return 0;

	return vhost_poll_start(poll, sock->file);
}

static void vhost_net_signal_used(struct vhost_net_virtqueue *nvq)
{
	struct vhost_virtqueue *vq = &nvq->vq;
	struct vhost_dev *dev = vq->dev;

	if (!nvq->done_idx)
		return;

	vhost_add_used_and_signal_n(dev, vq, vq->heads, nvq->done_idx);
	nvq->done_idx = 0;
}
static void vhost_tx_batch(struct vhost_net *net,
			   struct vhost_net_virtqueue *nvq,
			   struct socket *sock,
			   struct msghdr *msghdr)
{
	struct tun_msg_ctl ctl = {
		.type = TUN_MSG_PTR,
		.num = nvq->batched_xdp,
		.ptr = nvq->xdp,
	};
	int err;

	if (nvq->batched_xdp == 0)
		goto signal_used;

	msghdr->msg_control = &ctl;
	err = sock->ops->sendmsg(sock, msghdr, 0);
	if (unlikely(err < 0)) {
		vq_err(&nvq->vq, "Fail to batch sending packets\n");
		return;
	}

signal_used:
	vhost_net_signal_used(nvq);
	nvq->batched_xdp = 0;
}

static int sock_has_rx_data(struct socket *sock)
{
	if (unlikely(!sock))
		return 0;

	if (sock->ops->peek_len)
		return sock->ops->peek_len(sock);

	return skb_queue_empty(&sock->sk->sk_receive_queue);
}
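
/* Busy polling is done on the paired virtqueue: when called from the RX
 * path (poll_rx) the TX vq is polled, and vice versa, while also watching
 * the RX socket for incoming data.
 */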
static void vhost_net_busy_poll_try_queue(struct vhost_net *net,
					  struct vhost_virtqueue *vq)
{
	if (!vhost_vq_avail_empty(&net->dev, vq)) {
		vhost_poll_queue(&vq->poll);
	} else if (unlikely(vhost_enable_notify(&net->dev, vq))) {
		vhost_disable_notify(&net->dev, vq);
		vhost_poll_queue(&vq->poll);
	}
}

static void vhost_net_busy_poll(struct vhost_net *net,
				struct vhost_virtqueue *rvq,
				struct vhost_virtqueue *tvq,
				bool *busyloop_intr,
				bool poll_rx)
{
	unsigned long busyloop_timeout;
	unsigned long endtime;
	struct socket *sock;
	struct vhost_virtqueue *vq = poll_rx ? tvq : rvq;

	mutex_lock_nested(&vq->mutex, poll_rx ? VHOST_NET_VQ_TX : VHOST_NET_VQ_RX);
	vhost_disable_notify(&net->dev, vq);
	sock = rvq->private_data;

	busyloop_timeout = poll_rx ? rvq->busyloop_timeout :
				     tvq->busyloop_timeout;

	preempt_disable();
	endtime = busy_clock() + busyloop_timeout;

	while (vhost_can_busy_poll(endtime)) {
		if (vhost_has_work(&net->dev)) {
			*busyloop_intr = true;
			break;
		}

		if ((sock_has_rx_data(sock) &&
		     !vhost_vq_avail_empty(&net->dev, rvq)) ||
		    !vhost_vq_avail_empty(&net->dev, tvq))
			break;

		cpu_relax();
	}

	preempt_enable();

	if (poll_rx || sock_has_rx_data(sock))
		vhost_net_busy_poll_try_queue(net, vq);
	else if (!poll_rx) /* On tx here, sock has no rx data. */
		vhost_enable_notify(&net->dev, rvq);

	mutex_unlock(&vq->mutex);
}
static int vhost_net_tx_get_vq_desc(struct vhost_net *net,
				    struct vhost_net_virtqueue *tnvq,
				    unsigned int *out_num, unsigned int *in_num,
				    struct msghdr *msghdr, bool *busyloop_intr)
{
	struct vhost_net_virtqueue *rnvq = &net->vqs[VHOST_NET_VQ_RX];
	struct vhost_virtqueue *rvq = &rnvq->vq;
	struct vhost_virtqueue *tvq = &tnvq->vq;

	int r = vhost_get_vq_desc(tvq, tvq->iov, ARRAY_SIZE(tvq->iov),
				  out_num, in_num, NULL, NULL);

	if (r == tvq->num && tvq->busyloop_timeout) {
		/* Flush batched packets first */
		if (!vhost_sock_zcopy(tvq->private_data))
			vhost_tx_batch(net, tnvq, tvq->private_data, msghdr);

		vhost_net_busy_poll(net, rvq, tvq, busyloop_intr, false);

		r = vhost_get_vq_desc(tvq, tvq->iov, ARRAY_SIZE(tvq->iov),
				      out_num, in_num, NULL, NULL);
	}

	return r;
}

static bool vhost_exceeds_maxpend(struct vhost_net *net)
{
	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
	struct vhost_virtqueue *vq = &nvq->vq;

	return (nvq->upend_idx + UIO_MAXIOV - nvq->done_idx) % UIO_MAXIOV >
	       min_t(unsigned int, VHOST_MAX_PEND, vq->num >> 2);
}
static size_t init_iov_iter(struct vhost_virtqueue *vq, struct iov_iter *iter,
			    size_t hdr_size, int out)
{
	/* Skip header. TODO: support TSO. */
	size_t len = iov_length(vq->iov, out);

	iov_iter_init(iter, WRITE, vq->iov, out, len);
	iov_iter_advance(iter, hdr_size);

	return iov_iter_count(iter);
}

static bool vhost_exceeds_weight(int pkts, int total_len)
{
	return total_len >= VHOST_NET_WEIGHT ||
	       pkts >= VHOST_NET_PKT_WEIGHT;
}

static int get_tx_bufs(struct vhost_net *net,
		       struct vhost_net_virtqueue *nvq,
		       struct msghdr *msg,
		       unsigned int *out, unsigned int *in,
		       size_t *len, bool *busyloop_intr)
{
	struct vhost_virtqueue *vq = &nvq->vq;
	int ret;

	ret = vhost_net_tx_get_vq_desc(net, nvq, out, in, msg, busyloop_intr);

	if (ret < 0 || ret == vq->num)
		return ret;

	if (*in) {
		vq_err(vq, "Unexpected descriptor format for TX: out %d, int %d\n",
			*out, *in);
		return -EFAULT;
	}

	/* Sanity check */
	*len = init_iov_iter(vq, &msg->msg_iter, nvq->vhost_hlen, *out);
	if (*len == 0) {
		vq_err(vq, "Unexpected header len for TX: %zd expected %zd\n",
			*len, nvq->vhost_hlen);
		return -EFAULT;
	}

	return ret;
}

static bool tx_can_batch(struct vhost_virtqueue *vq, size_t total_len)
{
	return total_len < VHOST_NET_WEIGHT &&
	       !vhost_vq_avail_empty(vq->dev, vq);
}

#define VHOST_NET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)
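
/* The XDP batching path below copies each packet into a page fragment laid
 * out as [pad: VHOST_NET_RX_PAD + headroom + sock_hlen][packet][shared_info],
 * then hands the whole batch to the tap backend in one sendmsg() call
 * (see vhost_tx_batch() above).
 */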
static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq,
			       struct iov_iter *from)
{
	struct vhost_virtqueue *vq = &nvq->vq;
	struct socket *sock = vq->private_data;
	struct page_frag *alloc_frag = &current->task_frag;
	struct virtio_net_hdr *gso;
	struct xdp_buff *xdp = &nvq->xdp[nvq->batched_xdp];
	struct tun_xdp_hdr *hdr;
	size_t len = iov_iter_count(from);
	int headroom = vhost_sock_xdp(sock) ? XDP_PACKET_HEADROOM : 0;
	int buflen = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	int pad = SKB_DATA_ALIGN(VHOST_NET_RX_PAD + headroom + nvq->sock_hlen);
	int sock_hlen = nvq->sock_hlen;
	void *buf;
	int copied;

	if (unlikely(len < nvq->sock_hlen))
		return -EFAULT;

	if (SKB_DATA_ALIGN(len + pad) +
	    SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) > PAGE_SIZE)
		return -ENOSPC;

	buflen += SKB_DATA_ALIGN(len + pad);
	alloc_frag->offset = ALIGN((u64)alloc_frag->offset, SMP_CACHE_BYTES);
	if (unlikely(!skb_page_frag_refill(buflen, alloc_frag, GFP_KERNEL)))
		return -ENOMEM;

	buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
	copied = copy_page_from_iter(alloc_frag->page,
				     alloc_frag->offset +
				     offsetof(struct tun_xdp_hdr, gso),
				     sock_hlen, from);
	if (copied != sock_hlen)
		return -EFAULT;

	hdr = buf;
	gso = &hdr->gso;

	if ((gso->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
	    vhost16_to_cpu(vq, gso->csum_start) +
	    vhost16_to_cpu(vq, gso->csum_offset) + 2 >
	    vhost16_to_cpu(vq, gso->hdr_len)) {
		gso->hdr_len = cpu_to_vhost16(vq,
			       vhost16_to_cpu(vq, gso->csum_start) +
			       vhost16_to_cpu(vq, gso->csum_offset) + 2);

		if (vhost16_to_cpu(vq, gso->hdr_len) > len)
			return -EINVAL;
	}

	len -= sock_hlen;
	copied = copy_page_from_iter(alloc_frag->page,
				     alloc_frag->offset + pad,
				     len, from);
	if (copied != len)
		return -EFAULT;

	xdp->data_hard_start = buf;
	xdp->data = buf + pad;
	xdp->data_end = xdp->data + len;
	hdr->buflen = buflen;

	get_page(alloc_frag->page);
	alloc_frag->offset += buflen;

	++nvq->batched_xdp;

	return 0;
}
static void handle_tx_copy(struct vhost_net *net, struct socket *sock)
{
	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
	struct vhost_virtqueue *vq = &nvq->vq;
	unsigned out, in;
	int head;
	struct msghdr msg = {
		.msg_name = NULL,
		.msg_namelen = 0,
		.msg_control = NULL,
		.msg_controllen = 0,
		.msg_flags = MSG_DONTWAIT,
	};
	size_t len, total_len = 0;
	int err;
	int sent_pkts = 0;
	bool sock_can_batch = (sock->sk->sk_sndbuf == INT_MAX);

	for (;;) {
		bool busyloop_intr = false;

		if (nvq->done_idx == VHOST_NET_BATCH)
			vhost_tx_batch(net, nvq, sock, &msg);

		head = get_tx_bufs(net, nvq, &msg, &out, &in, &len,
				   &busyloop_intr);
		/* On error, stop handling until the next kick. */
		if (unlikely(head < 0))
			break;
		/* Nothing new?  Wait for eventfd to tell us they refilled. */
		if (head == vq->num) {
			if (unlikely(busyloop_intr)) {
				vhost_poll_queue(&vq->poll);
			} else if (unlikely(vhost_enable_notify(&net->dev,
								vq))) {
				vhost_disable_notify(&net->dev, vq);
				continue;
			}
			break;
		}

		total_len += len;

		/* For simplicity, TX batching is only enabled if
		 * sndbuf is unlimited.
		 */
		if (sock_can_batch) {
			err = vhost_net_build_xdp(nvq, &msg.msg_iter);
			if (!err) {
				goto done;
			} else if (unlikely(err != -ENOSPC)) {
				vhost_tx_batch(net, nvq, sock, &msg);
				vhost_discard_vq_desc(vq, 1);
				vhost_net_enable_vq(net, vq);
				break;
			}

			/* We can't build XDP buff, go for single
			 * packet path but let's flush batched
			 * packets.
			 */
			vhost_tx_batch(net, nvq, sock, &msg);
			msg.msg_control = NULL;
		} else {
			if (tx_can_batch(vq, total_len))
				msg.msg_flags |= MSG_MORE;
			else
				msg.msg_flags &= ~MSG_MORE;
		}

		/* TODO: Check specific error and bomb out unless ENOBUFS? */
		err = sock->ops->sendmsg(sock, &msg, len);
		if (unlikely(err < 0)) {
			vhost_discard_vq_desc(vq, 1);
			vhost_net_enable_vq(net, vq);
			break;
		}
		if (err != len)
			pr_debug("Truncated TX packet: len %d != %zd\n",
				 err, len);
done:
		vq->heads[nvq->done_idx].id = cpu_to_vhost32(vq, head);
		vq->heads[nvq->done_idx].len = 0;
		++nvq->done_idx;
		if (vhost_exceeds_weight(++sent_pkts, total_len)) {
			vhost_poll_queue(&vq->poll);
			break;
		}
	}

	vhost_tx_batch(net, nvq, sock, &msg);
}
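
/* The zerocopy path pins guest buffers until the lower device completes the
 * DMA: completions arrive (possibly out of order) via
 * vhost_zerocopy_callback(), and vhost_zerocopy_signal_used() publishes used
 * entries to the guest in order.
 */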
static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
{
	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
	struct vhost_virtqueue *vq = &nvq->vq;
	unsigned out, in;
	int head;
	struct msghdr msg = {
		.msg_name = NULL,
		.msg_namelen = 0,
		.msg_control = NULL,
		.msg_controllen = 0,
		.msg_flags = MSG_DONTWAIT,
	};
	struct tun_msg_ctl ctl;
	size_t len, total_len = 0;
	int err;
	struct vhost_net_ubuf_ref *uninitialized_var(ubufs);
	bool zcopy_used;
	int sent_pkts = 0;

	for (;;) {
		bool busyloop_intr;

		/* Release DMAs done buffers first */
		vhost_zerocopy_signal_used(net, vq);

		busyloop_intr = false;
		head = get_tx_bufs(net, nvq, &msg, &out, &in, &len,
				   &busyloop_intr);
		/* On error, stop handling until the next kick. */
		if (unlikely(head < 0))
			break;
		/* Nothing new?  Wait for eventfd to tell us they refilled. */
		if (head == vq->num) {
			if (unlikely(busyloop_intr)) {
				vhost_poll_queue(&vq->poll);
			} else if (unlikely(vhost_enable_notify(&net->dev, vq))) {
				vhost_disable_notify(&net->dev, vq);
				continue;
			}
			break;
		}

		zcopy_used = len >= VHOST_GOODCOPY_LEN
			     && !vhost_exceeds_maxpend(net)
			     && vhost_net_tx_select_zcopy(net);

		/* use msg_control to pass vhost zerocopy ubuf info to skb */
		if (zcopy_used) {
			struct ubuf_info *ubuf;
			ubuf = nvq->ubuf_info + nvq->upend_idx;

			vq->heads[nvq->upend_idx].id = cpu_to_vhost32(vq, head);
			vq->heads[nvq->upend_idx].len = VHOST_DMA_IN_PROGRESS;
			ubuf->callback = vhost_zerocopy_callback;
			ubuf->ctx = nvq->ubufs;
			ubuf->desc = nvq->upend_idx;
			refcount_set(&ubuf->refcnt, 1);
			msg.msg_control = &ctl;
			ctl.type = TUN_MSG_UBUF;
			ctl.ptr = ubuf;
			msg.msg_controllen = sizeof(ctl);
			ubufs = nvq->ubufs;
			atomic_inc(&ubufs->refcount);
			nvq->upend_idx = (nvq->upend_idx + 1) % UIO_MAXIOV;
		} else {
			msg.msg_control = NULL;
			ubufs = NULL;
		}
		total_len += len;
		if (tx_can_batch(vq, total_len) &&
		    likely(!vhost_exceeds_maxpend(net))) {
			msg.msg_flags |= MSG_MORE;
		} else {
			msg.msg_flags &= ~MSG_MORE;
		}

		/* TODO: Check specific error and bomb out unless ENOBUFS? */
		err = sock->ops->sendmsg(sock, &msg, len);
		if (unlikely(err < 0)) {
			if (zcopy_used) {
				vhost_net_ubuf_put(ubufs);
				nvq->upend_idx = ((unsigned)nvq->upend_idx - 1)
					% UIO_MAXIOV;
			}
			vhost_discard_vq_desc(vq, 1);
			vhost_net_enable_vq(net, vq);
			break;
		}
		if (err != len)
			pr_debug("Truncated TX packet: "
				 " len %d != %zd\n", err, len);
		if (!zcopy_used)
			vhost_add_used_and_signal(&net->dev, vq, head, 0);
		else
			vhost_zerocopy_signal_used(net, vq);
		vhost_net_tx_packet(net);
		if (unlikely(vhost_exceeds_weight(++sent_pkts, total_len))) {
			vhost_poll_queue(&vq->poll);
			break;
		}
	}
}
/* Expects to be always run from workqueue - which acts as
 * read-size critical section for our kind of RCU. */
static void handle_tx(struct vhost_net *net)
{
	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
	struct vhost_virtqueue *vq = &nvq->vq;
	struct socket *sock;

	mutex_lock_nested(&vq->mutex, VHOST_NET_VQ_TX);
	sock = vq->private_data;
	if (!sock)
		goto out;

	if (!vq_iotlb_prefetch(vq))
		goto out;

	vhost_disable_notify(&net->dev, vq);
	vhost_net_disable_vq(net, vq);

	if (vhost_sock_zcopy(sock))
		handle_tx_zerocopy(net, sock);
	else
		handle_tx_copy(net, sock);

out:
	mutex_unlock(&vq->mutex);
}
static int peek_head_len(struct vhost_net_virtqueue *rvq, struct sock *sk)
{
	struct sk_buff *head;
	int len = 0;
	unsigned long flags;

	if (rvq->rx_ring)
		return vhost_net_buf_peek(rvq);

	spin_lock_irqsave(&sk->sk_receive_queue.lock, flags);
	head = skb_peek(&sk->sk_receive_queue);
	if (likely(head)) {
		len = head->len;
		if (skb_vlan_tag_present(head))
			len += VLAN_HLEN;
	}

	spin_unlock_irqrestore(&sk->sk_receive_queue.lock, flags);
	return len;
}

static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk,
				      bool *busyloop_intr)
{
	struct vhost_net_virtqueue *rnvq = &net->vqs[VHOST_NET_VQ_RX];
	struct vhost_net_virtqueue *tnvq = &net->vqs[VHOST_NET_VQ_TX];
	struct vhost_virtqueue *rvq = &rnvq->vq;
	struct vhost_virtqueue *tvq = &tnvq->vq;
	int len = peek_head_len(rnvq, sk);

	if (!len && rvq->busyloop_timeout) {
		/* Flush batched heads first */
		vhost_net_signal_used(rnvq);
		/* Both tx vq and rx socket were polled here */
		vhost_net_busy_poll(net, rvq, tvq, busyloop_intr, true);

		len = peek_head_len(rnvq, sk);
	}

	return len;
}
/* This is a multi-buffer version of vhost_get_desc, that works if
 *	vq has read descriptors only.
 * @vq		- the relevant virtqueue
 * @datalen	- data length we'll be reading
 * @iovcount	- returned count of io vectors we fill
 * @log		- vhost log
 * @log_num	- log offset
 * @quota	- headcount quota, 1 for big buffer
 *	returns number of buffer heads allocated, negative on error
 */
static int get_rx_bufs(struct vhost_virtqueue *vq,
		       struct vring_used_elem *heads,
		       int datalen,
		       unsigned *iovcount,
		       struct vhost_log *log,
		       unsigned *log_num,
		       unsigned int quota)
{
	unsigned int out, in;
	int seg = 0;
	int headcount = 0;
	unsigned d;
	int r, nlogs = 0;
	/* len is always initialized before use since we are always called with
	 * datalen > 0.
	 */
	u32 uninitialized_var(len);

	while (datalen > 0 && headcount < quota) {
		if (unlikely(seg >= UIO_MAXIOV)) {
			r = -ENOBUFS;
			goto err;
		}
		r = vhost_get_vq_desc(vq, vq->iov + seg,
				      ARRAY_SIZE(vq->iov) - seg, &out,
				      &in, log, log_num);
		if (unlikely(r < 0))
			goto err;

		d = r;
		if (d == vq->num) {
			r = 0;
			goto err;
		}
		if (unlikely(out || in <= 0)) {
			vq_err(vq, "unexpected descriptor format for RX: "
				"out %d, in %d\n", out, in);
			r = -EINVAL;
			goto err;
		}
		if (unlikely(log)) {
			nlogs += *log_num;
			log += *log_num;
		}
		heads[headcount].id = cpu_to_vhost32(vq, d);
		len = iov_length(vq->iov + seg, in);
		heads[headcount].len = cpu_to_vhost32(vq, len);
		datalen -= len;
		++headcount;
		seg += in;
	}
	heads[headcount - 1].len = cpu_to_vhost32(vq, len + datalen);
	*iovcount = seg;
	if (unlikely(log))
		*log_num = nlogs;

	/* Detect overrun */
	if (unlikely(datalen > 0)) {
		r = UIO_MAXIOV + 1;
		goto err;
	}
	return headcount;
err:
	vhost_discard_vq_desc(vq, headcount);
	return r;
}
/* Expects to be always run from workqueue - which acts as
 * read-size critical section for our kind of RCU. */
static void handle_rx(struct vhost_net *net)
{
	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_RX];
	struct vhost_virtqueue *vq = &nvq->vq;
	unsigned uninitialized_var(in), log;
	struct vhost_log *vq_log;
	struct msghdr msg = {
		.msg_name = NULL,
		.msg_namelen = 0,
		.msg_control = NULL, /* FIXME: get and handle RX aux data. */
		.msg_controllen = 0,
		.msg_flags = MSG_DONTWAIT,
	};
	struct virtio_net_hdr hdr = {
		.flags = 0,
		.gso_type = VIRTIO_NET_HDR_GSO_NONE
	};
	size_t total_len = 0;
	int err, mergeable;
	s16 headcount;
	size_t vhost_hlen, sock_hlen;
	size_t vhost_len, sock_len;
	bool busyloop_intr = false;
	struct socket *sock;
	struct iov_iter fixup;
	__virtio16 num_buffers;
	int recv_pkts = 0;

	mutex_lock_nested(&vq->mutex, VHOST_NET_VQ_RX);
	sock = vq->private_data;
	if (!sock)
		goto out;

	if (!vq_iotlb_prefetch(vq))
		goto out;

	vhost_disable_notify(&net->dev, vq);
	vhost_net_disable_vq(net, vq);

	vhost_hlen = nvq->vhost_hlen;
	sock_hlen = nvq->sock_hlen;

	vq_log = unlikely(vhost_has_feature(vq, VHOST_F_LOG_ALL)) ?
		vq->log : NULL;
	mergeable = vhost_has_feature(vq, VIRTIO_NET_F_MRG_RXBUF);

	while ((sock_len = vhost_net_rx_peek_head_len(net, sock->sk,
						      &busyloop_intr))) {
		sock_len += sock_hlen;
		vhost_len = sock_len + vhost_hlen;
		headcount = get_rx_bufs(vq, vq->heads + nvq->done_idx,
					vhost_len, &in, vq_log, &log,
					likely(mergeable) ? UIO_MAXIOV : 1);
		/* On error, stop handling until the next kick. */
		if (unlikely(headcount < 0))
			goto out;
		/* OK, now we need to know about added descriptors. */
		if (!headcount) {
			if (unlikely(busyloop_intr)) {
				vhost_poll_queue(&vq->poll);
			} else if (unlikely(vhost_enable_notify(&net->dev, vq))) {
				/* They have slipped one in as we were
				 * doing that: check again. */
				vhost_disable_notify(&net->dev, vq);
				continue;
			}
			/* Nothing new?  Wait for eventfd to tell us
			 * they refilled. */
			goto out;
		}
		busyloop_intr = false;
		if (nvq->rx_ring)
			msg.msg_control = vhost_net_buf_consume(&nvq->rxq);
		/* On overrun, truncate and discard */
		if (unlikely(headcount > UIO_MAXIOV)) {
			iov_iter_init(&msg.msg_iter, READ, vq->iov, 1, 1);
			err = sock->ops->recvmsg(sock, &msg,
						 1, MSG_DONTWAIT | MSG_TRUNC);
			pr_debug("Discarded rx packet: len %zd\n", sock_len);
			continue;
		}
		/* We don't need to be notified again. */
		iov_iter_init(&msg.msg_iter, READ, vq->iov, in, vhost_len);
		fixup = msg.msg_iter;
		if (unlikely((vhost_hlen))) {
			/* We will supply the header ourselves
			 * TODO: support TSO.
			 */
			iov_iter_advance(&msg.msg_iter, vhost_hlen);
		}
		err = sock->ops->recvmsg(sock, &msg,
					 sock_len, MSG_DONTWAIT | MSG_TRUNC);
		/* Userspace might have consumed the packet meanwhile:
		 * it's not supposed to do this usually, but might be hard
		 * to prevent. Discard data we got (if any) and keep going. */
		if (unlikely(err != sock_len)) {
			pr_debug("Discarded rx packet: "
				 " len %d, expected %zd\n", err, sock_len);
			vhost_discard_vq_desc(vq, headcount);
			continue;
		}
		/* Supply virtio_net_hdr if VHOST_NET_F_VIRTIO_NET_HDR */
		if (unlikely(vhost_hlen)) {
			if (copy_to_iter(&hdr, sizeof(hdr),
					 &fixup) != sizeof(hdr)) {
				vq_err(vq, "Unable to write vnet_hdr "
				       "at addr %p\n", vq->iov->iov_base);
				goto out;
			}
		} else {
			/* Header came from socket; we'll need to patch
			 * ->num_buffers over if VIRTIO_NET_F_MRG_RXBUF
			 */
			iov_iter_advance(&fixup, sizeof(hdr));
		}
		/* TODO: Should check and handle checksum. */

		num_buffers = cpu_to_vhost16(vq, headcount);
		if (likely(mergeable) &&
		    copy_to_iter(&num_buffers, sizeof num_buffers,
				 &fixup) != sizeof num_buffers) {
			vq_err(vq, "Failed num_buffers write");
			vhost_discard_vq_desc(vq, headcount);
			goto out;
		}
		nvq->done_idx += headcount;
		if (nvq->done_idx > VHOST_NET_BATCH)
			vhost_net_signal_used(nvq);
		if (unlikely(vq_log))
			vhost_log_write(vq, vq_log, log, vhost_len);
		total_len += vhost_len;
		if (unlikely(vhost_exceeds_weight(++recv_pkts, total_len))) {
			vhost_poll_queue(&vq->poll);
			goto out;
		}
	}
	if (unlikely(busyloop_intr))
		vhost_poll_queue(&vq->poll);
	else
		vhost_net_enable_vq(net, vq);
out:
	vhost_net_signal_used(nvq);
	mutex_unlock(&vq->mutex);
}
static void handle_tx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev);

	handle_tx(net);
}

static void handle_rx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev);

	handle_rx(net);
}

static void handle_tx_net(struct vhost_work *work)
{
	struct vhost_net *net = container_of(work, struct vhost_net,
					     poll[VHOST_NET_VQ_TX].work);
	handle_tx(net);
}

static void handle_rx_net(struct vhost_work *work)
{
	struct vhost_net *net = container_of(work, struct vhost_net,
					     poll[VHOST_NET_VQ_RX].work);
	handle_rx(net);
}
static int vhost_net_open(struct inode *inode, struct file *f)
{
	struct vhost_net *n;
	struct vhost_dev *dev;
	struct vhost_virtqueue **vqs;
	void **queue;
	struct xdp_buff *xdp;
	int i;

	n = kvmalloc(sizeof *n, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!n)
		return -ENOMEM;
	vqs = kmalloc_array(VHOST_NET_VQ_MAX, sizeof(*vqs), GFP_KERNEL);
	if (!vqs) {
		kvfree(n);
		return -ENOMEM;
	}

	queue = kmalloc_array(VHOST_NET_BATCH, sizeof(void *),
			      GFP_KERNEL);
	if (!queue) {
		kfree(vqs);
		kvfree(n);
		return -ENOMEM;
	}
	n->vqs[VHOST_NET_VQ_RX].rxq.queue = queue;

	xdp = kmalloc_array(VHOST_NET_BATCH, sizeof(*xdp), GFP_KERNEL);
	if (!xdp) {
		kfree(vqs);
		kvfree(n);
		kfree(queue);
		return -ENOMEM;
	}
	n->vqs[VHOST_NET_VQ_TX].xdp = xdp;

	dev = &n->dev;
	vqs[VHOST_NET_VQ_TX] = &n->vqs[VHOST_NET_VQ_TX].vq;
	vqs[VHOST_NET_VQ_RX] = &n->vqs[VHOST_NET_VQ_RX].vq;
	n->vqs[VHOST_NET_VQ_TX].vq.handle_kick = handle_tx_kick;
	n->vqs[VHOST_NET_VQ_RX].vq.handle_kick = handle_rx_kick;
	for (i = 0; i < VHOST_NET_VQ_MAX; i++) {
		n->vqs[i].ubufs = NULL;
		n->vqs[i].ubuf_info = NULL;
		n->vqs[i].upend_idx = 0;
		n->vqs[i].done_idx = 0;
		n->vqs[i].batched_xdp = 0;
		n->vqs[i].vhost_hlen = 0;
		n->vqs[i].sock_hlen = 0;
		n->vqs[i].rx_ring = NULL;
		vhost_net_buf_init(&n->vqs[i].rxq);
	}
	vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX);

	vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, EPOLLOUT, dev);
	vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, EPOLLIN, dev);

	f->private_data = n;

	return 0;
}
static struct socket *vhost_net_stop_vq(struct vhost_net *n,
					struct vhost_virtqueue *vq)
{
	struct socket *sock;
	struct vhost_net_virtqueue *nvq =
		container_of(vq, struct vhost_net_virtqueue, vq);

	mutex_lock(&vq->mutex);
	sock = vq->private_data;
	vhost_net_disable_vq(n, vq);
	vq->private_data = NULL;
	vhost_net_buf_unproduce(nvq);
	nvq->rx_ring = NULL;
	mutex_unlock(&vq->mutex);
	return sock;
}

static void vhost_net_stop(struct vhost_net *n, struct socket **tx_sock,
			   struct socket **rx_sock)
{
	*tx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_TX].vq);
	*rx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_RX].vq);
}

static void vhost_net_flush_vq(struct vhost_net *n, int index)
{
	vhost_poll_flush(n->poll + index);
	vhost_poll_flush(&n->vqs[index].vq.poll);
}
static void vhost_net_flush(struct vhost_net *n)
{
	vhost_net_flush_vq(n, VHOST_NET_VQ_TX);
	vhost_net_flush_vq(n, VHOST_NET_VQ_RX);
	if (n->vqs[VHOST_NET_VQ_TX].ubufs) {
		mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
		n->tx_flush = true;
		mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
		/* Wait for all lower device DMAs done. */
		vhost_net_ubuf_put_and_wait(n->vqs[VHOST_NET_VQ_TX].ubufs);
		mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
		n->tx_flush = false;
		atomic_set(&n->vqs[VHOST_NET_VQ_TX].ubufs->refcount, 1);
		mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
	}
}

static int vhost_net_release(struct inode *inode, struct file *f)
{
	struct vhost_net *n = f->private_data;
	struct socket *tx_sock;
	struct socket *rx_sock;

	vhost_net_stop(n, &tx_sock, &rx_sock);
	vhost_net_flush(n);
	vhost_dev_stop(&n->dev);
	vhost_dev_cleanup(&n->dev);
	vhost_net_vq_reset(n);
	if (tx_sock)
		sockfd_put(tx_sock);
	if (rx_sock)
		sockfd_put(rx_sock);
	/* Make sure no callbacks are outstanding */
	synchronize_rcu_bh();
	/* We do an extra flush before freeing memory,
	 * since jobs can re-queue themselves. */
	vhost_net_flush(n);
	kfree(n->vqs[VHOST_NET_VQ_RX].rxq.queue);
	kfree(n->vqs[VHOST_NET_VQ_TX].xdp);
	kfree(n->dev.vqs);
	kvfree(n);
	return 0;
}
static struct socket *get_raw_socket(int fd)
{
	struct {
		struct sockaddr_ll sa;
		char  buf[MAX_ADDR_LEN];
	} uaddr;
	int r;
	struct socket *sock = sockfd_lookup(fd, &r);

	if (!sock)
		return ERR_PTR(-ENOTSOCK);

	/* Parameter checking */
	if (sock->sk->sk_type != SOCK_RAW) {
		r = -ESOCKTNOSUPPORT;
		goto err;
	}

	r = sock->ops->getname(sock, (struct sockaddr *)&uaddr.sa, 0);
	if (r < 0)
		goto err;

	if (uaddr.sa.sll_family != AF_PACKET) {
		r = -EPFNOSUPPORT;
		goto err;
	}
	return sock;
err:
	sockfd_put(sock);
	return ERR_PTR(r);
}

static struct ptr_ring *get_tap_ptr_ring(int fd)
{
	struct ptr_ring *ring;
	struct file *file = fget(fd);

	if (!file)
		return NULL;
	ring = tun_get_tx_ring(file);
	if (!IS_ERR(ring))
		goto out;
	ring = tap_get_ptr_ring(file);
	if (!IS_ERR(ring))
		goto out;
	ring = NULL;
out:
	fput(file);
	return ring;
}

static struct socket *get_tap_socket(int fd)
{
	struct file *file = fget(fd);
	struct socket *sock;

	if (!file)
		return ERR_PTR(-EBADF);
	sock = tun_get_socket(file);
	if (!IS_ERR(sock))
		return sock;
	sock = tap_get_socket(file);
	if (IS_ERR(sock))
		fput(file);
	return sock;
}

static struct socket *get_socket(int fd)
{
	struct socket *sock;

	/* special case to disable backend */
	if (fd == -1)
		return NULL;
	sock = get_raw_socket(fd);
	if (!IS_ERR(sock))
		return sock;
	sock = get_tap_socket(fd);
	if (!IS_ERR(sock))
		return sock;
	return ERR_PTR(-ENOTSOCK);
}
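
/* Swapping the backend: the new socket is installed under the vq mutex, the
 * vring access is (re)initialized, and the old ubuf reference is drained
 * outside the vq mutex before the old socket is released.
 */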
static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
{
	struct socket *sock, *oldsock;
	struct vhost_virtqueue *vq;
	struct vhost_net_virtqueue *nvq;
	struct vhost_net_ubuf_ref *ubufs, *oldubufs = NULL;
	int r;

	mutex_lock(&n->dev.mutex);
	r = vhost_dev_check_owner(&n->dev);
	if (r)
		goto err;

	if (index >= VHOST_NET_VQ_MAX) {
		r = -ENOBUFS;
		goto err;
	}
	vq = &n->vqs[index].vq;
	nvq = &n->vqs[index];
	mutex_lock(&vq->mutex);

	/* Verify that ring has been setup correctly. */
	if (!vhost_vq_access_ok(vq)) {
		r = -EFAULT;
		goto err_vq;
	}
	sock = get_socket(fd);
	if (IS_ERR(sock)) {
		r = PTR_ERR(sock);
		goto err_vq;
	}

	/* start polling new socket */
	oldsock = vq->private_data;
	if (sock != oldsock) {
		ubufs = vhost_net_ubuf_alloc(vq,
					     sock && vhost_sock_zcopy(sock));
		if (IS_ERR(ubufs)) {
			r = PTR_ERR(ubufs);
			goto err_ubufs;
		}

		vhost_net_disable_vq(n, vq);
		vq->private_data = sock;
		vhost_net_buf_unproduce(nvq);
		r = vhost_vq_init_access(vq);
		if (r)
			goto err_used;
		r = vhost_net_enable_vq(n, vq);
		if (r)
			goto err_used;
		if (index == VHOST_NET_VQ_RX)
			nvq->rx_ring = get_tap_ptr_ring(fd);

		oldubufs = nvq->ubufs;
		nvq->ubufs = ubufs;

		n->tx_packets = 0;
		n->tx_zcopy_err = 0;
		n->tx_flush = false;
	}

	mutex_unlock(&vq->mutex);

	if (oldubufs) {
		vhost_net_ubuf_put_wait_and_free(oldubufs);
		mutex_lock(&vq->mutex);
		vhost_zerocopy_signal_used(n, vq);
		mutex_unlock(&vq->mutex);
	}

	if (oldsock) {
		vhost_net_flush_vq(n, index);
		sockfd_put(oldsock);
	}

	mutex_unlock(&n->dev.mutex);
	return 0;

err_used:
	vq->private_data = oldsock;
	vhost_net_enable_vq(n, vq);
	if (ubufs)
		vhost_net_ubuf_put_wait_and_free(ubufs);
err_ubufs:
	sockfd_put(sock);
err_vq:
	mutex_unlock(&vq->mutex);
err:
	mutex_unlock(&n->dev.mutex);
	return r;
}
static long vhost_net_reset_owner(struct vhost_net *n)
{
	struct socket *tx_sock = NULL;
	struct socket *rx_sock = NULL;
	long err;
	struct vhost_umem *umem;

	mutex_lock(&n->dev.mutex);
	err = vhost_dev_check_owner(&n->dev);
	if (err)
		goto done;
	umem = vhost_dev_reset_owner_prepare();
	if (!umem) {
		err = -ENOMEM;
		goto done;
	}
	vhost_net_stop(n, &tx_sock, &rx_sock);
	vhost_net_flush(n);
	vhost_dev_stop(&n->dev);
	vhost_dev_reset_owner(&n->dev, umem);
	vhost_net_vq_reset(n);
done:
	mutex_unlock(&n->dev.mutex);
	if (tx_sock)
		sockfd_put(tx_sock);
	if (rx_sock)
		sockfd_put(rx_sock);
	return err;
}

static int vhost_net_set_backend_features(struct vhost_net *n, u64 features)
{
	int i;

	mutex_lock(&n->dev.mutex);
	for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
		mutex_lock(&n->vqs[i].vq.mutex);
		n->vqs[i].vq.acked_backend_features = features;
		mutex_unlock(&n->vqs[i].vq.mutex);
	}
	mutex_unlock(&n->dev.mutex);

	return 0;
}
static int vhost_net_set_features(struct vhost_net *n, u64 features)
{
	size_t vhost_hlen, sock_hlen, hdr_len;
	int i;

	hdr_len = (features & ((1ULL << VIRTIO_NET_F_MRG_RXBUF) |
			       (1ULL << VIRTIO_F_VERSION_1))) ?
			sizeof(struct virtio_net_hdr_mrg_rxbuf) :
			sizeof(struct virtio_net_hdr);
	if (features & (1 << VHOST_NET_F_VIRTIO_NET_HDR)) {
		/* vhost provides vnet_hdr */
		vhost_hlen = hdr_len;
		sock_hlen = 0;
	} else {
		/* socket provides vnet_hdr */
		vhost_hlen = 0;
		sock_hlen = hdr_len;
	}
	mutex_lock(&n->dev.mutex);
	if ((features & (1 << VHOST_F_LOG_ALL)) &&
	    !vhost_log_access_ok(&n->dev))
		goto out_unlock;

	if ((features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))) {
		if (vhost_init_device_iotlb(&n->dev, true))
			goto out_unlock;
	}

	for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
		mutex_lock(&n->vqs[i].vq.mutex);
		n->vqs[i].vq.acked_features = features;
		n->vqs[i].vhost_hlen = vhost_hlen;
		n->vqs[i].sock_hlen = sock_hlen;
		mutex_unlock(&n->vqs[i].vq.mutex);
	}
	mutex_unlock(&n->dev.mutex);
	return 0;

out_unlock:
	mutex_unlock(&n->dev.mutex);
	return -EFAULT;
}

static long vhost_net_set_owner(struct vhost_net *n)
{
	int r;

	mutex_lock(&n->dev.mutex);
	if (vhost_dev_has_owner(&n->dev)) {
		r = -EBUSY;
		goto out;
	}
	r = vhost_net_set_ubuf_info(n);
	if (r)
		goto out;
	r = vhost_dev_set_owner(&n->dev);
	if (r)
		vhost_net_clear_ubuf_info(n);
	vhost_net_flush(n);
out:
	mutex_unlock(&n->dev.mutex);
	return r;
}
static long vhost_net_ioctl(struct file *f, unsigned int ioctl,
			    unsigned long arg)
{
	struct vhost_net *n = f->private_data;
	void __user *argp = (void __user *)arg;
	u64 __user *featurep = argp;
	struct vhost_vring_file backend;
	u64 features;
	int r;

	switch (ioctl) {
	case VHOST_NET_SET_BACKEND:
		if (copy_from_user(&backend, argp, sizeof backend))
			return -EFAULT;
		return vhost_net_set_backend(n, backend.index, backend.fd);
	case VHOST_GET_FEATURES:
		features = VHOST_NET_FEATURES;
		if (copy_to_user(featurep, &features, sizeof features))
			return -EFAULT;
		return 0;
	case VHOST_SET_FEATURES:
		if (copy_from_user(&features, featurep, sizeof features))
			return -EFAULT;
		if (features & ~VHOST_NET_FEATURES)
			return -EOPNOTSUPP;
		return vhost_net_set_features(n, features);
	case VHOST_GET_BACKEND_FEATURES:
		features = VHOST_NET_BACKEND_FEATURES;
		if (copy_to_user(featurep, &features, sizeof(features)))
			return -EFAULT;
		return 0;
	case VHOST_SET_BACKEND_FEATURES:
		if (copy_from_user(&features, featurep, sizeof(features)))
			return -EFAULT;
		if (features & ~VHOST_NET_BACKEND_FEATURES)
			return -EOPNOTSUPP;
		return vhost_net_set_backend_features(n, features);
	case VHOST_RESET_OWNER:
		return vhost_net_reset_owner(n);
	case VHOST_SET_OWNER:
		return vhost_net_set_owner(n);
	default:
		mutex_lock(&n->dev.mutex);
		r = vhost_dev_ioctl(&n->dev, ioctl, argp);
		if (r == -ENOIOCTLCMD)
			r = vhost_vring_ioctl(&n->dev, ioctl, argp);
		else
			vhost_net_flush(n);
		mutex_unlock(&n->dev.mutex);
		return r;
	}
}

#ifdef CONFIG_COMPAT
static long vhost_net_compat_ioctl(struct file *f, unsigned int ioctl,
				   unsigned long arg)
{
	return vhost_net_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
}
#endif
static ssize_t vhost_net_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct vhost_net *n = file->private_data;
	struct vhost_dev *dev = &n->dev;
	int noblock = file->f_flags & O_NONBLOCK;

	return vhost_chr_read_iter(dev, to, noblock);
}

static ssize_t vhost_net_chr_write_iter(struct kiocb *iocb,
					struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct vhost_net *n = file->private_data;
	struct vhost_dev *dev = &n->dev;

	return vhost_chr_write_iter(dev, from);
}

static __poll_t vhost_net_chr_poll(struct file *file, poll_table *wait)
{
	struct vhost_net *n = file->private_data;
	struct vhost_dev *dev = &n->dev;

	return vhost_chr_poll(file, dev, wait);
}
static const struct file_operations vhost_net_fops = {
	.owner          = THIS_MODULE,
	.release        = vhost_net_release,
	.read_iter      = vhost_net_chr_read_iter,
	.write_iter     = vhost_net_chr_write_iter,
	.poll           = vhost_net_chr_poll,
	.unlocked_ioctl = vhost_net_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl   = vhost_net_compat_ioctl,
#endif
	.open           = vhost_net_open,
	.llseek		= noop_llseek,
};

static struct miscdevice vhost_net_misc = {
	.minor = VHOST_NET_MINOR,
	.name = "vhost-net",
	.fops = &vhost_net_fops,
};

static int vhost_net_init(void)
{
	if (experimental_zcopytx)
		vhost_net_enable_zcopy(VHOST_NET_VQ_TX);
	return misc_register(&vhost_net_misc);
}
module_init(vhost_net_init);

static void vhost_net_exit(void)
{
	misc_deregister(&vhost_net_misc);
}
module_exit(vhost_net_exit);

MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Michael S. Tsirkin");
MODULE_DESCRIPTION("Host kernel accelerator for virtio net");
MODULE_ALIAS_MISCDEV(VHOST_NET_MINOR);
MODULE_ALIAS("devname:vhost-net");