#include <linux/etherdevice.h>
#include <linux/if_macvlan.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/nsproxy.h>
#include <linux/compat.h>
#include <linux/if_tun.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/cache.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/cdev.h>
#include <linux/idr.h>
#include <linux/fs.h>
#include <linux/uio.h>

#include <net/net_namespace.h>
#include <net/rtnetlink.h>
#include <net/sock.h>
#include <linux/virtio_net.h>
/*
 * A macvtap queue is the central object of this driver, it connects
 * an open character device to a macvlan interface. There can be
 * multiple queues on one interface, which map back to queues
 * implemented in hardware on the underlying device.
 *
 * macvtap_proto is used to allocate queues through the sock allocation
 * mechanism.
 */
struct macvtap_queue {
        struct sock sk;
        struct socket sock;
        struct socket_wq wq;
        int vnet_hdr_sz;
        struct macvlan_dev __rcu *vlan;
        struct file *file;
        unsigned int flags;
        u16 queue_index;
        bool enabled;
        struct list_head next;
};
#define MACVTAP_FEATURES (IFF_VNET_HDR | IFF_VNET_LE | IFF_MULTI_QUEUE)
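/*
 * The virtio_net header fields carried over the chardev are __virtio16;
 * the helpers below convert them to and from CPU byte order, honouring
 * the per-queue IFF_VNET_LE flag.
 */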
static inline u16 macvtap16_to_cpu(struct macvtap_queue *q, __virtio16 val)
{
        return __virtio16_to_cpu(q->flags & IFF_VNET_LE, val);
}
static inline __virtio16 cpu_to_macvtap16(struct macvtap_queue *q, u16 val)
{
        return __cpu_to_virtio16(q->flags & IFF_VNET_LE, val);
}
static struct proto macvtap_proto = {
        .name = "macvtap",
        .owner = THIS_MODULE,
        .obj_size = sizeof (struct macvtap_queue),
};
/*
 * Variables for dealing with macvtaps device numbers.
 */
static dev_t macvtap_major;
#define MACVTAP_NUM_DEVS (1U << MINORBITS)
static DEFINE_MUTEX(minor_lock);
static DEFINE_IDR(minor_idr);
#define GOODCOPY_LEN 128
static struct class *macvtap_class;
static struct cdev macvtap_cdev;
static const struct proto_ops macvtap_socket_ops;
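/*
 * Offload feature masks: TUN_OFFLOADS seeds vlan->tap_features in
 * macvtap_newlink(), RX_OFFLOADS is toggled in set_offload(), and
 * TAP_FEATURES is the baseline used when segmenting skbs for the
 * tap side in macvtap_handle_frame().
 */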
#define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \
                      NETIF_F_TSO6)
#define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO)
#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG)
static struct macvlan_dev *macvtap_get_vlan_rcu(const struct net_device *dev)
{
        return rcu_dereference(dev->rx_handler_data);
}
/*
 * RCU usage:
 * The macvtap_queue and the macvlan_dev are loosely coupled, the
 * pointers from one to the other can only be read while rcu_read_lock
 * or rtnl is held.
 *
 * Both the file and the macvlan_dev hold a reference on the macvtap_queue
 * through sock_hold(&q->sk). When the macvlan_dev goes away first,
 * q->vlan becomes inaccessible. When the file gets closed,
 * macvtap_get_queue() fails.
 *
 * There may still be references to the struct sock inside of the
 * queue from outbound SKBs, but these never reference back to the
 * file or the dev. The data structure is freed through __sk_free
 * when both our references and any pending SKBs are gone.
 */
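/*
 * The queue attach/detach helpers below update vlan->taps[] and the
 * per-device queue list; callers serialize them through the RTNL lock
 * (note the rtnl_dereference() calls).
 */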
static int macvtap_enable_queue(struct net_device *dev, struct file *file,
                                struct macvtap_queue *q)
{
        struct macvlan_dev *vlan = netdev_priv(dev);
        int err = -EINVAL;

        ASSERT_RTNL();

        if (q->enabled)
                goto out;

        err = 0;
        rcu_assign_pointer(vlan->taps[vlan->numvtaps], q);
        q->queue_index = vlan->numvtaps;
        q->enabled = true;

        vlan->numvtaps++;
out:
        return err;
}
static int macvtap_set_queue(struct net_device *dev, struct file *file,
                             struct macvtap_queue *q)
{
        struct macvlan_dev *vlan = netdev_priv(dev);
        int err = -EBUSY;

        if (vlan->numqueues == MAX_MACVTAP_QUEUES)
                goto out;

        err = 0;
        rcu_assign_pointer(q->vlan, vlan);
        rcu_assign_pointer(vlan->taps[vlan->numvtaps], q);
        sock_hold(&q->sk);

        q->file = file;
        q->queue_index = vlan->numvtaps;
        q->enabled = true;
        file->private_data = q;
        list_add_tail(&q->next, &vlan->queue_list);

        vlan->numvtaps++;
        vlan->numqueues++;

out:
        return err;
}
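/*
 * Disable (but do not destroy) a queue: the last entry of vlan->taps[]
 * is moved into the freed slot so that the array of enabled queues
 * stays dense.
 */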
static int macvtap_disable_queue(struct macvtap_queue *q)
{
        struct macvlan_dev *vlan;
        struct macvtap_queue *nq;

        ASSERT_RTNL();
        if (!q->enabled)
                return -EINVAL;

        vlan = rtnl_dereference(q->vlan);

        if (vlan) {
                int index = q->queue_index;
                BUG_ON(index >= vlan->numvtaps);
                nq = rtnl_dereference(vlan->taps[vlan->numvtaps - 1]);
                nq->queue_index = index;

                rcu_assign_pointer(vlan->taps[index], nq);
                RCU_INIT_POINTER(vlan->taps[vlan->numvtaps - 1], NULL);
                q->enabled = false;

                vlan->numvtaps--;
        }

        return 0;
}
/*
 * The file owning the queue got closed, give up both
 * the reference that the file holds as well as the
 * one from the macvlan_dev if that still exists.
 *
 * Holding the rtnl lock makes sure that we don't get
 * to the queue again after destroying it.
 */
static void macvtap_put_queue(struct macvtap_queue *q)
{
        struct macvlan_dev *vlan;

        rtnl_lock();
        vlan = rtnl_dereference(q->vlan);

        if (vlan) {
                if (q->enabled)
                        BUG_ON(macvtap_disable_queue(q));

                vlan->numqueues--;
                RCU_INIT_POINTER(q->vlan, NULL);
                sock_put(&q->sk);
                list_del_init(&q->next);
        }

        rtnl_unlock();

        synchronize_rcu();
        sock_put(&q->sk);
}
/*
 * Select a queue based on the rxq of the device on which this packet
 * arrived. If the incoming device is not mq, calculate a flow hash
 * to select a queue. If all fails, find the first available queue.
 * Cache vlan->numvtaps since it can become zero during the execution
 * of this function.
 */
static struct macvtap_queue *macvtap_get_queue(struct net_device *dev,
                                               struct sk_buff *skb)
{
        struct macvlan_dev *vlan = netdev_priv(dev);
        struct macvtap_queue *tap = NULL;
        /* Access to taps array is protected by rcu, but access to numvtaps
         * isn't. Below we use it to lookup a queue, but treat it as a hint
         * and validate that the result isn't NULL - in case we are
         * racing against queue removal.
         */
        int numvtaps = ACCESS_ONCE(vlan->numvtaps);
        __u32 rxq;

        if (!numvtaps)
                goto out;

        /* Check if we can use flow to select a queue */
        rxq = skb_get_hash(skb);
        if (rxq) {
                tap = rcu_dereference(vlan->taps[rxq % numvtaps]);
                goto out;
        }

        if (likely(skb_rx_queue_recorded(skb))) {
                rxq = skb_get_rx_queue(skb);

                while (unlikely(rxq >= numvtaps))
                        rxq -= numvtaps;

                tap = rcu_dereference(vlan->taps[rxq]);
                goto out;
        }

        tap = rcu_dereference(vlan->taps[0]);
out:
        return tap;
}
/*
 * The net_device is going away, give up the reference
 * that it holds on all queues and safely set the pointer
 * from the queues to NULL.
 */
static void macvtap_del_queues(struct net_device *dev)
{
        struct macvlan_dev *vlan = netdev_priv(dev);
        struct macvtap_queue *q, *tmp, *qlist[MAX_MACVTAP_QUEUES];
        int i, j = 0;

        ASSERT_RTNL();
        list_for_each_entry_safe(q, tmp, &vlan->queue_list, next) {
                list_del_init(&q->next);
                qlist[j++] = q;
                RCU_INIT_POINTER(q->vlan, NULL);
                if (q->enabled)
                        vlan->numvtaps--;
                vlan->numqueues--;
        }
        for (i = 0; i < vlan->numvtaps; i++)
                RCU_INIT_POINTER(vlan->taps[i], NULL);
        BUG_ON(vlan->numvtaps);
        BUG_ON(vlan->numqueues);
        /* guarantee that any future macvtap_set_queue will fail */
        vlan->numvtaps = MAX_MACVTAP_QUEUES;

        for (--j; j >= 0; j--)
                sock_put(&qlist[j]->sk);
}
static rx_handler_result_t macvtap_handle_frame(struct sk_buff **pskb)
{
        struct sk_buff *skb = *pskb;
        struct net_device *dev = skb->dev;
        struct macvlan_dev *vlan;
        struct macvtap_queue *q;
        netdev_features_t features = TAP_FEATURES;

        vlan = macvtap_get_vlan_rcu(dev);
        if (!vlan)
                return RX_HANDLER_PASS;

        q = macvtap_get_queue(dev, skb);
        if (!q)
                return RX_HANDLER_PASS;

        if (skb_queue_len(&q->sk.sk_receive_queue) >= dev->tx_queue_len)
                goto drop;

        skb_push(skb, ETH_HLEN);

        /* Apply the forward feature mask so that we perform segmentation
         * according to users wishes. This only works if VNET_HDR is
         * enabled.
         */
        if (q->flags & IFF_VNET_HDR)
                features |= vlan->tap_features;
        if (netif_needs_gso(dev, skb, features)) {
                struct sk_buff *segs = __skb_gso_segment(skb, features, false);

                if (IS_ERR(segs))
                        goto drop;

                if (!segs) {
                        skb_queue_tail(&q->sk.sk_receive_queue, skb);
                        goto wake_up;
                }

                kfree_skb(skb);
                while (segs) {
                        struct sk_buff *nskb = segs->next;

                        segs->next = NULL;
                        skb_queue_tail(&q->sk.sk_receive_queue, segs);
                        segs = nskb;
                }
        } else {
                /* If we receive a partial checksum and the tap side
                 * doesn't support checksum offload, compute the checksum.
                 * Note: it doesn't matter which checksum feature to
                 *       check, we either support them all or none.
                 */
                if (skb->ip_summed == CHECKSUM_PARTIAL &&
                    !(features & NETIF_F_ALL_CSUM) &&
                    skb_checksum_help(skb))
                        goto drop;
                skb_queue_tail(&q->sk.sk_receive_queue, skb);
        }

wake_up:
        wake_up_interruptible_poll(sk_sleep(&q->sk), POLLIN | POLLRDNORM | POLLRDBAND);
        return RX_HANDLER_CONSUMED;

drop:
        /* Count errors/drops only here, thus don't care about args. */
        macvlan_count_rx(vlan, 0, 0, 0);
        kfree_skb(skb);
        return RX_HANDLER_CONSUMED;
}
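/*
 * Character device minor numbers are handed out from an IDR so that each
 * macvtap link gets its own /dev/tapNN node; the IDR also lets the open()
 * path map a minor back to its net_device.
 */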
static int macvtap_get_minor(struct macvlan_dev *vlan)
{
        int retval = -ENOMEM;

        mutex_lock(&minor_lock);
        retval = idr_alloc(&minor_idr, vlan, 1, MACVTAP_NUM_DEVS, GFP_KERNEL);
        if (retval >= 0) {
                vlan->minor = retval;
        } else if (retval == -ENOSPC) {
                printk(KERN_ERR "too many macvtap devices\n");
                retval = -EINVAL;
        }
        mutex_unlock(&minor_lock);
        return retval < 0 ? retval : 0;
}
static void macvtap_free_minor(struct macvlan_dev *vlan)
{
        mutex_lock(&minor_lock);
        if (vlan->minor) {
                idr_remove(&minor_idr, vlan->minor);
                vlan->minor = 0;
        }
        mutex_unlock(&minor_lock);
}
static struct net_device *dev_get_by_macvtap_minor(int minor)
{
        struct net_device *dev = NULL;
        struct macvlan_dev *vlan;

        mutex_lock(&minor_lock);
        vlan = idr_find(&minor_idr, minor);
        if (vlan) {
                dev = vlan->dev;
                dev_hold(dev);
        }
        mutex_unlock(&minor_lock);
        return dev;
}
static int macvtap_newlink(struct net *src_net,
                           struct net_device *dev,
                           struct nlattr *tb[],
                           struct nlattr *data[])
{
        struct macvlan_dev *vlan = netdev_priv(dev);
        int err;

        INIT_LIST_HEAD(&vlan->queue_list);

        /* Since macvlan supports all offloads by default, make
         * tap support all offloads also.
         */
        vlan->tap_features = TUN_OFFLOADS;

        err = netdev_rx_handler_register(dev, macvtap_handle_frame, vlan);
        if (err)
                return err;

        /* Don't put anything that may fail after macvlan_common_newlink
         * because we can't undo what it does.
         */
        return macvlan_common_newlink(src_net, dev, tb, data);
}
static void macvtap_dellink(struct net_device *dev,
                            struct list_head *head)
{
        netdev_rx_handler_unregister(dev);
        macvtap_del_queues(dev);
        macvlan_dellink(dev, head);
}
static void macvtap_setup(struct net_device *dev)
{
        macvlan_common_setup(dev);
        dev->tx_queue_len = TUN_READQ_SIZE;
}
static struct rtnl_link_ops macvtap_link_ops __read_mostly = {
        .kind           = "macvtap",
        .setup          = macvtap_setup,
        .newlink        = macvtap_newlink,
        .dellink        = macvtap_dellink,
};
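/*
 * Socket callbacks: wake up pollers/writers once the socket becomes
 * writeable again, and purge any undelivered skbs when the last
 * reference to the sock goes away.
 */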
static void macvtap_sock_write_space(struct sock *sk)
{
        wait_queue_head_t *wqueue;

        if (!sock_writeable(sk) ||
            !test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags))
                return;

        wqueue = sk_sleep(sk);
        if (wqueue && waitqueue_active(wqueue))
                wake_up_interruptible_poll(wqueue, POLLOUT | POLLWRNORM | POLLWRBAND);
}
static void macvtap_sock_destruct(struct sock *sk)
{
        skb_queue_purge(&sk->sk_receive_queue);
}
static int macvtap_open(struct inode *inode, struct file *file)
{
        struct net *net = current->nsproxy->net_ns;
        struct net_device *dev;
        struct macvtap_queue *q;
        int err = -ENODEV;

        dev = dev_get_by_macvtap_minor(iminor(inode));
        if (!dev)
                goto out;

        err = -ENOMEM;
        q = (struct macvtap_queue *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
                                             &macvtap_proto);
        if (!q)
                goto out;

        RCU_INIT_POINTER(q->sock.wq, &q->wq);
        init_waitqueue_head(&q->wq.wait);
        q->sock.type = SOCK_RAW;
        q->sock.state = SS_CONNECTED;
        q->sock.file = file;
        q->sock.ops = &macvtap_socket_ops;
        sock_init_data(&q->sock, &q->sk);
        q->sk.sk_write_space = macvtap_sock_write_space;
        q->sk.sk_destruct = macvtap_sock_destruct;
        q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP;
        q->vnet_hdr_sz = sizeof(struct virtio_net_hdr);

        /*
         * so far only KVM virtio_net uses macvtap, enable zero copy between
         * guest kernel and host kernel when lower device supports zerocopy
         *
         * The macvlan supports zerocopy iff the lower device supports zero
         * copy so we don't have to look at the lower device directly.
         */
        if ((dev->features & NETIF_F_HIGHDMA) && (dev->features & NETIF_F_SG))
                sock_set_flag(&q->sk, SOCK_ZEROCOPY);

        err = macvtap_set_queue(dev, file, q);
        if (err)
                sock_put(&q->sk);

out:
        if (dev)
                dev_put(dev);

        return err;
}
static int macvtap_release(struct inode *inode, struct file *file)
{
        struct macvtap_queue *q = file->private_data;
        macvtap_put_queue(q);
        return 0;
}
static unsigned int macvtap_poll(struct file *file, poll_table * wait)
{
        struct macvtap_queue *q = file->private_data;
        unsigned int mask = POLLERR;

        if (!q)
                goto out;

        mask = 0;
        poll_wait(file, &q->wq.wait, wait);

        if (!skb_queue_empty(&q->sk.sk_receive_queue))
                mask |= POLLIN | POLLRDNORM;

        if (sock_writeable(&q->sk) ||
            (!test_and_set_bit(SOCK_ASYNC_NOSPACE, &q->sock.flags) &&
             sock_writeable(&q->sk)))
                mask |= POLLOUT | POLLWRNORM;

out:
        return mask;
}
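/*
 * Allocate an skb for a packet coming from user space: short packets are
 * made fully linear, longer ones keep "linear" bytes in the skb head and
 * the remainder in paged data.
 */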
static inline struct sk_buff *macvtap_alloc_skb(struct sock *sk, size_t prepad,
                                                size_t len, size_t linear,
                                                int noblock, int *err)
{
        struct sk_buff *skb;

        /* Under a page? Don't bother with paged skb. */
        if (prepad + len < PAGE_SIZE || !linear)
                linear = len;

        skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
                                   err, 0);
        if (!skb)
                return NULL;

        skb_reserve(skb, prepad);
        skb_put(skb, linear);
        skb->data_len = len - linear;
        skb->len += len - linear;

        return skb;
}
/*
 * macvtap_skb_from_vnet_hdr and macvtap_skb_to_vnet_hdr should
 * be shared with the tun/tap driver.
 */
static int macvtap_skb_from_vnet_hdr(struct macvtap_queue *q,
                                     struct sk_buff *skb,
                                     struct virtio_net_hdr *vnet_hdr)
{
        unsigned short gso_type = 0;
        if (vnet_hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
                switch (vnet_hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
                case VIRTIO_NET_HDR_GSO_TCPV4:
                        gso_type = SKB_GSO_TCPV4;
                        break;
                case VIRTIO_NET_HDR_GSO_TCPV6:
                        gso_type = SKB_GSO_TCPV6;
                        break;
                case VIRTIO_NET_HDR_GSO_UDP:
                        pr_warn_once("macvtap: %s: using disabled UFO feature; please fix this program\n",
                                     current->comm);
                        gso_type = SKB_GSO_UDP;
                        if (skb->protocol == htons(ETH_P_IPV6))
                                ipv6_proxy_select_ident(skb);
                        break;
                default:
                        return -EINVAL;
                }

                if (vnet_hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN)
                        gso_type |= SKB_GSO_TCP_ECN;

                if (vnet_hdr->gso_size == 0)
                        return -EINVAL;
        }

        if (vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
                if (!skb_partial_csum_set(skb, macvtap16_to_cpu(q, vnet_hdr->csum_start),
                                          macvtap16_to_cpu(q, vnet_hdr->csum_offset)))
                        return -EINVAL;
        }

        if (vnet_hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
                skb_shinfo(skb)->gso_size = macvtap16_to_cpu(q, vnet_hdr->gso_size);
                skb_shinfo(skb)->gso_type = gso_type;

                /* Header must be checked, and gso_segs computed. */
                skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
                skb_shinfo(skb)->gso_segs = 0;
        }
        return 0;
}
static void macvtap_skb_to_vnet_hdr(struct macvtap_queue *q,
                                    const struct sk_buff *skb,
                                    struct virtio_net_hdr *vnet_hdr)
{
        memset(vnet_hdr, 0, sizeof(*vnet_hdr));

        if (skb_is_gso(skb)) {
                struct skb_shared_info *sinfo = skb_shinfo(skb);

                /* This is a hint as to how much should be linear. */
                vnet_hdr->hdr_len = cpu_to_macvtap16(q, skb_headlen(skb));
                vnet_hdr->gso_size = cpu_to_macvtap16(q, sinfo->gso_size);
                if (sinfo->gso_type & SKB_GSO_TCPV4)
                        vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
                else if (sinfo->gso_type & SKB_GSO_TCPV6)
                        vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
                else
                        BUG();
                if (sinfo->gso_type & SKB_GSO_TCP_ECN)
                        vnet_hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
        } else
                vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE;

        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                vnet_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
                if (vlan_tx_tag_present(skb))
                        vnet_hdr->csum_start = cpu_to_macvtap16(q,
                                skb_checksum_start_offset(skb) + VLAN_HLEN);
                else
                        vnet_hdr->csum_start = cpu_to_macvtap16(q,
                                skb_checksum_start_offset(skb));
                vnet_hdr->csum_offset = cpu_to_macvtap16(q, skb->csum_offset);
        } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
                vnet_hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID;
        } /* else everything is zero */
}
/* Get packet from user space buffer */
static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
                                struct iov_iter *from, int noblock)
{
        int good_linear = SKB_MAX_HEAD(NET_IP_ALIGN);
        struct sk_buff *skb;
        struct macvlan_dev *vlan;
        unsigned long total_len = iov_iter_count(from);
        unsigned long len = total_len;
        int err;
        struct virtio_net_hdr vnet_hdr = { 0 };
        int vnet_hdr_len = 0;
        int copylen = 0;
        bool zerocopy = false;
        size_t linear;
        ssize_t n;

        if (q->flags & IFF_VNET_HDR) {
                vnet_hdr_len = q->vnet_hdr_sz;

                err = -EINVAL;
                if (len < vnet_hdr_len)
                        goto err;
                len -= vnet_hdr_len;

                err = -EFAULT;
                n = copy_from_iter(&vnet_hdr, sizeof(vnet_hdr), from);
                if (n != sizeof(vnet_hdr))
                        goto err;
                iov_iter_advance(from, vnet_hdr_len - sizeof(vnet_hdr));
                if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
                     macvtap16_to_cpu(q, vnet_hdr.csum_start) +
                     macvtap16_to_cpu(q, vnet_hdr.csum_offset) + 2 >
                             macvtap16_to_cpu(q, vnet_hdr.hdr_len))
                        vnet_hdr.hdr_len = cpu_to_macvtap16(q,
                                 macvtap16_to_cpu(q, vnet_hdr.csum_start) +
                                 macvtap16_to_cpu(q, vnet_hdr.csum_offset) + 2);
                err = -EINVAL;
                if (macvtap16_to_cpu(q, vnet_hdr.hdr_len) > len)
                        goto err;
        }

        err = -EINVAL;
        if (unlikely(len < ETH_HLEN))
                goto err;

        if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) {
                struct iov_iter i;

                copylen = vnet_hdr.hdr_len ?
                        macvtap16_to_cpu(q, vnet_hdr.hdr_len) : GOODCOPY_LEN;
                if (copylen > good_linear)
                        copylen = good_linear;
                linear = copylen;
                i = *from;
                iov_iter_advance(&i, copylen);
                if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS)
                        zerocopy = true;
        }

        if (!zerocopy) {
                copylen = len;
                if (macvtap16_to_cpu(q, vnet_hdr.hdr_len) > good_linear)
                        linear = good_linear;
                else
                        linear = macvtap16_to_cpu(q, vnet_hdr.hdr_len);
        }

        skb = macvtap_alloc_skb(&q->sk, NET_IP_ALIGN, copylen,
                                linear, noblock, &err);
        if (!skb)
                goto err;

        if (zerocopy)
                err = zerocopy_sg_from_iter(skb, from);
        else {
                err = skb_copy_datagram_from_iter(skb, 0, from, len);
                if (!err && m && m->msg_control) {
                        struct ubuf_info *uarg = m->msg_control;
                        uarg->callback(uarg, false);
                }
        }

        if (err)
                goto err_kfree;

        skb_set_network_header(skb, ETH_HLEN);
        skb_reset_mac_header(skb);
        skb->protocol = eth_hdr(skb)->h_proto;

        if (vnet_hdr_len) {
                err = macvtap_skb_from_vnet_hdr(q, skb, &vnet_hdr);
                if (err)
                        goto err_kfree;
        }

        skb_probe_transport_header(skb, ETH_HLEN);

        rcu_read_lock();
        vlan = rcu_dereference(q->vlan);
        /* copy skb_ubuf_info for callback when skb has no error */
        if (zerocopy) {
                skb_shinfo(skb)->destructor_arg = m->msg_control;
                skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
                skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
        }
        if (vlan) {
                skb->dev = vlan->dev;
                dev_queue_xmit(skb);
        } else {
                kfree_skb(skb);
        }
        rcu_read_unlock();

        return total_len;

err_kfree:
        kfree_skb(skb);

err:
        rcu_read_lock();
        vlan = rcu_dereference(q->vlan);
        if (vlan)
                this_cpu_inc(vlan->pcpu_stats->tx_dropped);
        rcu_read_unlock();

        return err;
}
static ssize_t macvtap_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
        struct file *file = iocb->ki_filp;
        struct macvtap_queue *q = file->private_data;

        return macvtap_get_user(q, NULL, from, file->f_flags & O_NONBLOCK);
}
/* Put packet to the user space buffer */
static ssize_t macvtap_put_user(struct macvtap_queue *q,
                                const struct sk_buff *skb,
                                struct iov_iter *iter)
{
        int ret;
        int vnet_hdr_len = 0;
        int vlan_offset = 0;
        int total;

        if (q->flags & IFF_VNET_HDR) {
                struct virtio_net_hdr vnet_hdr;
                vnet_hdr_len = q->vnet_hdr_sz;
                if (iov_iter_count(iter) < vnet_hdr_len)
                        return -EINVAL;

                macvtap_skb_to_vnet_hdr(q, skb, &vnet_hdr);

                if (copy_to_iter(&vnet_hdr, sizeof(vnet_hdr), iter) !=
                    sizeof(vnet_hdr))
                        return -EFAULT;

                iov_iter_advance(iter, vnet_hdr_len - sizeof(vnet_hdr));
        }
        total = vnet_hdr_len;
        total += skb->len;

        if (vlan_tx_tag_present(skb)) {
                struct {
                        __be16 h_vlan_proto;
                        __be16 h_vlan_TCI;
                } veth;
                veth.h_vlan_proto = skb->vlan_proto;
                veth.h_vlan_TCI = htons(vlan_tx_tag_get(skb));

                vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
                total += VLAN_HLEN;

                ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset);
                if (ret || !iov_iter_count(iter))
                        goto done;

                ret = copy_to_iter(&veth, sizeof(veth), iter);
                if (ret != sizeof(veth) || !iov_iter_count(iter))
                        goto done;
        }

        ret = skb_copy_datagram_iter(skb, vlan_offset, iter,
                                     skb->len - vlan_offset);

done:
        return ret ? ret : total;
}
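/*
 * Blocking read path: unless noblock is set, wait for an skb to show up on
 * the receive queue, then hand it to user space via macvtap_put_user().
 */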
static ssize_t macvtap_do_read(struct macvtap_queue *q,
                               struct iov_iter *to,
                               int noblock)
{
        DEFINE_WAIT(wait);
        struct sk_buff *skb;
        ssize_t ret = 0;

        if (!iov_iter_count(to))
                return 0;

        while (1) {
                if (!noblock)
                        prepare_to_wait(sk_sleep(&q->sk), &wait,
                                        TASK_INTERRUPTIBLE);

                /* Read frames from the queue */
                skb = skb_dequeue(&q->sk.sk_receive_queue);
                if (skb)
                        break;
                if (noblock) {
                        ret = -EAGAIN;
                        break;
                }
                if (signal_pending(current)) {
                        ret = -ERESTARTSYS;
                        break;
                }
                /* Nothing to read, let's sleep */
                schedule();
        }
        if (skb) {
                ret = macvtap_put_user(q, skb, to);
                if (unlikely(ret < 0))
                        kfree_skb(skb);
                else
                        consume_skb(skb);
        }
        if (!noblock)
                finish_wait(sk_sleep(&q->sk), &wait);
        return ret;
}
static ssize_t macvtap_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
        struct file *file = iocb->ki_filp;
        struct macvtap_queue *q = file->private_data;
        ssize_t len = iov_iter_count(to), ret;

        ret = macvtap_do_read(q, to, file->f_flags & O_NONBLOCK);
        ret = min_t(ssize_t, ret, len);
        if (ret > 0)
                iocb->ki_pos = ret;
        return ret;
}
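/*
 * macvtap_get_vlan()/macvtap_put_vlan() take and drop a reference on the
 * underlying macvlan net_device for the duration of an ioctl.
 */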
static struct macvlan_dev *macvtap_get_vlan(struct macvtap_queue *q)
{
        struct macvlan_dev *vlan;

        ASSERT_RTNL();
        vlan = rtnl_dereference(q->vlan);
        if (vlan)
                dev_hold(vlan->dev);

        return vlan;
}

static void macvtap_put_vlan(struct macvlan_dev *vlan)
{
        dev_put(vlan->dev);
}
static int macvtap_ioctl_set_queue(struct file *file, unsigned int flags)
{
        struct macvtap_queue *q = file->private_data;
        struct macvlan_dev *vlan;
        int ret;

        vlan = macvtap_get_vlan(q);
        if (!vlan)
                return -EINVAL;

        if (flags & IFF_ATTACH_QUEUE)
                ret = macvtap_enable_queue(vlan->dev, file, q);
        else if (flags & IFF_DETACH_QUEUE)
                ret = macvtap_disable_queue(q);
        else
                ret = -EINVAL;

        macvtap_put_vlan(vlan);
        return ret;
}
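/*
 * Translate the TUN_F_* offload bits requested via TUNSETOFFLOAD into
 * netdev feature bits on the macvlan device.
 */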
static int set_offload(struct macvtap_queue *q, unsigned long arg)
{
        struct macvlan_dev *vlan;
        netdev_features_t features;
        netdev_features_t feature_mask = 0;

        vlan = rtnl_dereference(q->vlan);
        if (!vlan)
                return -ENOLINK;

        features = vlan->dev->features;

        if (arg & TUN_F_CSUM) {
                feature_mask = NETIF_F_HW_CSUM;

                if (arg & (TUN_F_TSO4 | TUN_F_TSO6)) {
                        if (arg & TUN_F_TSO_ECN)
                                feature_mask |= NETIF_F_TSO_ECN;
                        if (arg & TUN_F_TSO4)
                                feature_mask |= NETIF_F_TSO;
                        if (arg & TUN_F_TSO6)
                                feature_mask |= NETIF_F_TSO6;
                }
        }

        /* tun/tap driver inverts the usage for TSO offloads, where
         * setting the TSO bit means that the userspace wants to
         * accept TSO frames and turning it off means that user space
         * does not support TSO.
         * For macvtap, we have to invert it to mean the same thing.
         * When user space turns off TSO, we turn off GSO/LRO so that
         * user-space will not receive TSO frames.
         */
        if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6))
                features |= RX_OFFLOADS;
        else
                features &= ~RX_OFFLOADS;

        /* tap_features are the same as features on tun/tap and
         * reflect user expectations.
         */
        vlan->tap_features = feature_mask;
        vlan->set_features = features;
        netdev_update_features(vlan->dev);

        return 0;
}
/*
 * provide compatibility with generic tun/tap interface
 */
static long macvtap_ioctl(struct file *file, unsigned int cmd,
                          unsigned long arg)
{
        struct macvtap_queue *q = file->private_data;
        struct macvlan_dev *vlan;
        void __user *argp = (void __user *)arg;
        struct ifreq __user *ifr = argp;
        unsigned int __user *up = argp;
        unsigned short u;
        int __user *sp = argp;
        int s;
        int ret;

        switch (cmd) {
        case TUNSETIFF:
                /* ignore the name, just look at flags */
                if (get_user(u, &ifr->ifr_flags))
                        return -EFAULT;

                ret = 0;
                if ((u & ~MACVTAP_FEATURES) != (IFF_NO_PI | IFF_TAP))
                        ret = -EINVAL;
                else
                        q->flags = (q->flags & ~MACVTAP_FEATURES) | u;

                return ret;

        case TUNGETIFF:
                rtnl_lock();
                vlan = macvtap_get_vlan(q);
                if (!vlan) {
                        rtnl_unlock();
                        return -ENOLINK;
                }

                ret = 0;
                if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
                    put_user(q->flags, &ifr->ifr_flags))
                        ret = -EFAULT;
                macvtap_put_vlan(vlan);
                rtnl_unlock();
                return ret;

        case TUNSETQUEUE:
                if (get_user(u, &ifr->ifr_flags))
                        return -EFAULT;
                rtnl_lock();
                ret = macvtap_ioctl_set_queue(file, u);
                rtnl_unlock();
                return ret;

        case TUNGETFEATURES:
                if (put_user(IFF_TAP | IFF_NO_PI | MACVTAP_FEATURES, up))
                        return -EFAULT;
                return 0;

        case TUNSETSNDBUF:
                if (get_user(u, up))
                        return -EFAULT;

                q->sk.sk_sndbuf = u;
                return 0;

        case TUNGETVNETHDRSZ:
                s = q->vnet_hdr_sz;
                if (put_user(s, sp))
                        return -EFAULT;
                return 0;

        case TUNSETVNETHDRSZ:
                if (get_user(s, sp))
                        return -EFAULT;
                if (s < (int)sizeof(struct virtio_net_hdr))
                        return -EINVAL;

                q->vnet_hdr_sz = s;
                return 0;

        case TUNSETOFFLOAD:
                /* let the user check for future flags */
                if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
                            TUN_F_TSO_ECN))
                        return -EINVAL;

                rtnl_lock();
                ret = set_offload(q, arg);
                rtnl_unlock();
                return ret;

        default:
                return -EINVAL;
        }
}
#ifdef CONFIG_COMPAT
static long macvtap_compat_ioctl(struct file *file, unsigned int cmd,
                                 unsigned long arg)
{
        return macvtap_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
#endif
static const struct file_operations macvtap_fops = {
        .owner          = THIS_MODULE,
        .open           = macvtap_open,
        .release        = macvtap_release,
        .read           = new_sync_read,
        .write          = new_sync_write,
        .read_iter      = macvtap_read_iter,
        .write_iter     = macvtap_write_iter,
        .poll           = macvtap_poll,
        .llseek         = no_llseek,
        .unlocked_ioctl = macvtap_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = macvtap_compat_ioctl,
#endif
};
static int macvtap_sendmsg(struct kiocb *iocb, struct socket *sock,
                           struct msghdr *m, size_t total_len)
{
        struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock);
        return macvtap_get_user(q, m, &m->msg_iter, m->msg_flags & MSG_DONTWAIT);
}
static int macvtap_recvmsg(struct kiocb *iocb, struct socket *sock,
                           struct msghdr *m, size_t total_len,
                           int flags)
{
        struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock);
        int ret;
        if (flags & ~(MSG_DONTWAIT|MSG_TRUNC))
                return -EINVAL;
        ret = macvtap_do_read(q, &m->msg_iter, flags & MSG_DONTWAIT);
        if (ret > total_len) {
                m->msg_flags |= MSG_TRUNC;
                ret = flags & MSG_TRUNC ? ret : total_len;
        }
        return ret;
}
/* Ops structure to mimic raw sockets with tun */
static const struct proto_ops macvtap_socket_ops = {
        .sendmsg = macvtap_sendmsg,
        .recvmsg = macvtap_recvmsg,
};
/* Get an underlying socket object from tun file. Returns error unless file
 * is attached to a device. The returned object works like a packet socket,
 * it can be used for sock_sendmsg/sock_recvmsg. The caller is responsible
 * for holding a reference to the file for as long as the socket is in use. */
struct socket *macvtap_get_socket(struct file *file)
{
        struct macvtap_queue *q;
        if (file->f_op != &macvtap_fops)
                return ERR_PTR(-EINVAL);
        q = file->private_data;
        if (!q)
                return ERR_PTR(-EBADFD);
        return &q->sock;
}
EXPORT_SYMBOL_GPL(macvtap_get_socket);
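/*
 * Netdevice notifier: create the "tapNN" character device node when a
 * macvtap link is registered and remove it again when the link goes away.
 */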
static int macvtap_device_event(struct notifier_block *unused,
                                unsigned long event, void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct macvlan_dev *vlan;
        struct device *classdev;
        dev_t devt;
        int err;

        if (dev->rtnl_link_ops != &macvtap_link_ops)
                return NOTIFY_DONE;

        vlan = netdev_priv(dev);

        switch (event) {
        case NETDEV_REGISTER:
                /* Create the device node here after the network device has
                 * been registered but before register_netdevice has
                 * finished running.
                 */
                err = macvtap_get_minor(vlan);
                if (err)
                        return notifier_from_errno(err);

                devt = MKDEV(MAJOR(macvtap_major), vlan->minor);
                classdev = device_create(macvtap_class, &dev->dev, devt,
                                         dev, "tap%d", dev->ifindex);
                if (IS_ERR(classdev)) {
                        macvtap_free_minor(vlan);
                        return notifier_from_errno(PTR_ERR(classdev));
                }
                break;
        case NETDEV_UNREGISTER:
                devt = MKDEV(MAJOR(macvtap_major), vlan->minor);
                device_destroy(macvtap_class, devt);
                macvtap_free_minor(vlan);
                break;
        }

        return NOTIFY_DONE;
}
static struct notifier_block macvtap_notifier_block __read_mostly = {
        .notifier_call  = macvtap_device_event,
};
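/*
 * Module init/exit: reserve the chardev region, register the cdev, the
 * "macvtap" device class, the netdevice notifier and the rtnl link ops;
 * exit tears everything down in the reverse order.
 */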
static int macvtap_init(void)
{
        int err;

        err = alloc_chrdev_region(&macvtap_major, 0,
                                  MACVTAP_NUM_DEVS, "macvtap");
        if (err)
                goto out1;

        cdev_init(&macvtap_cdev, &macvtap_fops);
        err = cdev_add(&macvtap_cdev, macvtap_major, MACVTAP_NUM_DEVS);
        if (err)
                goto out2;

        macvtap_class = class_create(THIS_MODULE, "macvtap");
        if (IS_ERR(macvtap_class)) {
                err = PTR_ERR(macvtap_class);
                goto out3;
        }

        err = register_netdevice_notifier(&macvtap_notifier_block);
        if (err)
                goto out4;

        err = macvlan_link_register(&macvtap_link_ops);
        if (err)
                goto out5;

        return 0;

out5:
        unregister_netdevice_notifier(&macvtap_notifier_block);
out4:
        class_unregister(macvtap_class);
out3:
        cdev_del(&macvtap_cdev);
out2:
        unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS);
out1:
        return err;
}
module_init(macvtap_init);
static void macvtap_exit(void)
{
        rtnl_link_unregister(&macvtap_link_ops);
        unregister_netdevice_notifier(&macvtap_notifier_block);
        class_unregister(macvtap_class);
        cdev_del(&macvtap_cdev);
        unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS);
}
module_exit(macvtap_exit);
MODULE_ALIAS_RTNL_LINK("macvtap");
MODULE_AUTHOR("Arnd Bergmann <arnd@arndb.de>");
MODULE_LICENSE("GPL");