/*
 * Copyright (C) 2007 OpenVZ http://openvz.org, SWsoft Inc
 *
 * Author: Pavel Emelianov <xemul@openvz.org>
 * Ethtool interface from: Eric W. Biederman <ebiederm@xmission.com>
 */
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/u64_stats_sync.h>

#include <net/rtnetlink.h>
#include <net/dst.h>
#include <net/xfrm.h>
#include <net/xdp.h>
#include <linux/veth.h>
#include <linux/module.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/ptr_ring.h>
#include <linux/bpf_trace.h>
#define DRV_NAME	"veth"
#define DRV_VERSION	"1.0"

#define VETH_XDP_FLAG		BIT(0)
#define VETH_RING_SIZE		256
#define VETH_XDP_HEADROOM	(XDP_PACKET_HEADROOM + NET_IP_ALIGN)

/* Separating two types of XDP xmit */
#define VETH_XDP_TX		BIT(0)
#define VETH_XDP_REDIR		BIT(1)
struct pcpu_vstats {
        u64			packets;
        u64			bytes;
        struct u64_stats_sync	syncp;
};
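/* Per-RX-queue state: the NAPI context, the XDP program attached to this
 * queue, and the ptr_ring that the peer produces packets into.
 */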
struct veth_rq {
        struct napi_struct	xdp_napi;
        struct net_device	*dev;
        struct bpf_prog __rcu	*xdp_prog;
        struct xdp_mem_info	xdp_mem;
        bool			rx_notify_masked;
        struct ptr_ring		xdp_ring;
        struct xdp_rxq_info	xdp_rxq;
};
struct veth_priv {
        struct net_device __rcu	*peer;
        atomic64_t		dropped;
        struct bpf_prog		*_xdp_prog;
        struct veth_rq		*rq;
        unsigned int		requested_headroom;
};
static struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "peer_ifindex" },
};
static int veth_get_link_ksettings(struct net_device *dev,
                                   struct ethtool_link_ksettings *cmd)
{
        cmd->base.speed		= SPEED_10000;
        cmd->base.duplex	= DUPLEX_FULL;
        cmd->base.port		= PORT_TP;
        cmd->base.autoneg	= AUTONEG_DISABLE;
        return 0;
}
static void veth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
        strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
        strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}
static void veth_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
        switch (stringset) {
        case ETH_SS_STATS:
                memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
                break;
        }
}
static int veth_get_sset_count(struct net_device *dev, int sset)
{
        switch (sset) {
        case ETH_SS_STATS:
                return ARRAY_SIZE(ethtool_stats_keys);
        default:
                return -EOPNOTSUPP;
        }
}
static void veth_get_ethtool_stats(struct net_device *dev,
                                   struct ethtool_stats *stats, u64 *data)
{
        struct veth_priv *priv = netdev_priv(dev);
        struct net_device *peer = rtnl_dereference(priv->peer);

        data[0] = peer ? peer->ifindex : 0;
}
static const struct ethtool_ops veth_ethtool_ops = {
        .get_drvinfo		= veth_get_drvinfo,
        .get_link		= ethtool_op_get_link,
        .get_strings		= veth_get_strings,
        .get_sset_count		= veth_get_sset_count,
        .get_ethtool_stats	= veth_get_ethtool_stats,
        .get_link_ksettings	= veth_get_link_ksettings,
};
/* general routines */
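/* Both sk_buff and xdp_frame pointers travel through the same per-queue
 * ptr_ring; xdp_frame pointers are tagged with VETH_XDP_FLAG in bit 0 so
 * the consumer can tell the two apart.
 */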
static bool veth_is_xdp_frame(void *ptr)
{
        return (unsigned long)ptr & VETH_XDP_FLAG;
}

static void *veth_ptr_to_xdp(void *ptr)
{
        return (void *)((unsigned long)ptr & ~VETH_XDP_FLAG);
}

static void *veth_xdp_to_ptr(void *ptr)
{
        return (void *)((unsigned long)ptr | VETH_XDP_FLAG);
}

static void veth_ptr_free(void *ptr)
{
        if (veth_is_xdp_frame(ptr))
                xdp_return_frame(veth_ptr_to_xdp(ptr));
        else
                kfree_skb(ptr);
}
static void __veth_xdp_flush(struct veth_rq *rq)
{
        /* Write ptr_ring before reading rx_notify_masked */
        smp_mb();
        if (!rq->rx_notify_masked) {
                rq->rx_notify_masked = true;
                napi_schedule(&rq->xdp_napi);
        }
}
static int veth_xdp_rx(struct veth_rq *rq, struct sk_buff *skb)
{
        if (unlikely(ptr_ring_produce(&rq->xdp_ring, skb))) {
                dev_kfree_skb_any(skb);
                return NET_RX_DROP;
        }

        return NET_RX_SUCCESS;
}
static int veth_forward_skb(struct net_device *dev, struct sk_buff *skb,
                            struct veth_rq *rq, bool xdp)
{
        return __dev_forward_skb(dev, skb) ?: xdp ?
                veth_xdp_rx(rq, skb) :
                netif_rx(skb);
}
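/* Transmit path: every frame sent on one side is delivered to the peer,
 * either through the normal netif_rx() path or, when the peer has an XDP
 * program attached, by queueing the skb on the peer's xdp_ring.
 */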
static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
        struct veth_rq *rq = NULL;
        struct net_device *rcv;
        int length = skb->len;
        bool rcv_xdp = false;
        int rxq;

        rcu_read_lock();
        rcv = rcu_dereference(priv->peer);
        if (unlikely(!rcv)) {
                kfree_skb(skb);
                goto drop;
        }

        rcv_priv = netdev_priv(rcv);
        rxq = skb_get_queue_mapping(skb);
        if (rxq < rcv->real_num_rx_queues) {
                rq = &rcv_priv->rq[rxq];
                rcv_xdp = rcu_access_pointer(rq->xdp_prog);
                if (rcv_xdp)
                        skb_record_rx_queue(skb, rxq);
        }

        if (likely(veth_forward_skb(rcv, skb, rq, rcv_xdp) == NET_RX_SUCCESS)) {
                struct pcpu_vstats *stats = this_cpu_ptr(dev->vstats);

                u64_stats_update_begin(&stats->syncp);
                stats->bytes += length;
                stats->packets++;
                u64_stats_update_end(&stats->syncp);
        } else {
drop:
                atomic64_inc(&priv->dropped);
        }

        if (rcv_xdp)
                __veth_xdp_flush(rq);

        rcu_read_unlock();

        return NETDEV_TX_OK;
}
static u64 veth_stats_one(struct pcpu_vstats *result, struct net_device *dev)
{
        struct veth_priv *priv = netdev_priv(dev);
        int cpu;

        result->packets = 0;
        result->bytes = 0;
        for_each_possible_cpu(cpu) {
                struct pcpu_vstats *stats = per_cpu_ptr(dev->vstats, cpu);
                u64 packets, bytes;
                unsigned int start;

                do {
                        start = u64_stats_fetch_begin_irq(&stats->syncp);
                        packets = stats->packets;
                        bytes = stats->bytes;
                } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
                result->packets += packets;
                result->bytes += bytes;
        }
        return atomic64_read(&priv->dropped);
}
static void veth_get_stats64(struct net_device *dev,
                             struct rtnl_link_stats64 *tot)
{
        struct veth_priv *priv = netdev_priv(dev);
        struct net_device *peer;
        struct pcpu_vstats one;

        tot->tx_dropped = veth_stats_one(&one, dev);
        tot->tx_bytes = one.bytes;
        tot->tx_packets = one.packets;

        rcu_read_lock();
        peer = rcu_dereference(priv->peer);
        if (peer) {
                tot->rx_dropped = veth_stats_one(&one, peer);
                tot->rx_bytes = one.bytes;
                tot->rx_packets = one.packets;
        }
        rcu_read_unlock();
}
/* fake multicast ability */
static void veth_set_multicast_list(struct net_device *dev)
{
}
static struct sk_buff *veth_build_skb(void *head, int headroom, int len,
                                      int buflen)
{
        struct sk_buff *skb;

        if (!buflen) {
                buflen = SKB_DATA_ALIGN(headroom + len) +
                         SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
        }
        skb = build_skb(head, buflen);
        if (!skb)
                return NULL;

        skb_reserve(skb, headroom);
        skb_put(skb, len);

        return skb;
}
static int veth_select_rxq(struct net_device *dev)
{
        return smp_processor_id() % dev->real_num_rx_queues;
}
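/* .ndo_xdp_xmit handler: place already-converted xdp_frames directly on the
 * peer's xdp_ring. This only works while the peer has an XDP program loaded,
 * since that is what guarantees the ring and NAPI context exist.
 */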
static int veth_xdp_xmit(struct net_device *dev, int n,
                         struct xdp_frame **frames, u32 flags)
{
        struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
        struct net_device *rcv;
        unsigned int max_len;
        struct veth_rq *rq;
        int i, drops = 0;

        if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
                return -EINVAL;

        rcv = rcu_dereference(priv->peer);
        if (unlikely(!rcv))
                return -ENXIO;

        rcv_priv = netdev_priv(rcv);
        rq = &rcv_priv->rq[veth_select_rxq(rcv)];
        /* Non-NULL xdp_prog ensures that xdp_ring is initialized on receive
         * side. This means an XDP program is loaded on the peer and the peer
         * device is up.
         */
        if (!rcu_access_pointer(rq->xdp_prog))
                return -ENXIO;

        max_len = rcv->mtu + rcv->hard_header_len + VLAN_HLEN;

        spin_lock(&rq->xdp_ring.producer_lock);
        for (i = 0; i < n; i++) {
                struct xdp_frame *frame = frames[i];
                void *ptr = veth_xdp_to_ptr(frame);

                if (unlikely(frame->len > max_len ||
                             __ptr_ring_produce(&rq->xdp_ring, ptr))) {
                        xdp_return_frame_rx_napi(frame);
                        drops++;
                }
        }
        spin_unlock(&rq->xdp_ring.producer_lock);

        if (flags & XDP_XMIT_FLUSH)
                __veth_xdp_flush(rq);

        return n - drops;
}
static void veth_xdp_flush(struct net_device *dev)
{
        struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
        struct net_device *rcv;
        struct veth_rq *rq;

        rcu_read_lock();
        rcv = rcu_dereference(priv->peer);
        if (unlikely(!rcv))
                goto out;

        rcv_priv = netdev_priv(rcv);
        rq = &rcv_priv->rq[veth_select_rxq(rcv)];
        /* xdp_ring is initialized on receive side? */
        if (unlikely(!rcu_access_pointer(rq->xdp_prog)))
                goto out;

        __veth_xdp_flush(rq);
out:
        rcu_read_unlock();
}
static int veth_xdp_tx(struct net_device *dev, struct xdp_buff *xdp)
{
        struct xdp_frame *frame = convert_to_xdp_frame(xdp);

        if (unlikely(!frame))
                return -EOVERFLOW;

        return veth_xdp_xmit(dev, 1, &frame, 0);
}
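/* Run the attached XDP program on an xdp_frame consumed from the ring and,
 * on XDP_PASS, rebuild an skb around the frame's data so it can continue up
 * the normal stack via napi_gro_receive().
 */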
static struct sk_buff *veth_xdp_rcv_one(struct veth_rq *rq,
                                        struct xdp_frame *frame,
                                        unsigned int *xdp_xmit)
{
        void *hard_start = frame->data - frame->headroom;
        int len = frame->len, delta = 0;
        struct xdp_frame orig_frame;
        struct bpf_prog *xdp_prog;
        unsigned int headroom;
        struct sk_buff *skb;

        /* bpf_xdp_adjust_head() assures BPF cannot access xdp_frame area */
        hard_start -= sizeof(struct xdp_frame);

        rcu_read_lock();
        xdp_prog = rcu_dereference(rq->xdp_prog);
        if (likely(xdp_prog)) {
                struct xdp_buff xdp;
                u32 act;

                xdp.data_hard_start = hard_start;
                xdp.data = frame->data;
                xdp.data_end = frame->data + frame->len;
                xdp.data_meta = frame->data - frame->metasize;
                xdp.rxq = &rq->xdp_rxq;

                act = bpf_prog_run_xdp(xdp_prog, &xdp);

                switch (act) {
                case XDP_PASS:
                        delta = frame->data - xdp.data;
                        len = xdp.data_end - xdp.data;
                        break;
                case XDP_TX:
                        orig_frame = *frame;
                        xdp.rxq->mem = frame->mem;
                        if (unlikely(veth_xdp_tx(rq->dev, &xdp) < 0)) {
                                trace_xdp_exception(rq->dev, xdp_prog, act);
                                frame = &orig_frame;
                                goto err_xdp;
                        }
                        *xdp_xmit |= VETH_XDP_TX;
                        rcu_read_unlock();
                        goto xdp_xmit;
                case XDP_REDIRECT:
                        orig_frame = *frame;
                        xdp.rxq->mem = frame->mem;
                        if (xdp_do_redirect(rq->dev, &xdp, xdp_prog)) {
                                frame = &orig_frame;
                                goto err_xdp;
                        }
                        *xdp_xmit |= VETH_XDP_REDIR;
                        rcu_read_unlock();
                        goto xdp_xmit;
                default:
                        bpf_warn_invalid_xdp_action(act);
                case XDP_ABORTED:
                        trace_xdp_exception(rq->dev, xdp_prog, act);
                case XDP_DROP:
                        goto err_xdp;
                }
        }
        rcu_read_unlock();

        headroom = sizeof(struct xdp_frame) + frame->headroom - delta;
        skb = veth_build_skb(hard_start, headroom, len, 0);
        if (!skb) {
                xdp_return_frame(frame);
                goto err;
        }

        xdp_scrub_frame(frame);
        skb->protocol = eth_type_trans(skb, rq->dev);
err:
        return skb;
err_xdp:
        rcu_read_unlock();
        xdp_return_frame(frame);
xdp_xmit:
        return NULL;
}
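/* Run the attached XDP program on an skb queued by the peer. If the skb does
 * not have enough headroom (or is shared/nonlinear), the data is first copied
 * into a freshly allocated page so the program can safely adjust head/tail.
 */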
static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq, struct sk_buff *skb,
                                        unsigned int *xdp_xmit)
{
        u32 pktlen, headroom, act, metalen;
        void *orig_data, *orig_data_end;
        struct bpf_prog *xdp_prog;
        int mac_len, delta, off;
        struct xdp_buff xdp;

        rcu_read_lock();
        xdp_prog = rcu_dereference(rq->xdp_prog);
        if (unlikely(!xdp_prog)) {
                rcu_read_unlock();
                goto out;
        }

        mac_len = skb->data - skb_mac_header(skb);
        pktlen = skb->len + mac_len;
        headroom = skb_headroom(skb) - mac_len;

        if (skb_shared(skb) || skb_head_is_locked(skb) ||
            skb_is_nonlinear(skb) || headroom < XDP_PACKET_HEADROOM) {
                struct sk_buff *nskb;
                int size, head_off;
                void *head, *start;
                struct page *page;

                size = SKB_DATA_ALIGN(VETH_XDP_HEADROOM + pktlen) +
                       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
                if (size > PAGE_SIZE)
                        goto drop;

                page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
                if (!page)
                        goto drop;

                head = page_address(page);
                start = head + VETH_XDP_HEADROOM;
                if (skb_copy_bits(skb, -mac_len, start, pktlen)) {
                        page_frag_free(head);
                        goto drop;
                }

                nskb = veth_build_skb(head,
                                      VETH_XDP_HEADROOM + mac_len, skb->len,
                                      PAGE_SIZE);
                if (!nskb) {
                        page_frag_free(head);
                        goto drop;
                }

                skb_copy_header(nskb, skb);
                head_off = skb_headroom(nskb) - skb_headroom(skb);
                skb_headers_offset_update(nskb, head_off);
                consume_skb(skb);
                skb = nskb;
        }

        xdp.data_hard_start = skb->head;
        xdp.data = skb_mac_header(skb);
        xdp.data_end = xdp.data + pktlen;
        xdp.data_meta = xdp.data;
        xdp.rxq = &rq->xdp_rxq;
        orig_data = xdp.data;
        orig_data_end = xdp.data_end;

        act = bpf_prog_run_xdp(xdp_prog, &xdp);

        switch (act) {
        case XDP_PASS:
                break;
        case XDP_TX:
                get_page(virt_to_page(xdp.data));
                consume_skb(skb);
                xdp.rxq->mem = rq->xdp_mem;
                if (unlikely(veth_xdp_tx(rq->dev, &xdp) < 0)) {
                        trace_xdp_exception(rq->dev, xdp_prog, act);
                        goto err_xdp;
                }
                *xdp_xmit |= VETH_XDP_TX;
                rcu_read_unlock();
                goto xdp_xmit;
        case XDP_REDIRECT:
                get_page(virt_to_page(xdp.data));
                consume_skb(skb);
                xdp.rxq->mem = rq->xdp_mem;
                if (xdp_do_redirect(rq->dev, &xdp, xdp_prog))
                        goto err_xdp;
                *xdp_xmit |= VETH_XDP_REDIR;
                rcu_read_unlock();
                goto xdp_xmit;
        default:
                bpf_warn_invalid_xdp_action(act);
        case XDP_ABORTED:
                trace_xdp_exception(rq->dev, xdp_prog, act);
        case XDP_DROP:
                goto drop;
        }
        rcu_read_unlock();

        delta = orig_data - xdp.data;
        off = mac_len + delta;
        if (off > 0)
                __skb_push(skb, off);
        else if (off < 0)
                __skb_pull(skb, -off);
        skb->mac_header -= delta;
        off = xdp.data_end - orig_data_end;
        if (off != 0)
                __skb_put(skb, off);
        skb->protocol = eth_type_trans(skb, rq->dev);

        metalen = xdp.data - xdp.data_meta;
        if (metalen)
                skb_metadata_set(skb, metalen);
out:
        return skb;
drop:
        rcu_read_unlock();
        kfree_skb(skb);
        return NULL;
err_xdp:
        rcu_read_unlock();
        page_frag_free(xdp.data);
xdp_xmit:
        return NULL;
}
static int veth_xdp_rcv(struct veth_rq *rq, int budget, unsigned int *xdp_xmit)
{
        int i, done = 0;

        for (i = 0; i < budget; i++) {
                void *ptr = __ptr_ring_consume(&rq->xdp_ring);
                struct sk_buff *skb;

                if (!ptr)
                        break;

                if (veth_is_xdp_frame(ptr)) {
                        skb = veth_xdp_rcv_one(rq, veth_ptr_to_xdp(ptr),
                                               xdp_xmit);
                } else {
                        skb = veth_xdp_rcv_skb(rq, ptr, xdp_xmit);
                }

                if (skb)
                        napi_gro_receive(&rq->xdp_napi, skb);

                done++;
        }

        return done;
}
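/* NAPI handler: drain up to @budget entries from the ring, then re-enable
 * the producer-side wakeup. xdp_xmit accumulates whether any XDP_TX or
 * XDP_REDIRECT happened so the corresponding flushes run once per poll.
 */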
static int veth_poll(struct napi_struct *napi, int budget)
{
        struct veth_rq *rq =
                container_of(napi, struct veth_rq, xdp_napi);
        unsigned int xdp_xmit = 0;
        int done;

        xdp_set_return_frame_no_direct();
        done = veth_xdp_rcv(rq, budget, &xdp_xmit);

        if (done < budget && napi_complete_done(napi, done)) {
                /* Write rx_notify_masked before reading ptr_ring */
                smp_store_mb(rq->rx_notify_masked, false);
                if (unlikely(!__ptr_ring_empty(&rq->xdp_ring))) {
                        rq->rx_notify_masked = true;
                        napi_schedule(&rq->xdp_napi);
                }
        }

        if (xdp_xmit & VETH_XDP_TX)
                veth_xdp_flush(rq->dev);
        if (xdp_xmit & VETH_XDP_REDIR)
                xdp_do_flush_map();
        xdp_clear_return_frame_no_direct();

        return done;
}
static int veth_napi_add(struct net_device *dev)
{
        struct veth_priv *priv = netdev_priv(dev);
        int err, i;

        for (i = 0; i < dev->real_num_rx_queues; i++) {
                struct veth_rq *rq = &priv->rq[i];

                err = ptr_ring_init(&rq->xdp_ring, VETH_RING_SIZE, GFP_KERNEL);
                if (err)
                        goto err_xdp_ring;
        }

        for (i = 0; i < dev->real_num_rx_queues; i++) {
                struct veth_rq *rq = &priv->rq[i];

                netif_napi_add(dev, &rq->xdp_napi, veth_poll, NAPI_POLL_WEIGHT);
                napi_enable(&rq->xdp_napi);
        }

        return 0;
err_xdp_ring:
        for (i--; i >= 0; i--)
                ptr_ring_cleanup(&priv->rq[i].xdp_ring, veth_ptr_free);

        return err;
}
static void veth_napi_del(struct net_device *dev)
{
        struct veth_priv *priv = netdev_priv(dev);
        int i;

        for (i = 0; i < dev->real_num_rx_queues; i++) {
                struct veth_rq *rq = &priv->rq[i];

                napi_disable(&rq->xdp_napi);
                napi_hash_del(&rq->xdp_napi);
        }
        synchronize_net();

        for (i = 0; i < dev->real_num_rx_queues; i++) {
                struct veth_rq *rq = &priv->rq[i];

                netif_napi_del(&rq->xdp_napi);
                rq->rx_notify_masked = false;
                ptr_ring_cleanup(&rq->xdp_ring, veth_ptr_free);
        }
}
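/* Register an xdp_rxq_info and NAPI context for every RX queue the first
 * time XDP is enabled, then publish the program to each queue's xdp_prog
 * pointer.
 */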
static int veth_enable_xdp(struct net_device *dev)
{
        struct veth_priv *priv = netdev_priv(dev);
        int err, i;

        if (!xdp_rxq_info_is_reg(&priv->rq[0].xdp_rxq)) {
                for (i = 0; i < dev->real_num_rx_queues; i++) {
                        struct veth_rq *rq = &priv->rq[i];

                        err = xdp_rxq_info_reg(&rq->xdp_rxq, dev, i);
                        if (err < 0)
                                goto err_rxq_reg;

                        err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
                                                         MEM_TYPE_PAGE_SHARED,
                                                         NULL);
                        if (err < 0)
                                goto err_reg_mem;

                        /* Save original mem info as it can be overwritten */
                        rq->xdp_mem = rq->xdp_rxq.mem;
                }

                err = veth_napi_add(dev);
                if (err)
                        goto err_rxq_reg;
        }

        for (i = 0; i < dev->real_num_rx_queues; i++)
                rcu_assign_pointer(priv->rq[i].xdp_prog, priv->_xdp_prog);

        return 0;
err_reg_mem:
        xdp_rxq_info_unreg(&priv->rq[i].xdp_rxq);
err_rxq_reg:
        for (i--; i >= 0; i--)
                xdp_rxq_info_unreg(&priv->rq[i].xdp_rxq);

        return err;
}
static void veth_disable_xdp(struct net_device *dev)
{
        struct veth_priv *priv = netdev_priv(dev);
        int i;

        for (i = 0; i < dev->real_num_rx_queues; i++)
                rcu_assign_pointer(priv->rq[i].xdp_prog, NULL);
        veth_napi_del(dev);
        for (i = 0; i < dev->real_num_rx_queues; i++) {
                struct veth_rq *rq = &priv->rq[i];

                rq->xdp_rxq.mem = rq->xdp_mem;
                xdp_rxq_info_unreg(&rq->xdp_rxq);
        }
}
static int veth_open(struct net_device *dev)
{
        struct veth_priv *priv = netdev_priv(dev);
        struct net_device *peer = rtnl_dereference(priv->peer);
        int err;

        if (!peer)
                return -ENOTCONN;

        if (priv->_xdp_prog) {
                err = veth_enable_xdp(dev);
                if (err)
                        return err;
        }

        if (peer->flags & IFF_UP) {
                netif_carrier_on(dev);
                netif_carrier_on(peer);
        }

        return 0;
}
static int veth_close(struct net_device *dev)
{
        struct veth_priv *priv = netdev_priv(dev);
        struct net_device *peer = rtnl_dereference(priv->peer);

        netif_carrier_off(dev);
        if (peer)
                netif_carrier_off(peer);

        veth_disable_xdp(dev);

        return 0;
}
static int is_valid_veth_mtu(int mtu)
{
        return mtu >= ETH_MIN_MTU && mtu <= ETH_MAX_MTU;
}
static int veth_alloc_queues(struct net_device *dev)
{
        struct veth_priv *priv = netdev_priv(dev);
        int i;

        priv->rq = kcalloc(dev->num_rx_queues, sizeof(*priv->rq), GFP_KERNEL);
        if (!priv->rq)
                return -ENOMEM;

        for (i = 0; i < dev->num_rx_queues; i++)
                priv->rq[i].dev = dev;

        return 0;
}

static void veth_free_queues(struct net_device *dev)
{
        struct veth_priv *priv = netdev_priv(dev);

        kfree(priv->rq);
}
static int veth_dev_init(struct net_device *dev)
{
        int err;

        dev->vstats = netdev_alloc_pcpu_stats(struct pcpu_vstats);
        if (!dev->vstats)
                return -ENOMEM;

        err = veth_alloc_queues(dev);
        if (err) {
                free_percpu(dev->vstats);
                return err;
        }

        return 0;
}

static void veth_dev_free(struct net_device *dev)
{
        veth_free_queues(dev);
        free_percpu(dev->vstats);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void veth_poll_controller(struct net_device *dev)
{
        /* veth only receives frames when its peer sends one.
         * Since it has nothing to do with disabling irqs, we are guaranteed
         * never to have pending data when we poll for it so
         * there is nothing to do here.
         *
         * We need this though so netpoll recognizes us as an interface that
         * supports polling, which enables bridge devices in virt setups to
         * still use netconsole.
         */
}
#endif	/* CONFIG_NET_POLL_CONTROLLER */
static int veth_get_iflink(const struct net_device *dev)
{
        struct veth_priv *priv = netdev_priv(dev);
        struct net_device *peer;
        int iflink;

        rcu_read_lock();
        peer = rcu_dereference(priv->peer);
        iflink = peer ? peer->ifindex : 0;
        rcu_read_unlock();

        return iflink;
}
static netdev_features_t veth_fix_features(struct net_device *dev,
                                           netdev_features_t features)
{
        struct veth_priv *priv = netdev_priv(dev);
        struct net_device *peer;

        peer = rtnl_dereference(priv->peer);
        if (peer) {
                struct veth_priv *peer_priv = netdev_priv(peer);

                if (peer_priv->_xdp_prog)
                        features &= ~NETIF_F_GSO_SOFTWARE;
        }

        return features;
}
static void veth_set_rx_headroom(struct net_device *dev, int new_hr)
{
        struct veth_priv *peer_priv, *priv = netdev_priv(dev);
        struct net_device *peer;

        if (new_hr < 0)
                new_hr = 0;

        rcu_read_lock();
        peer = rcu_dereference(priv->peer);
        if (unlikely(!peer))
                goto out;

        peer_priv = netdev_priv(peer);
        priv->requested_headroom = new_hr;
        new_hr = max(priv->requested_headroom, peer_priv->requested_headroom);
        dev->needed_headroom = new_hr;
        peer->needed_headroom = new_hr;

out:
        rcu_read_unlock();
}
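/* Attach or detach an XDP program. Attaching requires a connected peer, a
 * peer MTU that fits in a single page together with the XDP headroom, and at
 * least as many local RX queues as the peer has TX queues; while a program
 * is attached, software GSO is turned off on the peer.
 */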
static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog,
                        struct netlink_ext_ack *extack)
{
        struct veth_priv *priv = netdev_priv(dev);
        struct bpf_prog *old_prog;
        struct net_device *peer;
        unsigned int max_mtu;
        int err;

        old_prog = priv->_xdp_prog;
        priv->_xdp_prog = prog;
        peer = rtnl_dereference(priv->peer);

        if (prog) {
                if (!peer) {
                        NL_SET_ERR_MSG_MOD(extack, "Cannot set XDP when peer is detached");
                        err = -ENOTCONN;
                        goto err;
                }

                max_mtu = PAGE_SIZE - VETH_XDP_HEADROOM -
                          peer->hard_header_len -
                          SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
                if (peer->mtu > max_mtu) {
                        NL_SET_ERR_MSG_MOD(extack, "Peer MTU is too large to set XDP");
                        err = -ERANGE;
                        goto err;
                }

                if (dev->real_num_rx_queues < peer->real_num_tx_queues) {
                        NL_SET_ERR_MSG_MOD(extack, "XDP expects number of rx queues not less than peer tx queues");
                        err = -ENOSPC;
                        goto err;
                }

                if (dev->flags & IFF_UP) {
                        err = veth_enable_xdp(dev);
                        if (err) {
                                NL_SET_ERR_MSG_MOD(extack, "Setup for XDP failed");
                                goto err;
                        }
                }

                if (!old_prog) {
                        peer->hw_features &= ~NETIF_F_GSO_SOFTWARE;
                        peer->max_mtu = max_mtu;
                }
        }

        if (old_prog) {
                if (!prog) {
                        if (dev->flags & IFF_UP)
                                veth_disable_xdp(dev);

                        if (peer) {
                                peer->hw_features |= NETIF_F_GSO_SOFTWARE;
                                peer->max_mtu = ETH_MAX_MTU;
                        }
                }
                bpf_prog_put(old_prog);
        }

        if ((!!old_prog ^ !!prog) && peer)
                netdev_update_features(peer);

        return 0;
err:
        priv->_xdp_prog = old_prog;

        return err;
}
static u32 veth_xdp_query(struct net_device *dev)
{
        struct veth_priv *priv = netdev_priv(dev);
        const struct bpf_prog *xdp_prog;

        xdp_prog = priv->_xdp_prog;
        if (xdp_prog)
                return xdp_prog->aux->id;

        return 0;
}
static int veth_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
        switch (xdp->command) {
        case XDP_SETUP_PROG:
                return veth_xdp_set(dev, xdp->prog, xdp->extack);
        case XDP_QUERY_PROG:
                xdp->prog_id = veth_xdp_query(dev);
                return 0;
        default:
                return -EINVAL;
        }
}
static const struct net_device_ops veth_netdev_ops = {
        .ndo_init		= veth_dev_init,
        .ndo_open		= veth_open,
        .ndo_stop		= veth_close,
        .ndo_start_xmit		= veth_xmit,
        .ndo_get_stats64	= veth_get_stats64,
        .ndo_set_rx_mode	= veth_set_multicast_list,
        .ndo_set_mac_address	= eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller	= veth_poll_controller,
#endif
        .ndo_get_iflink		= veth_get_iflink,
        .ndo_fix_features	= veth_fix_features,
        .ndo_features_check	= passthru_features_check,
        .ndo_set_rx_headroom	= veth_set_rx_headroom,
        .ndo_bpf		= veth_xdp,
        .ndo_xdp_xmit		= veth_xdp_xmit,
};
#define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HW_CSUM | \
		       NETIF_F_RXCSUM | NETIF_F_SCTP_CRC | NETIF_F_HIGHDMA | \
		       NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL | \
		       NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | \
		       NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_STAG_RX)
static void veth_setup(struct net_device *dev)
{
        ether_setup(dev);

        dev->priv_flags &= ~IFF_TX_SKB_SHARING;
        dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
        dev->priv_flags |= IFF_NO_QUEUE;
        dev->priv_flags |= IFF_PHONY_HEADROOM;

        dev->netdev_ops = &veth_netdev_ops;
        dev->ethtool_ops = &veth_ethtool_ops;
        dev->features |= NETIF_F_LLTX;
        dev->features |= VETH_FEATURES;
        dev->vlan_features = dev->features &
                             ~(NETIF_F_HW_VLAN_CTAG_TX |
                               NETIF_F_HW_VLAN_STAG_TX |
                               NETIF_F_HW_VLAN_CTAG_RX |
                               NETIF_F_HW_VLAN_STAG_RX);
        dev->needs_free_netdev = true;
        dev->priv_destructor = veth_dev_free;
        dev->max_mtu = ETH_MAX_MTU;

        dev->hw_features = VETH_FEATURES;
        dev->hw_enc_features = VETH_FEATURES;
        dev->mpls_features = NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE;
}
static int veth_validate(struct nlattr *tb[], struct nlattr *data[],
                         struct netlink_ext_ack *extack)
{
        if (tb[IFLA_ADDRESS]) {
                if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
                        return -EINVAL;
                if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
                        return -EADDRNOTAVAIL;
        }
        if (tb[IFLA_MTU]) {
                if (!is_valid_veth_mtu(nla_get_u32(tb[IFLA_MTU])))
                        return -EINVAL;
        }
        return 0;
}
static struct rtnl_link_ops veth_link_ops;
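/* rtnl newlink handler: the peer device is created and registered first,
 * then the requesting device, and finally the two priv->peer pointers are
 * tied together under RTNL.
 */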
static int veth_newlink(struct net *src_net, struct net_device *dev,
                        struct nlattr *tb[], struct nlattr *data[],
                        struct netlink_ext_ack *extack)
{
        int err;
        struct net_device *peer;
        struct veth_priv *priv;
        char ifname[IFNAMSIZ];
        struct nlattr *peer_tb[IFLA_MAX + 1], **tbp;
        unsigned char name_assign_type;
        struct ifinfomsg *ifmp;
        struct net *net;

        /*
         * create and register peer first
         */
        if (data != NULL && data[VETH_INFO_PEER] != NULL) {
                struct nlattr *nla_peer;

                nla_peer = data[VETH_INFO_PEER];
                ifmp = nla_data(nla_peer);
                err = rtnl_nla_parse_ifla(peer_tb,
                                          nla_data(nla_peer) + sizeof(struct ifinfomsg),
                                          nla_len(nla_peer) - sizeof(struct ifinfomsg),
                                          NULL);
                if (err < 0)
                        return err;

                err = veth_validate(peer_tb, NULL, extack);
                if (err < 0)
                        return err;

                tbp = peer_tb;
        } else {
                ifmp = NULL;
                tbp = tb;
        }

        if (ifmp && tbp[IFLA_IFNAME]) {
                nla_strlcpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ);
                name_assign_type = NET_NAME_USER;
        } else {
                snprintf(ifname, IFNAMSIZ, DRV_NAME "%%d");
                name_assign_type = NET_NAME_ENUM;
        }

        net = rtnl_link_get_net(src_net, tbp);
        if (IS_ERR(net))
                return PTR_ERR(net);

        peer = rtnl_create_link(net, ifname, name_assign_type,
                                &veth_link_ops, tbp);
        if (IS_ERR(peer)) {
                put_net(net);
                return PTR_ERR(peer);
        }

        if (!ifmp || !tbp[IFLA_ADDRESS])
                eth_hw_addr_random(peer);

        if (ifmp && (dev->ifindex != 0))
                peer->ifindex = ifmp->ifi_index;

        peer->gso_max_size = dev->gso_max_size;
        peer->gso_max_segs = dev->gso_max_segs;

        err = register_netdevice(peer);
        put_net(net);
        net = NULL;
        if (err < 0)
                goto err_register_peer;

        netif_carrier_off(peer);

        err = rtnl_configure_link(peer, ifmp);
        if (err < 0)
                goto err_configure_peer;

        /*
         * register dev last
         *
         * note that, since we've registered a new device, the dev's name
         * should be re-allocated
         */

        if (tb[IFLA_ADDRESS] == NULL)
                eth_hw_addr_random(dev);

        if (tb[IFLA_IFNAME])
                nla_strlcpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ);
        else
                snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d");

        err = register_netdevice(dev);
        if (err < 0)
                goto err_register_dev;

        netif_carrier_off(dev);

        /*
         * tie the devices together
         */
        priv = netdev_priv(dev);
        rcu_assign_pointer(priv->peer, peer);

        priv = netdev_priv(peer);
        rcu_assign_pointer(priv->peer, dev);

        return 0;

err_register_dev:
        /* nothing to do */
err_configure_peer:
        unregister_netdevice(peer);
        return err;

err_register_peer:
        free_netdev(peer);
        return err;
}
static void veth_dellink(struct net_device *dev, struct list_head *head)
{
        struct veth_priv *priv;
        struct net_device *peer;

        priv = netdev_priv(dev);
        peer = rtnl_dereference(priv->peer);

        /* Note : dellink() is called from default_device_exit_batch(),
         * before a rcu_synchronize() point. The devices are guaranteed
         * not being freed before one RCU grace period.
         */
        RCU_INIT_POINTER(priv->peer, NULL);
        unregister_netdevice_queue(dev, head);

        if (peer) {
                priv = netdev_priv(peer);
                RCU_INIT_POINTER(priv->peer, NULL);
                unregister_netdevice_queue(peer, head);
        }
}
static const struct nla_policy veth_policy[VETH_INFO_MAX + 1] = {
        [VETH_INFO_PEER]	= { .len = sizeof(struct ifinfomsg) },
};
static struct net *veth_get_link_net(const struct net_device *dev)
{
        struct veth_priv *priv = netdev_priv(dev);
        struct net_device *peer = rtnl_dereference(priv->peer);

        return peer ? dev_net(peer) : dev_net(dev);
}
static struct rtnl_link_ops veth_link_ops = {
        .kind		= DRV_NAME,
        .priv_size	= sizeof(struct veth_priv),
        .setup		= veth_setup,
        .validate	= veth_validate,
        .newlink	= veth_newlink,
        .dellink	= veth_dellink,
        .policy		= veth_policy,
        .maxtype	= VETH_INFO_MAX,
        .get_link_net	= veth_get_link_net,
};
static __init int veth_init(void)
{
        return rtnl_link_register(&veth_link_ops);
}

static __exit void veth_exit(void)
{
        rtnl_link_unregister(&veth_link_ops);
}

module_init(veth_init);
module_exit(veth_exit);

MODULE_DESCRIPTION("Virtual Ethernet Tunnel");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);