// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 2007 OpenVZ http://openvz.org, SWsoft Inc
 *
 * Author: Pavel Emelianov <xemul@openvz.org>
 * Ethtool interface from: Eric W. Biederman <ebiederm@xmission.com>
 */

#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/u64_stats_sync.h>

#include <net/rtnetlink.h>
#include <linux/veth.h>
#include <linux/module.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/ptr_ring.h>
#include <linux/bpf_trace.h>
#include <linux/net_tstamp.h>

#define DRV_NAME	"veth"
#define DRV_VERSION	"1.0"

#define VETH_XDP_FLAG		BIT(0)
#define VETH_RING_SIZE		256
#define VETH_XDP_HEADROOM	(XDP_PACKET_HEADROOM + NET_IP_ALIGN)

#define VETH_XDP_TX_BULK_SIZE	16
struct veth_stats {
	u64	rx_drops;
	/* xdp */
	u64	xdp_packets;
	u64	xdp_bytes;
	u64	xdp_redirect;
	u64	xdp_drops;
	u64	xdp_tx;
	u64	xdp_tx_err;
	u64	peer_tq_xdp_xmit;
	u64	peer_tq_xdp_xmit_err;
};

struct veth_rq_stats {
	struct veth_stats	vs;
	struct u64_stats_sync	syncp;
};

struct veth_rq {
	struct napi_struct	xdp_napi;
	struct net_device	*dev;
	struct bpf_prog __rcu	*xdp_prog;
	struct xdp_mem_info	xdp_mem;
	struct veth_rq_stats	stats;
	bool			rx_notify_masked;
	struct ptr_ring		xdp_ring;
	struct xdp_rxq_info	xdp_rxq;
};

struct veth_priv {
	struct net_device __rcu	*peer;
	atomic64_t		dropped;
	struct bpf_prog		*_xdp_prog;
	struct veth_rq		*rq;
	unsigned int		requested_headroom;
};
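/* Layout note (summarizing the structures above): each receive queue gets its
 * own NAPI context, ptr_ring and u64_stats-protected counters, while the
 * device-level veth_priv only holds the RCU-managed peer pointer, the
 * attached XDP program and the per-queue array.
 */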
struct veth_xdp_tx_bq {
	struct xdp_frame *q[VETH_XDP_TX_BULK_SIZE];
	unsigned int count;
};
struct veth_q_stat_desc {
	char	desc[ETH_GSTRING_LEN];
	size_t	offset;
};
#define VETH_RQ_STAT(m)	offsetof(struct veth_stats, m)

static const struct veth_q_stat_desc veth_rq_stats_desc[] = {
	{ "xdp_packets",	VETH_RQ_STAT(xdp_packets) },
	{ "xdp_bytes",		VETH_RQ_STAT(xdp_bytes) },
	{ "drops",		VETH_RQ_STAT(rx_drops) },
	{ "xdp_redirect",	VETH_RQ_STAT(xdp_redirect) },
	{ "xdp_drops",		VETH_RQ_STAT(xdp_drops) },
	{ "xdp_tx",		VETH_RQ_STAT(xdp_tx) },
	{ "xdp_tx_errors",	VETH_RQ_STAT(xdp_tx_err) },
};

#define VETH_RQ_STATS_LEN	ARRAY_SIZE(veth_rq_stats_desc)

static const struct veth_q_stat_desc veth_tq_stats_desc[] = {
	{ "xdp_xmit",		VETH_RQ_STAT(peer_tq_xdp_xmit) },
	{ "xdp_xmit_errors",	VETH_RQ_STAT(peer_tq_xdp_xmit_err) },
};

#define VETH_TQ_STATS_LEN	ARRAY_SIZE(veth_tq_stats_desc)
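/* Note: veth has no real transmit ring of its own, so the "tx_queue" strings
 * exported below are filled from the peer's per-rq peer_tq_xdp_xmit counters;
 * this is why the tx descriptors reuse VETH_RQ_STAT() offsets.
 */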
static struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "peer_ifindex" },
};
static int veth_get_link_ksettings(struct net_device *dev,
				   struct ethtool_link_ksettings *cmd)
{
	cmd->base.speed		= SPEED_10000;
	cmd->base.duplex	= DUPLEX_FULL;
	cmd->base.port		= PORT_TP;
	cmd->base.autoneg	= AUTONEG_DISABLE;
	return 0;
}
static void veth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}
static void veth_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	char *p = (char *)buf;
	int i, j;

	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(p, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
		p += sizeof(ethtool_stats_keys);
		for (i = 0; i < dev->real_num_rx_queues; i++) {
			for (j = 0; j < VETH_RQ_STATS_LEN; j++) {
				snprintf(p, ETH_GSTRING_LEN,
					 "rx_queue_%u_%.18s",
					 i, veth_rq_stats_desc[j].desc);
				p += ETH_GSTRING_LEN;
			}
		}
		for (i = 0; i < dev->real_num_tx_queues; i++) {
			for (j = 0; j < VETH_TQ_STATS_LEN; j++) {
				snprintf(p, ETH_GSTRING_LEN,
					 "tx_queue_%u_%.18s",
					 i, veth_tq_stats_desc[j].desc);
				p += ETH_GSTRING_LEN;
			}
		}
		break;
	}
}
static int veth_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(ethtool_stats_keys) +
		       VETH_RQ_STATS_LEN * dev->real_num_rx_queues +
		       VETH_TQ_STATS_LEN * dev->real_num_tx_queues;
	default:
		return -EOPNOTSUPP;
	}
}
static void veth_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *stats, u64 *data)
{
	struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);
	int i, j, idx;

	data[0] = peer ? peer->ifindex : 0;
	idx = 1;
	for (i = 0; i < dev->real_num_rx_queues; i++) {
		const struct veth_rq_stats *rq_stats = &priv->rq[i].stats;
		const void *stats_base = (void *)&rq_stats->vs;
		unsigned int start;
		size_t offset;

		do {
			start = u64_stats_fetch_begin_irq(&rq_stats->syncp);
			for (j = 0; j < VETH_RQ_STATS_LEN; j++) {
				offset = veth_rq_stats_desc[j].offset;
				data[idx + j] = *(u64 *)(stats_base + offset);
			}
		} while (u64_stats_fetch_retry_irq(&rq_stats->syncp, start));
		idx += VETH_RQ_STATS_LEN;
	}

	if (!peer)
		return;

	rcv_priv = netdev_priv(peer);
	for (i = 0; i < peer->real_num_rx_queues; i++) {
		const struct veth_rq_stats *rq_stats = &rcv_priv->rq[i].stats;
		const void *base = (void *)&rq_stats->vs;
		unsigned int start, tx_idx = idx;
		size_t offset;

		tx_idx += (i % dev->real_num_tx_queues) * VETH_TQ_STATS_LEN;
		do {
			start = u64_stats_fetch_begin_irq(&rq_stats->syncp);
			for (j = 0; j < VETH_TQ_STATS_LEN; j++) {
				offset = veth_tq_stats_desc[j].offset;
				data[tx_idx + j] += *(u64 *)(base + offset);
			}
		} while (u64_stats_fetch_retry_irq(&rq_stats->syncp, start));
	}
}
static const struct ethtool_ops veth_ethtool_ops = {
	.get_drvinfo		= veth_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_strings		= veth_get_strings,
	.get_sset_count		= veth_get_sset_count,
	.get_ethtool_stats	= veth_get_ethtool_stats,
	.get_link_ksettings	= veth_get_link_ksettings,
	.get_ts_info		= ethtool_op_get_ts_info,
};
/* general routines */

static bool veth_is_xdp_frame(void *ptr)
{
	return (unsigned long)ptr & VETH_XDP_FLAG;
}

static struct xdp_frame *veth_ptr_to_xdp(void *ptr)
{
	return (void *)((unsigned long)ptr & ~VETH_XDP_FLAG);
}

static void *veth_xdp_to_ptr(struct xdp_frame *xdp)
{
	return (void *)((unsigned long)xdp | VETH_XDP_FLAG);
}

static void veth_ptr_free(void *ptr)
{
	if (veth_is_xdp_frame(ptr))
		xdp_return_frame(veth_ptr_to_xdp(ptr));
	else
		kfree_skb(ptr);
}
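/* The xdp_ring carries two kinds of entries: plain sk_buffs queued by
 * veth_xmit() and xdp_frames queued by the peer's ndo_xdp_xmit path.  The
 * helpers above tag xdp_frame pointers with VETH_XDP_FLAG (bit 0, which is
 * never set in an aligned pointer) so the consumer can tell the two apart.
 */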
static void __veth_xdp_flush(struct veth_rq *rq)
{
	/* Write ptr_ring before reading rx_notify_masked */
	smp_mb();
	if (!rq->rx_notify_masked) {
		rq->rx_notify_masked = true;
		napi_schedule(&rq->xdp_napi);
	}
}

static int veth_xdp_rx(struct veth_rq *rq, struct sk_buff *skb)
{
	if (unlikely(ptr_ring_produce(&rq->xdp_ring, skb))) {
		dev_kfree_skb_any(skb);
		return NET_RX_DROP;
	}

	return NET_RX_SUCCESS;
}

static int veth_forward_skb(struct net_device *dev, struct sk_buff *skb,
			    struct veth_rq *rq, bool xdp)
{
	return __dev_forward_skb(dev, skb) ?: xdp ?
		veth_xdp_rx(rq, skb) :
		netif_rx(skb);
}
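/* Delivery to the peer thus takes one of two paths: with an XDP program
 * attached on the receiving side the skb is pushed into that queue's
 * ptr_ring and handled by NAPI, otherwise it goes through the regular
 * netif_rx() backlog.
 */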
static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
	struct veth_rq *rq = NULL;
	struct net_device *rcv;
	int length = skb->len;
	bool rcv_xdp = false;
	int rxq;

	rcu_read_lock();
	rcv = rcu_dereference(priv->peer);
	if (unlikely(!rcv)) {
		kfree_skb(skb);
		goto drop;
	}

	rcv_priv = netdev_priv(rcv);
	rxq = skb_get_queue_mapping(skb);
	if (rxq < rcv->real_num_rx_queues) {
		rq = &rcv_priv->rq[rxq];
		rcv_xdp = rcu_access_pointer(rq->xdp_prog);
		if (rcv_xdp)
			skb_record_rx_queue(skb, rxq);
	}

	skb_tx_timestamp(skb);
	if (likely(veth_forward_skb(rcv, skb, rq, rcv_xdp) == NET_RX_SUCCESS)) {
		if (!rcv_xdp)
			dev_lstats_add(dev, length);
	} else {
drop:
		atomic64_inc(&priv->dropped);
	}

	if (rcv_xdp)
		__veth_xdp_flush(rq);

	rcu_read_unlock();

	return NETDEV_TX_OK;
}
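/* Byte/packet counters for the software fast path live in dev->lstats and
 * are only bumped here when the peer has no XDP program (the XDP path
 * accounts packets in the per-queue veth_stats instead); failed forwards
 * are tracked in the atomic "dropped" counter.
 */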
static u64 veth_stats_tx(struct net_device *dev, u64 *packets, u64 *bytes)
{
	struct veth_priv *priv = netdev_priv(dev);

	dev_lstats_read(dev, packets, bytes);
	return atomic64_read(&priv->dropped);
}
static void veth_stats_rx(struct veth_stats *result, struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	int i;

	result->peer_tq_xdp_xmit_err = 0;
	result->xdp_packets = 0;
	result->xdp_tx_err = 0;
	result->xdp_bytes = 0;
	result->rx_drops = 0;
	for (i = 0; i < dev->num_rx_queues; i++) {
		u64 packets, bytes, drops, xdp_tx_err, peer_tq_xdp_xmit_err;
		struct veth_rq_stats *stats = &priv->rq[i].stats;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			peer_tq_xdp_xmit_err = stats->vs.peer_tq_xdp_xmit_err;
			xdp_tx_err = stats->vs.xdp_tx_err;
			packets = stats->vs.xdp_packets;
			bytes = stats->vs.xdp_bytes;
			drops = stats->vs.rx_drops;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
		result->peer_tq_xdp_xmit_err += peer_tq_xdp_xmit_err;
		result->xdp_tx_err += xdp_tx_err;
		result->xdp_packets += packets;
		result->xdp_bytes += bytes;
		result->rx_drops += drops;
	}
}
static void veth_get_stats64(struct net_device *dev,
			     struct rtnl_link_stats64 *tot)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer;
	struct veth_stats rx;
	u64 packets, bytes;

	tot->tx_dropped = veth_stats_tx(dev, &packets, &bytes);
	tot->tx_bytes = bytes;
	tot->tx_packets = packets;

	veth_stats_rx(&rx, dev);
	tot->tx_dropped += rx.xdp_tx_err;
	tot->rx_dropped = rx.rx_drops + rx.peer_tq_xdp_xmit_err;
	tot->rx_bytes = rx.xdp_bytes;
	tot->rx_packets = rx.xdp_packets;

	rcu_read_lock();
	peer = rcu_dereference(priv->peer);
	if (peer) {
		veth_stats_tx(peer, &packets, &bytes);
		tot->rx_bytes += bytes;
		tot->rx_packets += packets;

		veth_stats_rx(&rx, peer);
		tot->tx_dropped += rx.peer_tq_xdp_xmit_err;
		tot->rx_dropped += rx.xdp_tx_err;
		tot->tx_bytes += rx.xdp_bytes;
		tot->tx_packets += rx.xdp_packets;
	}
	rcu_read_unlock();
}
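/* Because every frame transmitted by one device is received by its peer, the
 * peer's lstats and XDP rx counters are folded into this device's rx/tx
 * totals above; that keeps the pair's statistics symmetric without any
 * shared counters.
 */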
/* fake multicast ability */
static void veth_set_multicast_list(struct net_device *dev)
{
}

static struct sk_buff *veth_build_skb(void *head, int headroom, int len,
				      int buflen)
{
	struct sk_buff *skb;

	skb = build_skb(head, buflen);
	if (!skb)
		return NULL;

	skb_reserve(skb, headroom);
	skb_put(skb, len);

	return skb;
}
static int veth_select_rxq(struct net_device *dev)
{
	return smp_processor_id() % dev->real_num_rx_queues;
}

static struct net_device *veth_peer_dev(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);

	/* Callers must be under RCU read side. */
	return rcu_dereference(priv->peer);
}
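/* veth_select_rxq() spreads XDP_REDIRECT/ndo_xdp_xmit traffic over the
 * peer's receive queues purely by the current CPU id, so frames sent from
 * the same CPU always land on the same peer rq (and therefore the same NAPI
 * context).
 */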
static int veth_xdp_xmit(struct net_device *dev, int n,
			 struct xdp_frame **frames,
			 u32 flags, bool ndo_xmit)
{
	struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
	int i, ret = -ENXIO, drops = 0;
	struct net_device *rcv;
	unsigned int max_len;
	struct veth_rq *rq;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	rcu_read_lock();
	rcv = rcu_dereference(priv->peer);
	if (unlikely(!rcv))
		goto out;

	rcv_priv = netdev_priv(rcv);
	rq = &rcv_priv->rq[veth_select_rxq(rcv)];
	/* Non-NULL xdp_prog ensures that xdp_ring is initialized on receive
	 * side. This means an XDP program is loaded on the peer and the peer
	 * device is up.
	 */
	if (!rcu_access_pointer(rq->xdp_prog))
		goto out;

	max_len = rcv->mtu + rcv->hard_header_len + VLAN_HLEN;

	spin_lock(&rq->xdp_ring.producer_lock);
	for (i = 0; i < n; i++) {
		struct xdp_frame *frame = frames[i];
		void *ptr = veth_xdp_to_ptr(frame);

		if (unlikely(frame->len > max_len ||
			     __ptr_ring_produce(&rq->xdp_ring, ptr))) {
			xdp_return_frame_rx_napi(frame);
			drops++;
		}
	}
	spin_unlock(&rq->xdp_ring.producer_lock);

	if (flags & XDP_XMIT_FLUSH)
		__veth_xdp_flush(rq);

	ret = n - drops;
	if (ndo_xmit) {
		u64_stats_update_begin(&rq->stats.syncp);
		rq->stats.vs.peer_tq_xdp_xmit += n - drops;
		rq->stats.vs.peer_tq_xdp_xmit_err += drops;
		u64_stats_update_end(&rq->stats.syncp);
	}

out:
	rcu_read_unlock();

	return ret;
}
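/* Frames are pushed with __ptr_ring_produce() while holding the ring's
 * producer_lock directly, so one lock round trip covers the whole bulk;
 * anything larger than the peer's MTU-derived max_len or not fitting in the
 * ring is returned immediately and counted as a drop.
 */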
static int veth_ndo_xdp_xmit(struct net_device *dev, int n,
			     struct xdp_frame **frames, u32 flags)
{
	int err;

	err = veth_xdp_xmit(dev, n, frames, flags, true);
	if (err < 0) {
		struct veth_priv *priv = netdev_priv(dev);

		atomic64_add(n, &priv->dropped);
	}

	return err;
}
static void veth_xdp_flush_bq(struct veth_rq *rq, struct veth_xdp_tx_bq *bq)
{
	int sent, i, err = 0;

	sent = veth_xdp_xmit(rq->dev, bq->count, bq->q, 0, false);
	if (sent < 0) {
		err = sent;
		sent = 0;
		for (i = 0; i < bq->count; i++)
			xdp_return_frame(bq->q[i]);
	}
	trace_xdp_bulk_tx(rq->dev, sent, bq->count - sent, err);

	u64_stats_update_begin(&rq->stats.syncp);
	rq->stats.vs.xdp_tx += sent;
	rq->stats.vs.xdp_tx_err += bq->count - sent;
	u64_stats_update_end(&rq->stats.syncp);

	bq->count = 0;
}
static void veth_xdp_flush(struct veth_rq *rq, struct veth_xdp_tx_bq *bq)
{
	struct veth_priv *rcv_priv, *priv = netdev_priv(rq->dev);
	struct net_device *rcv;
	struct veth_rq *rcv_rq;

	rcu_read_lock();
	veth_xdp_flush_bq(rq, bq);
	rcv = rcu_dereference(priv->peer);
	if (unlikely(!rcv))
		goto out;

	rcv_priv = netdev_priv(rcv);
	rcv_rq = &rcv_priv->rq[veth_select_rxq(rcv)];
	/* xdp_ring is initialized on receive side? */
	if (unlikely(!rcu_access_pointer(rcv_rq->xdp_prog)))
		goto out;

	__veth_xdp_flush(rcv_rq);
out:
	rcu_read_unlock();
}
static int veth_xdp_tx(struct veth_rq *rq, struct xdp_buff *xdp,
		       struct veth_xdp_tx_bq *bq)
{
	struct xdp_frame *frame = xdp_convert_buff_to_frame(xdp);

	if (unlikely(!frame))
		return -EOVERFLOW;

	if (unlikely(bq->count == VETH_XDP_TX_BULK_SIZE))
		veth_xdp_flush_bq(rq, bq);

	bq->q[bq->count++] = frame;

	return 0;
}
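/* XDP_TX frames are not sent one by one: they are collected in the on-stack
 * veth_xdp_tx_bq and only pushed to the peer once VETH_XDP_TX_BULK_SIZE
 * frames have accumulated or the NAPI poll ends (veth_xdp_flush()).
 */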
static struct sk_buff *veth_xdp_rcv_one(struct veth_rq *rq,
					struct xdp_frame *frame,
					struct veth_xdp_tx_bq *bq,
					struct veth_stats *stats)
{
	void *hard_start = frame->data - frame->headroom;
	int len = frame->len, delta = 0;
	struct xdp_frame orig_frame;
	struct bpf_prog *xdp_prog;
	unsigned int headroom;
	struct sk_buff *skb;

	/* bpf_xdp_adjust_head() assures BPF cannot access xdp_frame area */
	hard_start -= sizeof(struct xdp_frame);

	rcu_read_lock();
	xdp_prog = rcu_dereference(rq->xdp_prog);
	if (likely(xdp_prog)) {
		struct xdp_buff xdp;
		u32 act;

		xdp_convert_frame_to_buff(frame, &xdp);
		xdp.rxq = &rq->xdp_rxq;

		act = bpf_prog_run_xdp(xdp_prog, &xdp);

		switch (act) {
		case XDP_PASS:
			delta = frame->data - xdp.data;
			len = xdp.data_end - xdp.data;
			break;
		case XDP_TX:
			orig_frame = *frame;
			xdp.rxq->mem = frame->mem;
			if (unlikely(veth_xdp_tx(rq, &xdp, bq) < 0)) {
				trace_xdp_exception(rq->dev, xdp_prog, act);
				frame = &orig_frame;
				stats->rx_drops++;
				goto err_xdp;
			}
			stats->xdp_tx++;
			rcu_read_unlock();
			goto xdp_xmit;
		case XDP_REDIRECT:
			orig_frame = *frame;
			xdp.rxq->mem = frame->mem;
			if (xdp_do_redirect(rq->dev, &xdp, xdp_prog)) {
				frame = &orig_frame;
				stats->rx_drops++;
				goto err_xdp;
			}
			stats->xdp_redirect++;
			rcu_read_unlock();
			goto xdp_xmit;
		default:
			bpf_warn_invalid_xdp_action(act);
			fallthrough;
		case XDP_ABORTED:
			trace_xdp_exception(rq->dev, xdp_prog, act);
			fallthrough;
		case XDP_DROP:
			stats->xdp_drops++;
			goto err_xdp;
		}
	}
	rcu_read_unlock();

	headroom = sizeof(struct xdp_frame) + frame->headroom - delta;
	skb = veth_build_skb(hard_start, headroom, len, frame->frame_sz);
	if (!skb) {
		xdp_return_frame(frame);
		stats->rx_drops++;
		goto err;
	}

	xdp_release_frame(frame);
	xdp_scrub_frame(frame);
	skb->protocol = eth_type_trans(skb, rq->dev);
err:
	return skb;
err_xdp:
	rcu_read_unlock();
	xdp_return_frame(frame);
xdp_xmit:
	return NULL;
}
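/* When an xdp_frame is turned back into an skb above, the frame's own
 * metadata sits directly in front of the packet, so hard_start is pulled
 * back by sizeof(struct xdp_frame) and the same amount is added back to the
 * skb headroom; xdp_release_frame() and xdp_scrub_frame() then detach the
 * buffer from the originating XDP memory-model bookkeeping before the skb is
 * handed to the stack.
 */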
static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq,
					struct sk_buff *skb,
					struct veth_xdp_tx_bq *bq,
					struct veth_stats *stats)
{
	u32 pktlen, headroom, act, metalen;
	void *orig_data, *orig_data_end;
	struct bpf_prog *xdp_prog;
	int mac_len, delta, off;
	struct xdp_buff xdp;

	rcu_read_lock();
	xdp_prog = rcu_dereference(rq->xdp_prog);
	if (unlikely(!xdp_prog)) {
		rcu_read_unlock();
		goto out;
	}

	mac_len = skb->data - skb_mac_header(skb);
	pktlen = skb->len + mac_len;
	headroom = skb_headroom(skb) - mac_len;

	if (skb_shared(skb) || skb_head_is_locked(skb) ||
	    skb_is_nonlinear(skb) || headroom < XDP_PACKET_HEADROOM) {
		struct sk_buff *nskb;
		int size, head_off;
		void *head, *start;
		struct page *page;

		size = SKB_DATA_ALIGN(VETH_XDP_HEADROOM + pktlen) +
		       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
		if (size > PAGE_SIZE)
			goto drop;

		page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
		if (!page)
			goto drop;

		head = page_address(page);
		start = head + VETH_XDP_HEADROOM;
		if (skb_copy_bits(skb, -mac_len, start, pktlen)) {
			page_frag_free(head);
			goto drop;
		}

		nskb = veth_build_skb(head, VETH_XDP_HEADROOM + mac_len,
				      skb->len, PAGE_SIZE);
		if (!nskb) {
			page_frag_free(head);
			goto drop;
		}

		skb_copy_header(nskb, skb);
		head_off = skb_headroom(nskb) - skb_headroom(skb);
		skb_headers_offset_update(nskb, head_off);
		consume_skb(skb);
		skb = nskb;
	}

	xdp.data_hard_start = skb->head;
	xdp.data = skb_mac_header(skb);
	xdp.data_end = xdp.data + pktlen;
	xdp.data_meta = xdp.data;
	xdp.rxq = &rq->xdp_rxq;

	/* SKB "head" area always have tailroom for skb_shared_info */
	xdp.frame_sz = (void *)skb_end_pointer(skb) - xdp.data_hard_start;
	xdp.frame_sz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	orig_data = xdp.data;
	orig_data_end = xdp.data_end;

	act = bpf_prog_run_xdp(xdp_prog, &xdp);

	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		get_page(virt_to_page(xdp.data));
		consume_skb(skb);
		xdp.rxq->mem = rq->xdp_mem;
		if (unlikely(veth_xdp_tx(rq, &xdp, bq) < 0)) {
			trace_xdp_exception(rq->dev, xdp_prog, act);
			stats->rx_drops++;
			goto err_xdp;
		}
		stats->xdp_tx++;
		rcu_read_unlock();
		goto xdp_xmit;
	case XDP_REDIRECT:
		get_page(virt_to_page(xdp.data));
		consume_skb(skb);
		xdp.rxq->mem = rq->xdp_mem;
		if (xdp_do_redirect(rq->dev, &xdp, xdp_prog)) {
			stats->rx_drops++;
			goto err_xdp;
		}
		stats->xdp_redirect++;
		rcu_read_unlock();
		goto xdp_xmit;
	default:
		bpf_warn_invalid_xdp_action(act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(rq->dev, xdp_prog, act);
		fallthrough;
	case XDP_DROP:
		stats->xdp_drops++;
		goto xdp_drop;
	}
	rcu_read_unlock();

	/* check if bpf_xdp_adjust_head was used */
	delta = orig_data - xdp.data;
	off = mac_len + delta;
	if (off > 0)
		__skb_push(skb, off);
	else if (off < 0)
		__skb_pull(skb, -off);
	skb->mac_header -= delta;

	/* check if bpf_xdp_adjust_tail was used */
	off = xdp.data_end - orig_data_end;
	if (off != 0)
		__skb_put(skb, off); /* positive on grow, negative on shrink */
	skb->protocol = eth_type_trans(skb, rq->dev);

	metalen = xdp.data - xdp.data_meta;
	if (metalen)
		skb_metadata_set(skb, metalen);
out:
	return skb;
drop:
	stats->rx_drops++;
xdp_drop:
	rcu_read_unlock();
	kfree_skb(skb);
	return NULL;
err_xdp:
	rcu_read_unlock();
	page_frag_free(xdp.data);
xdp_xmit:
	return NULL;
}
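/* Before running the program on an skb, veth_xdp_rcv_skb() makes sure the
 * buffer is private, linear and has at least XDP_PACKET_HEADROOM in front of
 * the MAC header; anything that does not qualify is copied into a freshly
 * allocated page first, since an XDP program may freely rewrite headroom and
 * packet contents.
 */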
static int veth_xdp_rcv(struct veth_rq *rq, int budget,
			struct veth_xdp_tx_bq *bq,
			struct veth_stats *stats)
{
	int i, done = 0;

	for (i = 0; i < budget; i++) {
		void *ptr = __ptr_ring_consume(&rq->xdp_ring);
		struct sk_buff *skb;

		if (!ptr)
			break;

		if (veth_is_xdp_frame(ptr)) {
			struct xdp_frame *frame = veth_ptr_to_xdp(ptr);

			stats->xdp_bytes += frame->len;
			skb = veth_xdp_rcv_one(rq, frame, bq, stats);
		} else {
			skb = ptr;
			stats->xdp_bytes += skb->len;
			skb = veth_xdp_rcv_skb(rq, skb, bq, stats);
		}

		if (skb)
			napi_gro_receive(&rq->xdp_napi, skb);

		done++;
	}

	u64_stats_update_begin(&rq->stats.syncp);
	rq->stats.vs.xdp_redirect += stats->xdp_redirect;
	rq->stats.vs.xdp_bytes += stats->xdp_bytes;
	rq->stats.vs.xdp_drops += stats->xdp_drops;
	rq->stats.vs.rx_drops += stats->rx_drops;
	rq->stats.vs.xdp_packets += done;
	u64_stats_update_end(&rq->stats.syncp);

	return done;
}
static int veth_poll(struct napi_struct *napi, int budget)
{
	struct veth_rq *rq =
		container_of(napi, struct veth_rq, xdp_napi);
	struct veth_stats stats = {};
	struct veth_xdp_tx_bq bq;
	int done;

	bq.count = 0;

	xdp_set_return_frame_no_direct();
	done = veth_xdp_rcv(rq, budget, &bq, &stats);

	if (done < budget && napi_complete_done(napi, done)) {
		/* Write rx_notify_masked before reading ptr_ring */
		smp_store_mb(rq->rx_notify_masked, false);
		if (unlikely(!__ptr_ring_empty(&rq->xdp_ring))) {
			rq->rx_notify_masked = true;
			napi_schedule(&rq->xdp_napi);
		}
	}

	if (stats.xdp_tx > 0)
		veth_xdp_flush(rq, &bq);
	if (stats.xdp_redirect > 0)
		xdp_do_flush();
	xdp_clear_return_frame_no_direct();

	return done;
}
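/* The rx_notify_masked flag is the handshake between producers and this
 * poller: __veth_xdp_flush() only schedules NAPI when the flag is clear, and
 * veth_poll() clears it with smp_store_mb() before re-checking the ring so a
 * concurrently queued entry cannot be left behind without a reschedule.
 */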
static int veth_napi_add(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	int err, i;

	for (i = 0; i < dev->real_num_rx_queues; i++) {
		struct veth_rq *rq = &priv->rq[i];

		err = ptr_ring_init(&rq->xdp_ring, VETH_RING_SIZE, GFP_KERNEL);
		if (err)
			goto err_xdp_ring;
	}

	for (i = 0; i < dev->real_num_rx_queues; i++) {
		struct veth_rq *rq = &priv->rq[i];

		napi_enable(&rq->xdp_napi);
	}

	return 0;
err_xdp_ring:
	for (i--; i >= 0; i--)
		ptr_ring_cleanup(&priv->rq[i].xdp_ring, veth_ptr_free);

	return err;
}
static void veth_napi_del(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	int i;

	for (i = 0; i < dev->real_num_rx_queues; i++) {
		struct veth_rq *rq = &priv->rq[i];

		napi_disable(&rq->xdp_napi);
		__netif_napi_del(&rq->xdp_napi);
	}
	synchronize_net();

	for (i = 0; i < dev->real_num_rx_queues; i++) {
		struct veth_rq *rq = &priv->rq[i];

		rq->rx_notify_masked = false;
		ptr_ring_cleanup(&rq->xdp_ring, veth_ptr_free);
	}
}
static int veth_enable_xdp(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	int err, i;

	if (!xdp_rxq_info_is_reg(&priv->rq[0].xdp_rxq)) {
		for (i = 0; i < dev->real_num_rx_queues; i++) {
			struct veth_rq *rq = &priv->rq[i];

			netif_napi_add(dev, &rq->xdp_napi, veth_poll, NAPI_POLL_WEIGHT);
			err = xdp_rxq_info_reg(&rq->xdp_rxq, dev, i, rq->xdp_napi.napi_id);
			if (err < 0)
				goto err_rxq_reg;

			err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
							 MEM_TYPE_PAGE_SHARED,
							 NULL);
			if (err < 0)
				goto err_reg_mem;

			/* Save original mem info as it can be overwritten */
			rq->xdp_mem = rq->xdp_rxq.mem;
		}

		err = veth_napi_add(dev);
		if (err)
			goto err_rxq_reg;
	}

	for (i = 0; i < dev->real_num_rx_queues; i++)
		rcu_assign_pointer(priv->rq[i].xdp_prog, priv->_xdp_prog);

	return 0;
err_reg_mem:
	xdp_rxq_info_unreg(&priv->rq[i].xdp_rxq);
err_rxq_reg:
	for (i--; i >= 0; i--) {
		struct veth_rq *rq = &priv->rq[i];

		xdp_rxq_info_unreg(&rq->xdp_rxq);
		netif_napi_del(&rq->xdp_napi);
	}

	return err;
}
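/* NAPI contexts, xdp_rxq_info registrations and the ptr_rings are only set
 * up the first time an XDP program is attached (hence the rq[0] registration
 * check above) and are torn down again in veth_disable_xdp(); without XDP
 * the driver never touches the ring at all.
 */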
static void veth_disable_xdp(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	int i;

	for (i = 0; i < dev->real_num_rx_queues; i++)
		rcu_assign_pointer(priv->rq[i].xdp_prog, NULL);
	veth_napi_del(dev);
	for (i = 0; i < dev->real_num_rx_queues; i++) {
		struct veth_rq *rq = &priv->rq[i];

		rq->xdp_rxq.mem = rq->xdp_mem;
		xdp_rxq_info_unreg(&rq->xdp_rxq);
	}
}
static int veth_open(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);
	int err;

	if (!peer)
		return -ENOTCONN;

	if (priv->_xdp_prog) {
		err = veth_enable_xdp(dev);
		if (err)
			return err;
	}

	if (peer->flags & IFF_UP) {
		netif_carrier_on(dev);
		netif_carrier_on(peer);
	}

	return 0;
}
static int veth_close(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);

	netif_carrier_off(dev);
	if (peer)
		netif_carrier_off(peer);

	if (priv->_xdp_prog)
		veth_disable_xdp(dev);

	return 0;
}
static int is_valid_veth_mtu(int mtu)
{
	return mtu >= ETH_MIN_MTU && mtu <= ETH_MAX_MTU;
}
static int veth_alloc_queues(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	int i;

	priv->rq = kcalloc(dev->num_rx_queues, sizeof(*priv->rq), GFP_KERNEL);
	if (!priv->rq)
		return -ENOMEM;

	for (i = 0; i < dev->num_rx_queues; i++) {
		priv->rq[i].dev = dev;
		u64_stats_init(&priv->rq[i].stats.syncp);
	}

	return 0;
}
static void veth_free_queues(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);

	kfree(priv->rq);
}
static int veth_dev_init(struct net_device *dev)
{
	int err;

	dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
	if (!dev->lstats)
		return -ENOMEM;

	err = veth_alloc_queues(dev);
	if (err) {
		free_percpu(dev->lstats);
		return err;
	}

	return 0;
}
static void veth_dev_free(struct net_device *dev)
{
	veth_free_queues(dev);
	free_percpu(dev->lstats);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void veth_poll_controller(struct net_device *dev)
{
	/* veth only receives frames when its peer sends one
	 * Since it has nothing to do with disabling irqs, we are guaranteed
	 * never to have pending data when we poll for it so
	 * there is nothing to do here.
	 *
	 * We need this though so netpoll recognizes us as an interface that
	 * supports polling, which enables bridge devices in virt setups to
	 * still use netconsole
	 */
}
#endif	/* CONFIG_NET_POLL_CONTROLLER */
static int veth_get_iflink(const struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer;
	int iflink;

	rcu_read_lock();
	peer = rcu_dereference(priv->peer);
	iflink = peer ? peer->ifindex : 0;
	rcu_read_unlock();

	return iflink;
}
static netdev_features_t veth_fix_features(struct net_device *dev,
					   netdev_features_t features)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer;

	peer = rtnl_dereference(priv->peer);
	if (peer) {
		struct veth_priv *peer_priv = netdev_priv(peer);

		if (peer_priv->_xdp_prog)
			features &= ~NETIF_F_GSO_SOFTWARE;
	}

	return features;
}
static void veth_set_rx_headroom(struct net_device *dev, int new_hr)
{
	struct veth_priv *peer_priv, *priv = netdev_priv(dev);
	struct net_device *peer;

	if (new_hr < 0)
		new_hr = 0;

	rcu_read_lock();
	peer = rcu_dereference(priv->peer);
	if (unlikely(!peer))
		goto out;

	peer_priv = netdev_priv(peer);
	priv->requested_headroom = new_hr;
	new_hr = max(priv->requested_headroom, peer_priv->requested_headroom);
	dev->needed_headroom = new_hr;
	peer->needed_headroom = new_hr;

out:
	rcu_read_unlock();
}
static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog,
			struct netlink_ext_ack *extack)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct bpf_prog *old_prog;
	struct net_device *peer;
	unsigned int max_mtu;
	int err;

	old_prog = priv->_xdp_prog;
	priv->_xdp_prog = prog;
	peer = rtnl_dereference(priv->peer);

	if (prog) {
		if (!peer) {
			NL_SET_ERR_MSG_MOD(extack, "Cannot set XDP when peer is detached");
			err = -ENOTCONN;
			goto err;
		}

		max_mtu = PAGE_SIZE - VETH_XDP_HEADROOM -
			  peer->hard_header_len -
			  SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
		if (peer->mtu > max_mtu) {
			NL_SET_ERR_MSG_MOD(extack, "Peer MTU is too large to set XDP");
			err = -ERANGE;
			goto err;
		}

		if (dev->real_num_rx_queues < peer->real_num_tx_queues) {
			NL_SET_ERR_MSG_MOD(extack, "XDP expects number of rx queues not less than peer tx queues");
			err = -ENOSPC;
			goto err;
		}

		if (dev->flags & IFF_UP) {
			err = veth_enable_xdp(dev);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Setup for XDP failed");
				goto err;
			}
		}

		if (!old_prog) {
			peer->hw_features &= ~NETIF_F_GSO_SOFTWARE;
			peer->max_mtu = max_mtu;
		}
	}

	if (old_prog) {
		if (!prog) {
			if (dev->flags & IFF_UP)
				veth_disable_xdp(dev);

			if (peer) {
				peer->hw_features |= NETIF_F_GSO_SOFTWARE;
				peer->max_mtu = ETH_MAX_MTU;
			}
		}
		bpf_prog_put(old_prog);
	}

	if ((!!old_prog ^ !!prog) && peer)
		netdev_update_features(peer);

	return 0;
err:
	priv->_xdp_prog = old_prog;

	return err;
}
static int veth_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return veth_xdp_set(dev, xdp->prog, xdp->extack);
	default:
		return -EINVAL;
	}
}
static const struct net_device_ops veth_netdev_ops = {
	.ndo_init		= veth_dev_init,
	.ndo_open		= veth_open,
	.ndo_stop		= veth_close,
	.ndo_start_xmit		= veth_xmit,
	.ndo_get_stats64	= veth_get_stats64,
	.ndo_set_rx_mode	= veth_set_multicast_list,
	.ndo_set_mac_address	= eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= veth_poll_controller,
#endif
	.ndo_get_iflink		= veth_get_iflink,
	.ndo_fix_features	= veth_fix_features,
	.ndo_features_check	= passthru_features_check,
	.ndo_set_rx_headroom	= veth_set_rx_headroom,
	.ndo_bpf		= veth_xdp,
	.ndo_xdp_xmit		= veth_ndo_xdp_xmit,
	.ndo_get_peer_dev	= veth_peer_dev,
};
#define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HW_CSUM | \
		       NETIF_F_RXCSUM | NETIF_F_SCTP_CRC | NETIF_F_HIGHDMA | \
		       NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL | \
		       NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | \
		       NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_STAG_RX)

static void veth_setup(struct net_device *dev)
{
	ether_setup(dev);

	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	dev->priv_flags |= IFF_NO_QUEUE;
	dev->priv_flags |= IFF_PHONY_HEADROOM;

	dev->netdev_ops = &veth_netdev_ops;
	dev->ethtool_ops = &veth_ethtool_ops;
	dev->features |= NETIF_F_LLTX;
	dev->features |= VETH_FEATURES;
	dev->vlan_features = dev->features &
			     ~(NETIF_F_HW_VLAN_CTAG_TX |
			       NETIF_F_HW_VLAN_STAG_TX |
			       NETIF_F_HW_VLAN_CTAG_RX |
			       NETIF_F_HW_VLAN_STAG_RX);
	dev->needs_free_netdev = true;
	dev->priv_destructor = veth_dev_free;
	dev->max_mtu = ETH_MAX_MTU;

	dev->hw_features = VETH_FEATURES;
	dev->hw_enc_features = VETH_FEATURES;
	dev->mpls_features = NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE;
}
static int veth_validate(struct nlattr *tb[], struct nlattr *data[],
			 struct netlink_ext_ack *extack)
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}
	if (tb[IFLA_MTU]) {
		if (!is_valid_veth_mtu(nla_get_u32(tb[IFLA_MTU])))
			return -EINVAL;
	}
	return 0;
}

static struct rtnl_link_ops veth_link_ops;
static int veth_newlink(struct net *src_net, struct net_device *dev,
			struct nlattr *tb[], struct nlattr *data[],
			struct netlink_ext_ack *extack)
{
	int err;
	struct net_device *peer;
	struct veth_priv *priv;
	char ifname[IFNAMSIZ];
	struct nlattr *peer_tb[IFLA_MAX + 1], **tbp;
	unsigned char name_assign_type;
	struct ifinfomsg *ifmp;
	struct net *net;

	/*
	 * create and register peer first
	 */
	if (data != NULL && data[VETH_INFO_PEER] != NULL) {
		struct nlattr *nla_peer;

		nla_peer = data[VETH_INFO_PEER];
		ifmp = nla_data(nla_peer);
		err = rtnl_nla_parse_ifla(peer_tb,
					  nla_data(nla_peer) + sizeof(struct ifinfomsg),
					  nla_len(nla_peer) - sizeof(struct ifinfomsg),
					  NULL);
		if (err < 0)
			return err;

		err = veth_validate(peer_tb, NULL, extack);
		if (err < 0)
			return err;

		tbp = peer_tb;
	} else {
		ifmp = NULL;
		tbp = tb;
	}

	if (ifmp && tbp[IFLA_IFNAME]) {
		nla_strscpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ);
		name_assign_type = NET_NAME_USER;
	} else {
		snprintf(ifname, IFNAMSIZ, DRV_NAME "%%d");
		name_assign_type = NET_NAME_ENUM;
	}

	net = rtnl_link_get_net(src_net, tbp);
	if (IS_ERR(net))
		return PTR_ERR(net);

	peer = rtnl_create_link(net, ifname, name_assign_type,
				&veth_link_ops, tbp, extack);
	if (IS_ERR(peer)) {
		put_net(net);
		return PTR_ERR(peer);
	}

	if (!ifmp || !tbp[IFLA_ADDRESS])
		eth_hw_addr_random(peer);

	if (ifmp && (dev->ifindex != 0))
		peer->ifindex = ifmp->ifi_index;

	peer->gso_max_size = dev->gso_max_size;
	peer->gso_max_segs = dev->gso_max_segs;

	err = register_netdevice(peer);
	put_net(net);
	net = NULL;
	if (err < 0)
		goto err_register_peer;

	netif_carrier_off(peer);

	err = rtnl_configure_link(peer, ifmp);
	if (err < 0)
		goto err_configure_peer;

	/*
	 * register dev last
	 *
	 * note, that since we've registered new device the dev's name
	 * should be re-allocated
	 */

	if (tb[IFLA_ADDRESS] == NULL)
		eth_hw_addr_random(dev);

	if (tb[IFLA_IFNAME])
		nla_strscpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ);
	else
		snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d");

	err = register_netdevice(dev);
	if (err < 0)
		goto err_register_dev;

	netif_carrier_off(dev);

	/*
	 * tie the devices together
	 */
	priv = netdev_priv(dev);
	rcu_assign_pointer(priv->peer, peer);

	priv = netdev_priv(peer);
	rcu_assign_pointer(priv->peer, dev);

	return 0;

err_register_dev:
	/* nothing to do */
err_configure_peer:
	unregister_netdevice(peer);
	return err;

err_register_peer:
	free_netdev(peer);
	return err;
}
static void veth_dellink(struct net_device *dev, struct list_head *head)
{
	struct veth_priv *priv;
	struct net_device *peer;

	priv = netdev_priv(dev);
	peer = rtnl_dereference(priv->peer);

	/* Note : dellink() is called from default_device_exit_batch(),
	 * before a rcu_synchronize() point. The devices are guaranteed
	 * not being freed before one RCU grace period.
	 */
	RCU_INIT_POINTER(priv->peer, NULL);
	unregister_netdevice_queue(dev, head);

	if (peer) {
		priv = netdev_priv(peer);
		RCU_INIT_POINTER(priv->peer, NULL);
		unregister_netdevice_queue(peer, head);
	}
}
static const struct nla_policy veth_policy[VETH_INFO_MAX + 1] = {
	[VETH_INFO_PEER]	= { .len = sizeof(struct ifinfomsg) },
};
static struct net *veth_get_link_net(const struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);

	return peer ? dev_net(peer) : dev_net(dev);
}
static struct rtnl_link_ops veth_link_ops = {
	.kind		= DRV_NAME,
	.priv_size	= sizeof(struct veth_priv),
	.setup		= veth_setup,
	.validate	= veth_validate,
	.newlink	= veth_newlink,
	.dellink	= veth_dellink,
	.policy		= veth_policy,
	.maxtype	= VETH_INFO_MAX,
	.get_link_net	= veth_get_link_net,
};
static __init int veth_init(void)
{
	return rtnl_link_register(&veth_link_ops);
}

static __exit void veth_exit(void)
{
	rtnl_link_unregister(&veth_link_ops);
}

module_init(veth_init);
module_exit(veth_exit);

MODULE_DESCRIPTION("Virtual Ethernet Tunnel");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);
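
/* For reference only (not part of the driver): a pair created through this
 * rtnl_link_ops is typically exercised from userspace with iproute2, e.g.
 *
 *	ip link add veth0 type veth peer name veth1
 *	ip link set veth0 up
 *	ip link set veth1 up
 *
 * The interface names above are illustrative; any frame transmitted on one
 * end of the pair is received on the other.
 */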