#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/netpoll.h>
#include "vlan.h"
/* VLAN rx hw acceleration helper.  This acts like netif_{rx,receive_skb}(). */
int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
		      u16 vlan_tci, int polling)
{
	/* Give netpoll a shot at the frame first; it returns nonzero
	 * when it has consumed the packet. */
	if (netpoll_rx(skb))
		return NET_RX_DROP;

	/* Drop frames that arrived on an inactive bonding slave. */
	if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master)))
		goto drop;

	skb->skb_iif = skb->dev->ifindex;
	__vlan_hwaccel_put_tag(skb, vlan_tci);

	/* Map the VLAN ID onto its net_device; unknown IDs yield NULL. */
	skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK);
	if (!skb->dev)
		goto drop;

	return (polling ? netif_receive_skb(skb) : netif_rx(skb));

drop:
	dev_kfree_skb_any(skb);
	return NET_RX_DROP;
}
EXPORT_SYMBOL(__vlan_hwaccel_rx);
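
/*
 * Illustrative sketch (not part of this file): drivers normally reach
 * __vlan_hwaccel_rx() through the vlan_hwaccel_rx() and
 * vlan_hwaccel_receive_skb() wrappers in <linux/if_vlan.h>, which pass
 * polling = 0 and polling = 1 respectively.  Assuming hypothetical
 * driver fields (priv->vlgrp, desc->status, desc->vlan_tag and the
 * RX_VLAN_TAG_PRESENT flag):
 *
 *	if (priv->vlgrp && (desc->status & RX_VLAN_TAG_PRESENT))
 *		vlan_hwaccel_receive_skb(skb, priv->vlgrp,
 *					 le16_to_cpu(desc->vlan_tag));
 *	else
 *		netif_receive_skb(skb);
 */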
int vlan_hwaccel_do_receive(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct vlan_rx_stats *rx_stats;

	/* Let the taps (e.g. packet sockets) see the frame as it
	 * appeared on the underlying device. */
	skb->dev = vlan_dev_info(dev)->real_dev;
	netif_nit_deliver(skb);

	skb->dev = dev;
	skb->priority = vlan_get_ingress_priority(dev, skb->vlan_tci);
	skb->vlan_tci = 0;

	rx_stats = per_cpu_ptr(vlan_dev_info(dev)->vlan_rx_stats,
			       smp_processor_id());

	rx_stats->rx_packets++;
	rx_stats->rx_bytes += skb->len;

	switch (skb->pkt_type) {
	case PACKET_BROADCAST:
		break;
	case PACKET_MULTICAST:
		rx_stats->multicast++;
		break;
	case PACKET_OTHERHOST:
		/* Our lower layer thinks this is not local, let's make sure.
		 * This allows the VLAN to have a different MAC than the
		 * underlying device, and still route correctly. */
		if (!compare_ether_addr(eth_hdr(skb)->h_dest,
					dev->dev_addr))
			skb->pkt_type = PACKET_HOST;
		break;
	}
	return 0;
}
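
/*
 * The rx counters above are per-cpu, so the hot path needs no locking.
 * A minimal sketch of how such counters are allocated and folded into
 * a device total (the real code lives in vlan_dev.c; the variable
 * names here are illustrative):
 *
 *	vlan_dev_info(dev)->vlan_rx_stats =
 *		alloc_percpu(struct vlan_rx_stats);
 *
 *	for_each_possible_cpu(i) {
 *		const struct vlan_rx_stats *p =
 *			per_cpu_ptr(vlan_dev_info(dev)->vlan_rx_stats, i);
 *		total_rx_packets += p->rx_packets;
 *		total_rx_bytes   += p->rx_bytes;
 *	}
 */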
struct net_device *vlan_dev_real_dev(const struct net_device *dev)
{
	return vlan_dev_info(dev)->real_dev;
}
EXPORT_SYMBOL(vlan_dev_real_dev);
u16 vlan_dev_vlan_id(const struct net_device *dev)
{
	return vlan_dev_info(dev)->vlan_id;
}
EXPORT_SYMBOL(vlan_dev_vlan_id);
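
/*
 * Usage sketch: both accessors are only valid on a VLAN device, so a
 * caller is expected to check first, e.g. (hypothetical caller code):
 *
 *	if (is_vlan_dev(dev)) {
 *		struct net_device *real_dev = vlan_dev_real_dev(dev);
 *		u16 vid = vlan_dev_vlan_id(dev);
 *		...
 *	}
 */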
static gro_result_t
vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp,
		unsigned int vlan_tci, struct sk_buff *skb)
{
	struct sk_buff *p;

	if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master)))
		goto drop;

	skb->skb_iif = skb->dev->ifindex;
	__vlan_hwaccel_put_tag(skb, vlan_tci);
	skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK);

	if (!skb->dev)
		goto drop;

	/* A held-back packet can only be merged with this one if it was
	 * received on the same VLAN device with an identical Ethernet
	 * header. */
	for (p = napi->gro_list; p; p = p->next) {
		NAPI_GRO_CB(p)->same_flow =
			p->dev == skb->dev && !compare_ether_header(
				skb_mac_header(p), skb_gro_mac_header(skb));
		NAPI_GRO_CB(p)->flush = 0;
	}

	return dev_gro_receive(napi, skb);

drop:
	return GRO_DROP;
}
gro_result_t vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp,
			      unsigned int vlan_tci, struct sk_buff *skb)
{
	/* GRO and netpoll don't mix; fall back to the non-GRO path. */
	if (netpoll_rx_on(skb))
		return vlan_hwaccel_receive_skb(skb, grp, vlan_tci)
			? GRO_DROP : GRO_NORMAL;

	skb_gro_reset_offset(skb);

	return napi_skb_finish(vlan_gro_common(napi, grp, vlan_tci, skb), skb);
}
EXPORT_SYMBOL(vlan_gro_receive);
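
/*
 * Sketch of a NAPI poll handler using this entry point (priv->vlgrp,
 * priv->napi, desc->status, desc->vlan_tag and RX_VLAN_TAG_PRESENT are
 * hypothetical driver names):
 *
 *	if (priv->vlgrp && (desc->status & RX_VLAN_TAG_PRESENT))
 *		vlan_gro_receive(&priv->napi, priv->vlgrp,
 *				 le16_to_cpu(desc->vlan_tag), skb);
 *	else
 *		napi_gro_receive(&priv->napi, skb);
 */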
gro_result_t vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp,
			    unsigned int vlan_tci)
{
	struct sk_buff *skb = napi_frags_skb(napi);

	if (!skb)
		return GRO_DROP;

	if (netpoll_rx_on(skb)) {
		skb->protocol = eth_type_trans(skb, skb->dev);
		return vlan_hwaccel_receive_skb(skb, grp, vlan_tci)
			? GRO_DROP : GRO_NORMAL;
	}

	return napi_frags_finish(napi, skb,
				 vlan_gro_common(napi, grp, vlan_tci, skb));
}
EXPORT_SYMBOL(vlan_gro_frags);
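
/*
 * vlan_gro_frags() pairs with napi_get_frags(): the driver borrows a
 * header-less skb, attaches its receive pages as fragments, and hands
 * the skb back through this function.  A hedged sketch (priv and desc
 * are hypothetical):
 *
 *	skb = napi_get_frags(&priv->napi);
 *	if (!skb)
 *		return;
 *	... attach rx pages to skb_shinfo(skb)->frags, set skb->len ...
 *	vlan_gro_frags(&priv->napi, priv->vlgrp,
 *		       le16_to_cpu(desc->vlan_tag));
 */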