1 #include <linux/skbuff.h>
2 #include <linux/netdevice.h>
3 #include <linux/if_vlan.h>
4 #include <linux/netpoll.h>
7 /* VLAN rx hw acceleration helper. This acts like netif_{rx,receive_skb}(). */
8 int __vlan_hwaccel_rx(struct sk_buff
*skb
, struct vlan_group
*grp
,
9 u16 vlan_tci
, int polling
)
14 if (skb_bond_should_drop(skb
))
17 skb
->vlan_tci
= vlan_tci
;
18 skb
->dev
= vlan_group_get_device(grp
, vlan_tci
& VLAN_VID_MASK
);
23 return (polling
? netif_receive_skb(skb
) : netif_rx(skb
));
26 dev_kfree_skb_any(skb
);
29 EXPORT_SYMBOL(__vlan_hwaccel_rx
);
31 int vlan_hwaccel_do_receive(struct sk_buff
*skb
)
33 struct net_device
*dev
= skb
->dev
;
34 struct net_device_stats
*stats
;
36 skb
->dev
= vlan_dev_info(dev
)->real_dev
;
37 netif_nit_deliver(skb
);
40 skb
->priority
= vlan_get_ingress_priority(dev
, skb
->vlan_tci
);
45 stats
->rx_bytes
+= skb
->len
;
47 switch (skb
->pkt_type
) {
48 case PACKET_BROADCAST
:
50 case PACKET_MULTICAST
:
53 case PACKET_OTHERHOST
:
54 /* Our lower layer thinks this is not local, let's make sure.
55 * This allows the VLAN to have a different MAC than the
56 * underlying device, and still route correctly. */
57 if (!compare_ether_addr(eth_hdr(skb
)->h_dest
,
59 skb
->pkt_type
= PACKET_HOST
;
65 struct net_device
*vlan_dev_real_dev(const struct net_device
*dev
)
67 return vlan_dev_info(dev
)->real_dev
;
69 EXPORT_SYMBOL(vlan_dev_real_dev
);
71 u16
vlan_dev_vlan_id(const struct net_device
*dev
)
73 return vlan_dev_info(dev
)->vlan_id
;
75 EXPORT_SYMBOL(vlan_dev_vlan_id
);
77 static int vlan_gro_common(struct napi_struct
*napi
, struct vlan_group
*grp
,
78 unsigned int vlan_tci
, struct sk_buff
*skb
)
82 if (skb_bond_should_drop(skb
))
85 skb
->vlan_tci
= vlan_tci
;
86 skb
->dev
= vlan_group_get_device(grp
, vlan_tci
& VLAN_VID_MASK
);
91 for (p
= napi
->gro_list
; p
; p
= p
->next
) {
92 NAPI_GRO_CB(p
)->same_flow
=
93 p
->dev
== skb
->dev
&& !compare_ether_header(
94 skb_mac_header(p
), skb_gro_mac_header(skb
));
95 NAPI_GRO_CB(p
)->flush
= 0;
98 return dev_gro_receive(napi
, skb
);
/*
 * vlan_gro_receive - GRO entry point for VLAN-tagged skbs
 * @napi:     the NAPI context
 * @grp:      VLAN group of the underlying device
 * @vlan_tci: VLAN tag control info from the hardware
 * @skb:      the received buffer
 *
 * Bypasses GRO and delivers directly when netpoll is active on the
 * device (netpoll cannot cope with held GRO packets); otherwise resets
 * the GRO offsets and runs the common GRO path.  Returns a NET_RX_* /
 * GRO_* disposition code.
 */
int vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp,
		     unsigned int vlan_tci, struct sk_buff *skb)
{
	if (netpoll_rx_on(skb))
		return vlan_hwaccel_receive_skb(skb, grp, vlan_tci);

	skb_gro_reset_offset(skb);

	return napi_skb_finish(vlan_gro_common(napi, grp, vlan_tci, skb), skb);
}
EXPORT_SYMBOL(vlan_gro_receive);
116 int vlan_gro_frags(struct napi_struct
*napi
, struct vlan_group
*grp
,
117 unsigned int vlan_tci
, struct napi_gro_fraginfo
*info
)
119 struct sk_buff
*skb
= napi_fraginfo_skb(napi
, info
);
124 if (netpoll_rx_on(skb
))
125 return vlan_hwaccel_receive_skb(skb
, grp
, vlan_tci
);
127 return napi_frags_finish(napi
, skb
,
128 vlan_gro_common(napi
, grp
, vlan_tci
, skb
));
130 EXPORT_SYMBOL(vlan_gro_frags
);