/*
 *	Linux ethernet bridge
 *
 *	Lennert Buytenhek		<buytenh@gnu.org>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/netpoll.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/netfilter_bridge.h>
#include "br_private.h"
static int deliver_clone(const struct net_bridge_port *prev,
			 struct sk_buff *skb,
			 void (*__packet_hook)(const struct net_bridge_port *p,
					       struct sk_buff *skb));
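
/*
 * Both __br_deliver (locally originated traffic) and __br_forward
 * (relayed traffic) are passed through the flooding helpers below as
 * the __packet_hook function pointer, so the clone/deliver bookkeeping
 * is written only once.
 */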
/* Don't forward packets to originating port or forwarding disabled */
static inline int should_deliver(const struct net_bridge_port *p,
				 const struct sk_buff *skb)
{
	struct net_bridge_vlan_group *vg;

	vg = nbp_vlan_group_rcu(p);
	return ((p->flags & BR_HAIRPIN_MODE) || skb->dev != p->dev) &&
		br_allowed_egress(vg, skb) && p->state == BR_STATE_FORWARDING;
}
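
/*
 * Note: BR_HAIRPIN_MODE (reflective relay) allows a frame to be sent
 * back out the port it arrived on; without it, the originating port is
 * always excluded. Egress is further gated by the port's VLAN
 * configuration (br_allowed_egress) and by STP state, so only ports in
 * BR_STATE_FORWARDING ever transmit.
 */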
int br_dev_queue_push_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	if (!is_skb_forwardable(skb->dev, skb))
		goto drop;

	skb_push(skb, ETH_HLEN);
	br_drop_fake_rtable(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    (skb->protocol == htons(ETH_P_8021Q) ||
	     skb->protocol == htons(ETH_P_8021AD))) {
		int depth;

		if (!__vlan_get_protocol(skb, skb->protocol, &depth))
			goto drop;

		skb_set_network_header(skb, depth);
	}

	dev_queue_xmit(skb);

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL_GPL(br_dev_queue_push_xmit);
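
/*
 * Note: the bridge input path runs with the Ethernet header already
 * pulled, so skb_push(skb, ETH_HLEN) restores it before the frame is
 * handed to dev_queue_xmit(). For VLAN-tagged frames using
 * CHECKSUM_PARTIAL, the network header is re-pointed past the
 * 802.1Q/802.1ad tags so the checksum offsets seen by the egress
 * device remain valid.
 */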
int br_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	return NF_HOOK(NFPROTO_BRIDGE, NF_BR_POST_ROUTING,
		       net, sk, skb, NULL, skb->dev,
		       br_dev_queue_push_xmit);
}
EXPORT_SYMBOL_GPL(br_forward_finish);
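
/*
 * br_forward_finish runs every outgoing frame through the
 * NF_BR_POST_ROUTING hook (e.g. the ebtables POSTROUTING chain) before
 * br_dev_queue_push_xmit does the actual transmit.
 */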
static void __br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
{
	struct net_bridge_vlan_group *vg;

	vg = nbp_vlan_group_rcu(to);
	skb = br_handle_vlan(to->br, vg, skb);
	if (!skb)
		return;

	skb->dev = to->dev;

	if (unlikely(netpoll_tx_running(to->br->dev))) {
		if (!is_skb_forwardable(skb->dev, skb))
			kfree_skb(skb);
		else {
			skb_push(skb, ETH_HLEN);
			br_netpoll_send_skb(to, skb);
		}
		return;
	}

	NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT,
		dev_net(skb->dev), NULL, skb, NULL, skb->dev,
		br_forward_finish);
}
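
/*
 * Note: when netpoll is active on the bridge (e.g. netconsole), the
 * frame is pushed out via br_netpoll_send_skb directly, skipping the
 * netfilter hooks; netpoll may be invoked from contexts where the
 * normal transmit path is not safe.
 */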
static void __br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
{
	struct net_bridge_vlan_group *vg;
	struct net_device *indev;

	if (skb_warn_if_lro(skb)) {
		kfree_skb(skb);
		return;
	}

	vg = nbp_vlan_group_rcu(to);
	skb = br_handle_vlan(to->br, vg, skb);
	if (!skb)
		return;

	indev = skb->dev;
	skb->dev = to->dev;
	skb_forward_csum(skb);

	NF_HOOK(NFPROTO_BRIDGE, NF_BR_FORWARD,
		dev_net(indev), NULL, skb, indev, skb->dev,
		br_forward_finish);
}
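
/*
 * Note: skb_warn_if_lro() catches LRO-aggregated packets, which may
 * exceed the egress MTU and cannot be re-segmented, so they are dropped
 * rather than forwarded. skb_forward_csum() downgrades
 * CHECKSUM_COMPLETE to CHECKSUM_NONE, since the computed checksum is
 * not meaningful once the frame leaves on another device.
 */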
/* called with rcu_read_lock */
void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
{
	if (to && should_deliver(to, skb)) {
		__br_deliver(to, skb);
		return;
	}

	kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(br_deliver);
/* called with rcu_read_lock */
void br_forward(const struct net_bridge_port *to, struct sk_buff *skb,
		struct sk_buff *skb0)
{
	if (to && should_deliver(to, skb)) {
		if (skb0)
			deliver_clone(to, skb, __br_forward);
		else
			__br_forward(to, skb);
		return;
	}

	if (!skb0)
		kfree_skb(skb);
}
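
/*
 * Note on skb0: a non-NULL skb0 tells br_forward that the caller still
 * needs the original skb (it will also be delivered locally), so the
 * forwarded copy must be a clone; otherwise the original skb itself is
 * handed to __br_forward.
 */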
static int deliver_clone(const struct net_bridge_port *prev,
			 struct sk_buff *skb,
			 void (*__packet_hook)(const struct net_bridge_port *p,
					       struct sk_buff *skb))
{
	struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;

	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb) {
		dev->stats.tx_dropped++;
		return -ENOMEM;
	}

	__packet_hook(prev, skb);
	return 0;
}
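
/*
 * deliver_clone runs on the forwarding path under rcu_read_lock and
 * must not sleep, hence GFP_ATOMIC; a failed clone is accounted as a
 * tx drop on the bridge device itself.
 */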
static struct net_bridge_port *maybe_deliver(
	struct net_bridge_port *prev, struct net_bridge_port *p,
	struct sk_buff *skb,
	void (*__packet_hook)(const struct net_bridge_port *p,
			      struct sk_buff *skb))
{
	int err;

	if (!should_deliver(p, skb))
		return prev;

	if (!prev)
		goto out;

	err = deliver_clone(prev, skb, __packet_hook);
	if (err)
		return ERR_PTR(err);

out:
	return p;
}
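
/*
 * maybe_deliver defers transmission by one port: it sends a clone to
 * the previously remembered port and returns the current one. The last
 * eligible port can then be handed the original skb, saving one clone
 * per flood.
 */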
/* called under bridge lock */
static void br_flood(struct net_bridge *br, struct sk_buff *skb,
		     struct sk_buff *skb0,
		     void (*__packet_hook)(const struct net_bridge_port *p,
					   struct sk_buff *skb),
		     bool unicast)
{
	struct net_bridge_port *p;
	struct net_bridge_port *prev;

	prev = NULL;

	list_for_each_entry_rcu(p, &br->port_list, list) {
		/* Do not flood unicast traffic to ports that turn it off */
		if (unicast && !(p->flags & BR_FLOOD))
			continue;

		/* Do not flood to ports that enable proxy ARP */
		if (p->flags & BR_PROXYARP)
			continue;
		if ((p->flags & BR_PROXYARP_WIFI) &&
		    BR_INPUT_SKB_CB(skb)->proxyarp_replied)
			continue;

		prev = maybe_deliver(prev, p, skb, __packet_hook);
		if (IS_ERR(prev))
			goto out;
	}

	if (!prev)
		goto out;

	if (skb0)
		deliver_clone(prev, skb, __packet_hook);
	else
		__packet_hook(prev, skb);
	return;

out:
	if (!skb0)
		kfree_skb(skb);
}
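
/*
 * As with br_forward, a non-NULL skb0 means the caller keeps ownership
 * of the original skb, so even the final delivery must use a clone;
 * when no port qualified and skb0 is NULL, the skb is freed here.
 */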
/* called with rcu_read_lock */
void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb, bool unicast)
{
	br_flood(br, skb, NULL, __br_deliver, unicast);
}
/* called under bridge lock */
void br_flood_forward(struct net_bridge *br, struct sk_buff *skb,
		      struct sk_buff *skb2, bool unicast)
{
	br_flood(br, skb, skb2, __br_forward, unicast);
}
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
/* called with rcu_read_lock */
static void br_multicast_flood(struct net_bridge_mdb_entry *mdst,
			       struct sk_buff *skb, struct sk_buff *skb0,
			       void (*__packet_hook)(
					const struct net_bridge_port *p,
					struct sk_buff *skb))
{
	struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_port *prev = NULL;
	struct net_bridge_port_group *p;
	struct hlist_node *rp;

	rp = rcu_dereference(hlist_first_rcu(&br->router_list));
	p = mdst ? rcu_dereference(mdst->ports) : NULL;
	while (p || rp) {
		struct net_bridge_port *port, *lport, *rport;

		lport = p ? p->port : NULL;
		rport = rp ? hlist_entry(rp, struct net_bridge_port, rlist) :
			     NULL;

		port = (unsigned long)lport > (unsigned long)rport ?
		       lport : rport;

		prev = maybe_deliver(prev, port, skb, __packet_hook);
		if (IS_ERR(prev))
			goto out;

		if ((unsigned long)lport >= (unsigned long)port)
			p = rcu_dereference(p->next);
		if ((unsigned long)rport >= (unsigned long)port)
			rp = rcu_dereference(hlist_next_rcu(rp));
	}

	if (!prev)
		goto out;

	if (skb0)
		deliver_clone(prev, skb, __packet_hook);
	else
		__packet_hook(prev, skb);
	return;

out:
	if (!skb0)
		kfree_skb(skb);
}
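
/*
 * Note: the loop above merges two lists that are both kept sorted by
 * port pointer value: the mdb entry's port groups (group members) and
 * the bridge's multicast router list. Picking the larger pointer each
 * iteration and advancing every list that matched it delivers exactly
 * one copy per port, even when a port appears on both lists.
 */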
/* called with rcu_read_lock */
void br_multicast_deliver(struct net_bridge_mdb_entry *mdst,
			  struct sk_buff *skb)
{
	br_multicast_flood(mdst, skb, NULL, __br_deliver);
}
/* called with rcu_read_lock */
void br_multicast_forward(struct net_bridge_mdb_entry *mdst,
			  struct sk_buff *skb, struct sk_buff *skb2)
{
	br_multicast_flood(mdst, skb, skb2, __br_forward);
}
#endif