/*
 *	Linux ethernet bridge
 *
 *	Lennert Buytenhek		<buytenh@gnu.org>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/netpoll.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/netfilter_bridge.h>
#include "br_private.h"
static int deliver_clone(const struct net_bridge_port *prev,
			 struct sk_buff *skb,
			 void (*__packet_hook)(const struct net_bridge_port *p,
					       struct sk_buff *skb));
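/*
 * __br_deliver() and __br_forward() are passed around as __packet_hook so
 * the flood/clone helpers below can share one delivery loop for both the
 * locally-originated and the forwarded path.
 */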
/* Don't forward packets to originating port or forwarding disabled */
static inline int should_deliver(const struct net_bridge_port *p,
				 const struct sk_buff *skb)
{
	struct net_bridge_vlan_group *vg;

	vg = nbp_vlan_group_rcu(p);
	return ((p->flags & BR_HAIRPIN_MODE) || skb->dev != p->dev) &&
		br_allowed_egress(vg, skb) && p->state == BR_STATE_FORWARDING;
}
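/*
 * Final transmit step.  The bridge input path pulled the Ethernet header,
 * so push it back before handing the skb to the lower device.  For VLAN
 * frames still carrying CHECKSUM_PARTIAL, re-point the network header past
 * the VLAN tag so later checksum handling can find the right offset.
 */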
int br_dev_queue_push_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	if (!is_skb_forwardable(skb->dev, skb))
		goto drop;

	skb_push(skb, ETH_HLEN);
	br_drop_fake_rtable(skb);
	skb_sender_cpu_clear(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    (skb->protocol == htons(ETH_P_8021Q) ||
	     skb->protocol == htons(ETH_P_8021AD))) {
		int depth;

		if (!__vlan_get_protocol(skb, skb->protocol, &depth))
			goto drop;

		skb_set_network_header(skb, depth);
	}

	dev_queue_xmit(skb);

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL_GPL(br_dev_queue_push_xmit);
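/*
 * Run the NF_BR_POST_ROUTING hook and, if the verdict allows it, hand the
 * skb to br_dev_queue_push_xmit() for actual transmission.
 */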
int br_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	return NF_HOOK(NFPROTO_BRIDGE, NF_BR_POST_ROUTING,
		       net, sk, skb, NULL, skb->dev,
		       br_dev_queue_push_xmit);
}
EXPORT_SYMBOL_GPL(br_forward_finish);
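/*
 * Deliver a locally-originated frame on one port: apply egress VLAN
 * handling, then run NF_BR_LOCAL_OUT.  When netpoll is active on the
 * bridge device the frame is sent via the netpoll path instead.
 */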
static void __br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
{
	struct net_bridge_vlan_group *vg;

	vg = nbp_vlan_group_rcu(to);
	skb = br_handle_vlan(to->br, vg, skb);
	if (!skb)
		return;

	skb->dev = to->dev;

	if (unlikely(netpoll_tx_running(to->br->dev))) {
		if (!is_skb_forwardable(skb->dev, skb))
			kfree_skb(skb);
		else {
			skb_push(skb, ETH_HLEN);
			br_netpoll_send_skb(to, skb);
		}
		return;
	}

	NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT,
		dev_net(skb->dev), NULL, skb, NULL, skb->dev,
		br_forward_finish);
}
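/*
 * Forward a frame received on another port: refuse LRO-aggregated skbs,
 * apply egress VLAN handling and run NF_BR_FORWARD with the ingress
 * device as indev.
 */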
static void __br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
{
	struct net_bridge_vlan_group *vg;
	struct net_device *indev;

	if (skb_warn_if_lro(skb)) {
		kfree_skb(skb);
		return;
	}

	vg = nbp_vlan_group_rcu(to);
	skb = br_handle_vlan(to->br, vg, skb);
	if (!skb)
		return;

	indev = skb->dev;
	skb->dev = to->dev;
	skb_forward_csum(skb);

	NF_HOOK(NFPROTO_BRIDGE, NF_BR_FORWARD,
		dev_net(indev), NULL, skb, indev, skb->dev,
		br_forward_finish);
}
/* called with rcu_read_lock */
void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
{
	if (to && should_deliver(to, skb)) {
		__br_deliver(to, skb);
		return;
	}

	kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(br_deliver);
/* called with rcu_read_lock */
void br_forward(const struct net_bridge_port *to, struct sk_buff *skb, struct sk_buff *skb0)
{
	if (to && should_deliver(to, skb)) {
		if (skb0)
			deliver_clone(to, skb, __br_forward);
		else
			__br_forward(to, skb);
		return;
	}

	if (!skb0)
		kfree_skb(skb);
}
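/*
 * Clone the skb and hand the clone to __packet_hook, leaving the caller's
 * original untouched.  On allocation failure the frame is accounted as a
 * tx drop on the bridge device.
 */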
static int deliver_clone(const struct net_bridge_port *prev,
			 struct sk_buff *skb,
			 void (*__packet_hook)(const struct net_bridge_port *p,
					       struct sk_buff *skb))
{
	struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;

	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb) {
		dev->stats.tx_dropped++;
		return -ENOMEM;
	}

	__packet_hook(prev, skb);
	return 0;
}
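/*
 * Delivery to the previously selected port is deferred until the next
 * eligible port is found: every port but the last gets a clone, and the
 * last one can consume the original skb without an extra copy.
 */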
static struct net_bridge_port *maybe_deliver(
	struct net_bridge_port *prev, struct net_bridge_port *p,
	struct sk_buff *skb,
	void (*__packet_hook)(const struct net_bridge_port *p,
			      struct sk_buff *skb))
{
	int err;

	if (!should_deliver(p, skb))
		return prev;

	if (!prev)
		goto out;

	err = deliver_clone(prev, skb, __packet_hook);
	if (err)
		return ERR_PTR(err);

out:
	return p;
}
/* called under bridge lock */
static void br_flood(struct net_bridge *br, struct sk_buff *skb,
		     struct sk_buff *skb0,
		     void (*__packet_hook)(const struct net_bridge_port *p,
					   struct sk_buff *skb),
		     bool unicast)
{
	struct net_bridge_port *p;
	struct net_bridge_port *prev;

	prev = NULL;

	list_for_each_entry_rcu(p, &br->port_list, list) {
		/* Do not flood unicast traffic to ports that turn it off */
		if (unicast && !(p->flags & BR_FLOOD))
			continue;

		/* Do not flood to ports that enable proxy ARP */
		if (p->flags & BR_PROXYARP)
			continue;
		if ((p->flags & BR_PROXYARP_WIFI) &&
		    BR_INPUT_SKB_CB(skb)->proxyarp_replied)
			continue;

		prev = maybe_deliver(prev, p, skb, __packet_hook);
		if (IS_ERR(prev))
			goto out;
	}

	if (!prev)
		goto out;

	if (skb0)
		deliver_clone(prev, skb, __packet_hook);
	else
		__packet_hook(prev, skb);
	return;

out:
	if (!skb0)
		kfree_skb(skb);
}
/* called with rcu_read_lock */
void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb, bool unicast)
{
	br_flood(br, skb, NULL, __br_deliver, unicast);
}
/* called under bridge lock */
void br_flood_forward(struct net_bridge *br, struct sk_buff *skb,
		      struct sk_buff *skb2, bool unicast)
{
	br_flood(br, skb, skb2, __br_forward, unicast);
}
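/*
 * With IGMP/MLD snooping enabled, multicast frames are flooded only to the
 * ports in the matching mdb entry plus any detected multicast router ports.
 * br_multicast_flood() walks both lists in parallel, using the port pointer
 * value as the merge key so each port is delivered to at most once.
 */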
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
/* called with rcu_read_lock */
static void br_multicast_flood(struct net_bridge_mdb_entry *mdst,
			       struct sk_buff *skb, struct sk_buff *skb0,
			       void (*__packet_hook)(
					const struct net_bridge_port *p,
					struct sk_buff *skb))
{
	struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_port *prev = NULL;
	struct net_bridge_port_group *p;
	struct hlist_node *rp;

	rp = rcu_dereference(hlist_first_rcu(&br->router_list));
	p = mdst ? rcu_dereference(mdst->ports) : NULL;
	while (p || rp) {
		struct net_bridge_port *port, *lport, *rport;

		lport = p ? p->port : NULL;
		rport = rp ? hlist_entry(rp, struct net_bridge_port, rlist) :
			     NULL;

		port = (unsigned long)lport > (unsigned long)rport ?
		       lport : rport;

		prev = maybe_deliver(prev, port, skb, __packet_hook);
		if (IS_ERR(prev))
			goto out;

		if ((unsigned long)lport >= (unsigned long)port)
			p = rcu_dereference(p->next);
		if ((unsigned long)rport >= (unsigned long)port)
			rp = rcu_dereference(hlist_next_rcu(rp));
	}

	if (!prev)
		goto out;

	if (skb0)
		deliver_clone(prev, skb, __packet_hook);
	else
		__packet_hook(prev, skb);
	return;

out:
	if (!skb0)
		kfree_skb(skb);
}
/* called with rcu_read_lock */
void br_multicast_deliver(struct net_bridge_mdb_entry *mdst,
			  struct sk_buff *skb)
{
	br_multicast_flood(mdst, skb, NULL, __br_deliver);
}
/* called with rcu_read_lock */
void br_multicast_forward(struct net_bridge_mdb_entry *mdst,
			  struct sk_buff *skb, struct sk_buff *skb2)
{
	br_multicast_flood(mdst, skb, skb2, __br_forward);
}
#endif