/*
 *	Linux ethernet bridge
 *
 *	Lennert Buytenhek		<buytenh@gnu.org>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/netpoll.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/netfilter_bridge.h>
#include "br_private.h"
static int deliver_clone(const struct net_bridge_port *prev,
			 struct sk_buff *skb,
			 void (*__packet_hook)(const struct net_bridge_port *p,
					       struct sk_buff *skb));
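
/*
 * Note: the flood and multicast helpers below take a __packet_hook callback
 * (__br_deliver for locally originated traffic, __br_forward for traffic
 * being forwarded) so that one walk over the ports serves both paths.
 */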
/* Don't forward packets to originating port or forwarding disabled */
static inline int should_deliver(const struct net_bridge_port *p,
				 const struct sk_buff *skb)
{
	return (((p->flags & BR_HAIRPIN_MODE) || skb->dev != p->dev) &&
		p->state == BR_STATE_FORWARDING);
}
static inline unsigned packet_length(const struct sk_buff *skb)
{
	return skb->len - (skb->protocol == htons(ETH_P_8021Q) ? VLAN_HLEN : 0);
}
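
/*
 * packet_length() above ignores the VLAN header so that a tagged frame is
 * not counted as oversized against the egress device MTU in the check below.
 */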
int br_dev_queue_push_xmit(struct sk_buff *skb)
{
	/* drop mtu oversized packets except gso */
	if (packet_length(skb) > skb->dev->mtu && !skb_is_gso(skb))
		kfree_skb(skb);
	else {
		/* ip_fragment doesn't copy the MAC header */
		if (nf_bridge_maybe_copy_header(skb))
			kfree_skb(skb);
		else {
			skb_push(skb, ETH_HLEN);

#ifdef CONFIG_NET_POLL_CONTROLLER
			if (unlikely(skb->dev->priv_flags & IFF_IN_NETPOLL)) {
				netpoll_send_skb(skb->dev->npinfo->netpoll, skb);
				skb->dev->priv_flags &= ~IFF_IN_NETPOLL;
			} else
#endif
				dev_queue_xmit(skb);
		}
	}

	return 0;
}
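
/*
 * br_forward_finish() runs the NF_BR_POST_ROUTING netfilter hook; only if
 * the hook accepts the packet does br_dev_queue_push_xmit() transmit it.
 */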
int br_forward_finish(struct sk_buff *skb)
{
	return NF_HOOK(NFPROTO_BRIDGE, NF_BR_POST_ROUTING, skb, NULL, skb->dev,
		       br_dev_queue_push_xmit);
}
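
/*
 * Netpoll note for __br_deliver(): while a netpoll transmit is in flight the
 * bridge device's npinfo is pointed at the egress port device and the port
 * is flagged IFF_IN_NETPOLL; afterwards the netpoll is pointed back at the
 * bridge device.
 */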
static void __br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
{
#ifdef CONFIG_NET_POLL_CONTROLLER
	struct net_bridge *br = to->br;

	if (unlikely(br->dev->priv_flags & IFF_IN_NETPOLL)) {
		struct netpoll *np;

		to->dev->npinfo = skb->dev->npinfo;
		np = skb->dev->npinfo->netpoll;
		np->real_dev = np->dev = to->dev;
		to->dev->priv_flags |= IFF_IN_NETPOLL;
	}
#endif
	skb->dev = to->dev;
	NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
		br_forward_finish);

#ifdef CONFIG_NET_POLL_CONTROLLER
	if (skb->dev->npinfo)
		skb->dev->npinfo->netpoll->dev = br->dev;
#endif
}
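
/*
 * __br_forward() below refuses to forward LRO-aggregated skbs
 * (skb_warn_if_lro) and invokes the NF_BR_FORWARD hook with the original
 * ingress device and the new egress device.
 */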
static void __br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
{
	struct net_device *indev;

	if (skb_warn_if_lro(skb)) {
		kfree_skb(skb);
		return;
	}

	indev = skb->dev;
	skb->dev = to->dev;
	skb_forward_csum(skb);

	NF_HOOK(NFPROTO_BRIDGE, NF_BR_FORWARD, skb, indev, skb->dev,
		br_forward_finish);
}
/* called with rcu_read_lock */
void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
{
	if (should_deliver(to, skb)) {
		__br_deliver(to, skb);
		return;
	}

	kfree_skb(skb);
}
/* called with rcu_read_lock */
void br_forward(const struct net_bridge_port *to, struct sk_buff *skb,
		struct sk_buff *skb0)
{
	if (should_deliver(to, skb)) {
		if (skb0)
			deliver_clone(to, skb, __br_forward);
		else
			__br_forward(to, skb);
		return;
	}

	if (!skb0)
		kfree_skb(skb);
}
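
/*
 * deliver_clone() works on a clone so the caller's original skb stays usable
 * (e.g. when a copy is still due to go up the local stack); allocation
 * failures are accounted as tx_dropped on the bridge device.
 */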
static int deliver_clone(const struct net_bridge_port *prev,
			 struct sk_buff *skb,
			 void (*__packet_hook)(const struct net_bridge_port *p,
					       struct sk_buff *skb))
{
	struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;

	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb) {
		dev->stats.tx_dropped++;
		return -ENOMEM;
	}

	__packet_hook(prev, skb);
	return 0;
}
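
/*
 * maybe_deliver() defers transmission by one port: it clones and sends to the
 * previously selected port and returns the current one, so the last eligible
 * port can later be handed the original skb without an extra clone.
 */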
static struct net_bridge_port *maybe_deliver(
	struct net_bridge_port *prev, struct net_bridge_port *p,
	struct sk_buff *skb,
	void (*__packet_hook)(const struct net_bridge_port *p,
			      struct sk_buff *skb))
{
	int err;

	if (!should_deliver(p, skb))
		return prev;

	if (!prev)
		goto out;

	err = deliver_clone(prev, skb, __packet_hook);
	if (err)
		return ERR_PTR(err);

out:
	return p;
}
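
/*
 * For br_flood() and br_multicast_flood(): a non-NULL skb0 means the caller
 * still needs the original skb (a copy is pending for local delivery), so
 * the last port gets a clone; with skb0 == NULL the original is sent to the
 * last port, or freed if nothing was deliverable.
 */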
/* called under bridge lock */
static void br_flood(struct net_bridge *br, struct sk_buff *skb,
		     struct sk_buff *skb0,
		     void (*__packet_hook)(const struct net_bridge_port *p,
					   struct sk_buff *skb))
{
	struct net_bridge_port *p;
	struct net_bridge_port *prev;

	prev = NULL;

	list_for_each_entry_rcu(p, &br->port_list, list) {
		prev = maybe_deliver(prev, p, skb, __packet_hook);
		if (IS_ERR(prev))
			goto out;
	}

	if (!prev)
		goto out;

	if (skb0)
		deliver_clone(prev, skb, __packet_hook);
	else
		__packet_hook(prev, skb);
	return;

out:
	if (!skb0)
		kfree_skb(skb);
}
/* called with rcu_read_lock */
void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb)
{
	br_flood(br, skb, NULL, __br_deliver);
}
/* called under bridge lock */
void br_flood_forward(struct net_bridge *br, struct sk_buff *skb,
		      struct sk_buff *skb2)
{
	br_flood(br, skb, skb2, __br_forward);
}
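
/*
 * The IGMP snooping variant below merges two RCU lists in one pass: the mdb
 * entry's port group list and the bridge's multicast router list. The merge
 * relies on both lists being ordered by descending port pointer, so a port
 * appearing on both lists is delivered to only once.
 */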
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
/* called with rcu_read_lock */
static void br_multicast_flood(struct net_bridge_mdb_entry *mdst,
			       struct sk_buff *skb, struct sk_buff *skb0,
			       void (*__packet_hook)(
					const struct net_bridge_port *p,
					struct sk_buff *skb))
{
	struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_port *prev = NULL;
	struct net_bridge_port_group *p;
	struct hlist_node *rp;

	rp = rcu_dereference(br->router_list.first);
	p = mdst ? rcu_dereference(mdst->ports) : NULL;
	while (p || rp) {
		struct net_bridge_port *port, *lport, *rport;

		lport = p ? p->port : NULL;
		rport = rp ? hlist_entry(rp, struct net_bridge_port, rlist) :
			     NULL;

		port = (unsigned long)lport > (unsigned long)rport ?
		       lport : rport;

		prev = maybe_deliver(prev, port, skb, __packet_hook);
		if (IS_ERR(prev))
			goto out;

		if ((unsigned long)lport >= (unsigned long)port)
			p = rcu_dereference(p->next);
		if ((unsigned long)rport >= (unsigned long)port)
			rp = rcu_dereference(rp->next);
	}

	if (!prev)
		goto out;

	if (skb0)
		deliver_clone(prev, skb, __packet_hook);
	else
		__packet_hook(prev, skb);
	return;

out:
	if (!skb0)
		kfree_skb(skb);
}
/* called with rcu_read_lock */
void br_multicast_deliver(struct net_bridge_mdb_entry *mdst,
			  struct sk_buff *skb)
{
	br_multicast_flood(mdst, skb, NULL, __br_deliver);
}
/* called with rcu_read_lock */
void br_multicast_forward(struct net_bridge_mdb_entry *mdst,
			  struct sk_buff *skb, struct sk_buff *skb2)
{
	br_multicast_flood(mdst, skb, skb2, __br_forward);
}
#endif