/*
 *	Linux ethernet bridge
 *
 *	Authors:
 *	Lennert Buytenhek		<buytenh@gnu.org>
 *	Bart De Schuymer		<bdschuym@pandora.be>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Lennert dedicates this file to Kerstin Wurdinger.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/if_pppox.h>
#include <linux/ppp_defs.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter_arp.h>
#include <linux/in_route.h>
#include <linux/inetdevice.h>

#include <net/ip.h>
#include <net/ipv6.h>
#include <net/route.h>

#include <asm/uaccess.h>
#include "br_private.h"
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
#define skb_origaddr(skb)	 (((struct bridge_skb_cb *) \
				 (skb->nf_bridge->data))->daddr.ipv4)
#define store_orig_dstaddr(skb)	 (skb_origaddr(skb) = ip_hdr(skb)->daddr)
#define dnat_took_place(skb)	 (skb_origaddr(skb) != ip_hdr(skb)->daddr)
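
/*
 * Usage note (informational): br_nf_pre_routing() calls store_orig_dstaddr()
 * to stash the original destination address in the per-skb nf_bridge data
 * before the frame is handed to the IPv4 PRE_ROUTING hook; afterwards
 * br_nf_pre_routing_finish() compares the (possibly rewritten) header against
 * it with dnat_took_place() to decide whether the frame can still be bridged
 * or has to be routed.
 */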
#ifdef CONFIG_SYSCTL
static struct ctl_table_header *brnf_sysctl_header;
static int brnf_call_iptables __read_mostly = 1;
static int brnf_call_ip6tables __read_mostly = 1;
static int brnf_call_arptables __read_mostly = 1;
static int brnf_filter_vlan_tagged __read_mostly = 0;
static int brnf_filter_pppoe_tagged __read_mostly = 0;
#else
#define brnf_call_iptables 1
#define brnf_call_ip6tables 1
#define brnf_call_arptables 1
#define brnf_filter_vlan_tagged 0
#define brnf_filter_pppoe_tagged 0
#endif
static inline __be16 vlan_proto(const struct sk_buff *skb)
{
	if (vlan_tx_tag_present(skb))
		return skb->protocol;
	else if (skb->protocol == htons(ETH_P_8021Q))
		return vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
	else
		return 0;
}
#define IS_VLAN_IP(skb) \
	(vlan_proto(skb) == htons(ETH_P_IP) && \
	 brnf_filter_vlan_tagged)

#define IS_VLAN_IPV6(skb) \
	(vlan_proto(skb) == htons(ETH_P_IPV6) && \
	 brnf_filter_vlan_tagged)

#define IS_VLAN_ARP(skb) \
	(vlan_proto(skb) == htons(ETH_P_ARP) && \
	 brnf_filter_vlan_tagged)

static inline __be16 pppoe_proto(const struct sk_buff *skb)
{
	return *((__be16 *)(skb_mac_header(skb) + ETH_HLEN +
			    sizeof(struct pppoe_hdr)));
}

#define IS_PPPOE_IP(skb) \
	(skb->protocol == htons(ETH_P_PPP_SES) && \
	 pppoe_proto(skb) == htons(PPP_IP) && \
	 brnf_filter_pppoe_tagged)

#define IS_PPPOE_IPV6(skb) \
	(skb->protocol == htons(ETH_P_PPP_SES) && \
	 pppoe_proto(skb) == htons(PPP_IPV6) && \
	 brnf_filter_pppoe_tagged)
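
/*
 * Informational note: the IS_VLAN_* and IS_PPPOE_* helpers above recognise
 * IPv4, IPv6 and ARP payloads carried inside an 802.1Q or PPPoE session
 * encapsulation.  They only match when the corresponding
 * bridge-nf-filter-vlan-tagged / bridge-nf-filter-pppoe-tagged sysctl is
 * enabled, so encapsulated traffic is left alone by default.
 */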
static void fake_update_pmtu(struct dst_entry *dst, u32 mtu)
{
}

static struct dst_ops fake_dst_ops = {
	.family =		AF_INET,
	.protocol =		cpu_to_be16(ETH_P_IP),
	.update_pmtu =		fake_update_pmtu,
};
/*
 * Initialize bogus route table used to keep netfilter happy.
 * Currently, we fill in the PMTU entry because netfilter
 * refragmentation needs it, and the rt_flags entry because
 * ipt_REJECT needs it.  Future netfilter modules might
 * require us to fill additional fields.
 */
void br_netfilter_rtable_init(struct net_bridge *br)
{
	struct rtable *rt = &br->fake_rtable;

	atomic_set(&rt->dst.__refcnt, 1);
	rt->dst.dev = br->dev;
	rt->dst.path = &rt->dst;
	dst_metric_set(&rt->dst, RTAX_MTU, 1500);
	rt->dst.flags	= DST_NOXFRM;
	rt->dst.ops = &fake_dst_ops;
}
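
/*
 * Informational note: this fake rtable is handed out by
 * bridge_parent_rtable() and attached to bridged skbs via skb_dst_set_noref()
 * in the PRE_ROUTING finish functions below, so that netfilter code which
 * dereferences skb_dst() (refragmentation reading the MTU metric, ipt_REJECT)
 * sees a valid dst.  br_nf_local_in() detaches it again for locally destined
 * packets, which need a real route.
 */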
static inline struct rtable *bridge_parent_rtable(const struct net_device *dev)
{
	struct net_bridge_port *port;

	port = br_port_get_rcu(dev);
	return port ? &port->br->fake_rtable : NULL;
}
static inline struct net_device *bridge_parent(const struct net_device *dev)
{
	struct net_bridge_port *port;

	port = br_port_get_rcu(dev);
	return port ? port->br->dev : NULL;
}
static inline struct nf_bridge_info *nf_bridge_alloc(struct sk_buff *skb)
{
	skb->nf_bridge = kzalloc(sizeof(struct nf_bridge_info), GFP_ATOMIC);
	if (likely(skb->nf_bridge))
		atomic_set(&(skb->nf_bridge->use), 1);

	return skb->nf_bridge;
}
static inline struct nf_bridge_info *nf_bridge_unshare(struct sk_buff *skb)
{
	struct nf_bridge_info *nf_bridge = skb->nf_bridge;

	if (atomic_read(&nf_bridge->use) > 1) {
		struct nf_bridge_info *tmp = nf_bridge_alloc(skb);

		if (tmp) {
			memcpy(tmp, nf_bridge, sizeof(struct nf_bridge_info));
			atomic_set(&tmp->use, 1);
		}
		nf_bridge_put(nf_bridge);
		nf_bridge = tmp;
	}

	return nf_bridge;
}
static inline void nf_bridge_push_encap_header(struct sk_buff *skb)
{
	unsigned int len = nf_bridge_encap_header_len(skb);

	skb_push(skb, len);
	skb->network_header -= len;
}

static inline void nf_bridge_pull_encap_header(struct sk_buff *skb)
{
	unsigned int len = nf_bridge_encap_header_len(skb);

	skb_pull(skb, len);
	skb->network_header += len;
}

static inline void nf_bridge_pull_encap_header_rcsum(struct sk_buff *skb)
{
	unsigned int len = nf_bridge_encap_header_len(skb);

	skb_pull_rcsum(skb, len);
	skb->network_header += len;
}
static inline void nf_bridge_save_header(struct sk_buff *skb)
{
	int header_size = ETH_HLEN + nf_bridge_encap_header_len(skb);

	skb_copy_from_linear_data_offset(skb, -header_size,
					 skb->nf_bridge->data, header_size);
}
static inline void nf_bridge_update_protocol(struct sk_buff *skb)
{
	if (skb->nf_bridge->mask & BRNF_8021Q)
		skb->protocol = htons(ETH_P_8021Q);
	else if (skb->nf_bridge->mask & BRNF_PPPoE)
		skb->protocol = htons(ETH_P_PPP_SES);
}
/* When handing a packet over to the IP layer
 * check whether we have a skb that is in the
 * expected format
 */
static int br_parse_ip_options(struct sk_buff *skb)
{
	struct ip_options *opt;
	struct iphdr *iph;
	struct net_device *dev = skb->dev;
	u32 len;

	iph = ip_hdr(skb);
	opt = &(IPCB(skb)->opt);

	/* Basic sanity checks */
	if (iph->ihl < 5 || iph->version != 4)
		goto inhdr_error;

	if (!pskb_may_pull(skb, iph->ihl*4))
		goto inhdr_error;

	iph = ip_hdr(skb);
	if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
		goto inhdr_error;

	len = ntohs(iph->tot_len);
	if (skb->len < len) {
		IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INTRUNCATEDPKTS);
		goto drop;
	} else if (len < (iph->ihl*4))
		goto inhdr_error;

	if (pskb_trim_rcsum(skb, len)) {
		IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INDISCARDS);
		goto drop;
	}

	/* Zero out the CB buffer if no options present */
	if (iph->ihl == 5) {
		memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
		return 0;
	}

	opt->optlen = iph->ihl*4 - sizeof(struct iphdr);
	if (ip_options_compile(dev_net(dev), opt, skb))
		goto inhdr_error;

	/* Check correct handling of SRR option */
	if (unlikely(opt->srr)) {
		struct in_device *in_dev = __in_dev_get_rcu(dev);
		if (in_dev && !IN_DEV_SOURCE_ROUTE(in_dev))
			goto drop;

		if (ip_options_rcv_srr(skb))
			goto drop;
	}

	return 0;

inhdr_error:
	IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INHDRERRORS);
drop:
	return -1;
}
/* Fill in the header for fragmented IP packets handled by
 * the IPv4 connection tracking code.
 */
int nf_bridge_copy_header(struct sk_buff *skb)
{
	int err;
	unsigned int header_size;

	nf_bridge_update_protocol(skb);
	header_size = ETH_HLEN + nf_bridge_encap_header_len(skb);
	err = skb_cow_head(skb, header_size);
	if (err)
		return err;

	skb_copy_to_linear_data_offset(skb, -header_size,
				       skb->nf_bridge->data, header_size);
	__skb_push(skb, nf_bridge_encap_header_len(skb));
	return 0;
}
/* PF_BRIDGE/PRE_ROUTING *********************************************/
/* Undo the changes made for ip6tables PREROUTING and continue the
 * bridge PRE_ROUTING hook. */
static int br_nf_pre_routing_finish_ipv6(struct sk_buff *skb)
{
	struct nf_bridge_info *nf_bridge = skb->nf_bridge;
	struct rtable *rt;

	if (nf_bridge->mask & BRNF_PKT_TYPE) {
		skb->pkt_type = PACKET_OTHERHOST;
		nf_bridge->mask ^= BRNF_PKT_TYPE;
	}
	nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING;

	rt = bridge_parent_rtable(nf_bridge->physindev);
	if (!rt) {
		kfree_skb(skb);
		return 0;
	}
	skb_dst_set_noref(skb, &rt->dst);

	skb->dev = nf_bridge->physindev;
	nf_bridge_update_protocol(skb);
	nf_bridge_push_encap_header(skb);
	NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
		       br_handle_frame_finish, 1);

	return 0;
}
/* Obtain the correct destination MAC address, while preserving the original
 * source MAC address. If we already know this address, we just copy it. If we
 * don't, we use the neighbour framework to find out. In both cases, we make
 * sure that br_handle_frame_finish() is called afterwards.
 */
static int br_nf_pre_routing_finish_bridge(struct sk_buff *skb)
{
	struct nf_bridge_info *nf_bridge = skb->nf_bridge;
	struct dst_entry *dst;

	skb->dev = bridge_parent(skb->dev);
	if (!skb->dev)
		goto free_skb;
	dst = skb_dst(skb);
	if (dst->hh) {
		neigh_hh_bridge(dst->hh, skb);
		skb->dev = nf_bridge->physindev;
		return br_handle_frame_finish(skb);
	} else if (dst->neighbour) {
		/* the neighbour function below overwrites the complete
		 * MAC header, so we save the Ethernet source address and
		 * protocol number. */
		skb_copy_from_linear_data_offset(skb, -(ETH_HLEN-ETH_ALEN),
						 skb->nf_bridge->data,
						 ETH_HLEN-ETH_ALEN);
		/* tell br_dev_xmit to continue with forwarding */
		nf_bridge->mask |= BRNF_BRIDGED_DNAT;
		return dst->neighbour->output(skb);
	}
free_skb:
	kfree_skb(skb);
	return 0;
}
/* This requires some explaining. If DNAT has taken place,
 * we will need to fix up the destination Ethernet address.
 *
 * There are two cases to consider:
 * 1. The packet was DNAT'ed to a device in the same bridge
 *    port group as it was received on. We can still bridge
 *    the packet.
 * 2. The packet was DNAT'ed to a different device, either
 *    a non-bridged device or another bridge port group.
 *    The packet will need to be routed.
 *
 * The correct way of distinguishing between these two cases is to
 * call ip_route_input() and to look at skb->dst->dev, which is
 * changed to the destination device if ip_route_input() succeeds.
 *
 * Let's first consider the case that ip_route_input() succeeds:
 *
 * If the output device equals the logical bridge device the packet
 * came in on, we can consider this bridging. The corresponding MAC
 * address will be obtained in br_nf_pre_routing_finish_bridge.
 * Otherwise, the packet is considered to be routed and we just
 * change the destination MAC address so that the packet will
 * later be passed up to the IP stack to be routed. For a redirected
 * packet, ip_route_input() will give back the localhost as output device,
 * which differs from the bridge device.
 *
 * Let's now consider the case that ip_route_input() fails:
 *
 * This can be because the destination address is martian, in which case
 * the packet will be dropped.
 * If IP forwarding is disabled, ip_route_input() will fail, while
 * ip_route_output_key() can return success. The source
 * address for ip_route_output_key() is set to zero, so ip_route_output_key()
 * thinks we're handling a locally generated packet and won't care
 * if IP forwarding is enabled. If the output device equals the logical bridge
 * device, we proceed as if ip_route_input() succeeded. If it differs from the
 * logical bridge port or if ip_route_output_key() fails we drop the packet.
 */
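
/*
 * Rough summary (informational only) of the decision implemented by
 * br_nf_pre_routing_finish() below:
 *
 *	if no DNAT took place:
 *		attach the fake rtable and re-run the bridge PRE_ROUTING hook
 *	else if a route is found (by ip_route_input(), or by
 *	        ip_route_output_key() with the bridge device as output):
 *		bridge the frame when the route points back at the bridge
 *		device, otherwise rewrite the destination MAC to the bridge
 *		and let the IP stack route it
 *	else:
 *		drop the frame
 */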
static int br_nf_pre_routing_finish(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct iphdr *iph = ip_hdr(skb);
	struct nf_bridge_info *nf_bridge = skb->nf_bridge;
	struct rtable *rt;
	int err;

	if (nf_bridge->mask & BRNF_PKT_TYPE) {
		skb->pkt_type = PACKET_OTHERHOST;
		nf_bridge->mask ^= BRNF_PKT_TYPE;
	}
	nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING;
	if (dnat_took_place(skb)) {
		if ((err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev))) {
			struct flowi fl = {
				.fl4_dst = iph->daddr,
				.fl4_tos = RT_TOS(iph->tos),
			};
			struct in_device *in_dev = __in_dev_get_rcu(dev);

			/* If err equals -EHOSTUNREACH the error is due to a
			 * martian destination or due to the fact that
			 * forwarding is disabled. For most martian packets,
			 * ip_route_output_key() will fail. It won't fail for 2 types of
			 * martian destinations: loopback destinations and destination
			 * 0.0.0.0. In both cases the packet will be dropped because the
			 * destination is the loopback device and not the bridge. */
			if (err != -EHOSTUNREACH || !in_dev || IN_DEV_FORWARD(in_dev))
				goto free_skb;

			if (!ip_route_output_key(dev_net(dev), &rt, &fl)) {
				/* - Bridged-and-DNAT'ed traffic doesn't
				 *   require ip_forwarding. */
				if (((struct dst_entry *)rt)->dev == dev) {
					skb_dst_set(skb, (struct dst_entry *)rt);
					goto bridged_dnat;
				}
				dst_release((struct dst_entry *)rt);
			}
free_skb:
			kfree_skb(skb);
			return 0;
		} else {
			if (skb_dst(skb)->dev == dev) {
bridged_dnat:
				skb->dev = nf_bridge->physindev;
				nf_bridge_update_protocol(skb);
				nf_bridge_push_encap_header(skb);
				NF_HOOK_THRESH(NFPROTO_BRIDGE,
					       NF_BR_PRE_ROUTING,
					       skb, skb->dev, NULL,
					       br_nf_pre_routing_finish_bridge,
					       1);
				return 0;
			}
			memcpy(eth_hdr(skb)->h_dest, dev->dev_addr, ETH_ALEN);
			skb->pkt_type = PACKET_HOST;
		}
	} else {
		rt = bridge_parent_rtable(nf_bridge->physindev);
		if (!rt) {
			kfree_skb(skb);
			return 0;
		}
		skb_dst_set_noref(skb, &rt->dst);
	}

	skb->dev = nf_bridge->physindev;
	nf_bridge_update_protocol(skb);
	nf_bridge_push_encap_header(skb);
	NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
		       br_handle_frame_finish, 1);

	return 0;
}
/* Some common code for IPv4/IPv6 */
static struct net_device *setup_pre_routing(struct sk_buff *skb)
{
	struct nf_bridge_info *nf_bridge = skb->nf_bridge;

	if (skb->pkt_type == PACKET_OTHERHOST) {
		skb->pkt_type = PACKET_HOST;
		nf_bridge->mask |= BRNF_PKT_TYPE;
	}

	nf_bridge->mask |= BRNF_NF_BRIDGE_PREROUTING;
	nf_bridge->physindev = skb->dev;
	skb->dev = bridge_parent(skb->dev);
	if (skb->protocol == htons(ETH_P_8021Q))
		nf_bridge->mask |= BRNF_8021Q;
	else if (skb->protocol == htons(ETH_P_PPP_SES))
		nf_bridge->mask |= BRNF_PPPoE;

	return skb->dev;
}
/* We only check the length. A bridge shouldn't do any hop-by-hop stuff anyway */
static int check_hbh_len(struct sk_buff *skb)
{
	unsigned char *raw = (u8 *)(ipv6_hdr(skb) + 1);
	u32 pkt_len;
	const unsigned char *nh = skb_network_header(skb);
	int off = raw - nh;
	int len = (raw[1] + 1) << 3;

	if ((raw + len) - skb->data > skb_headlen(skb))
		goto bad;

	off += 2;
	len -= 2;

	while (len > 0) {
		int optlen = nh[off + 1] + 2;

		switch (nh[off]) {
		case IPV6_TLV_PAD0:
			optlen = 1;
			break;

		case IPV6_TLV_PADN:
			break;

		case IPV6_TLV_JUMBO:
			if (nh[off + 1] != 4 || (off & 3) != 2)
				goto bad;
			pkt_len = ntohl(*(__be32 *) (nh + off + 2));
			if (pkt_len <= IPV6_MAXPLEN ||
			    ipv6_hdr(skb)->payload_len)
				goto bad;
			if (pkt_len > skb->len - sizeof(struct ipv6hdr))
				goto bad;
			if (pskb_trim_rcsum(skb,
					    pkt_len + sizeof(struct ipv6hdr)))
				goto bad;
			nh = skb_network_header(skb);
			break;
		default:
			if (optlen > len)
				goto bad;
			break;
		}
		off += optlen;
		len -= optlen;
	}
	if (len == 0)
		return 0;
bad:
	return -1;
}
/* Replicate the checks that IPv6 does on packet reception and pass the packet
 * to ip6tables, which doesn't support NAT, so things are fairly simple. */
static unsigned int br_nf_pre_routing_ipv6(unsigned int hook,
					   struct sk_buff *skb,
					   const struct net_device *in,
					   const struct net_device *out,
					   int (*okfn)(struct sk_buff *))
{
	struct ipv6hdr *hdr;
	u32 pkt_len;

	if (skb->len < sizeof(struct ipv6hdr))
		return NF_DROP;

	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
		return NF_DROP;

	hdr = ipv6_hdr(skb);

	if (hdr->version != 6)
		return NF_DROP;

	pkt_len = ntohs(hdr->payload_len);

	if (pkt_len || hdr->nexthdr != NEXTHDR_HOP) {
		if (pkt_len + sizeof(struct ipv6hdr) > skb->len)
			return NF_DROP;
		if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr)))
			return NF_DROP;
	}
	if (hdr->nexthdr == NEXTHDR_HOP && check_hbh_len(skb))
		return NF_DROP;

	nf_bridge_put(skb->nf_bridge);
	if (!nf_bridge_alloc(skb))
		return NF_DROP;
	if (!setup_pre_routing(skb))
		return NF_DROP;

	skb->protocol = htons(ETH_P_IPV6);
	NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, skb, skb->dev, NULL,
		br_nf_pre_routing_finish_ipv6);

	return NF_STOLEN;
}
/* Direct IPv6 traffic to br_nf_pre_routing_ipv6.
 * Replicate the checks that IPv4 does on packet reception.
 * Set skb->dev to the bridge device (i.e. parent of the
 * receiving device) to make netfilter happy, the REDIRECT
 * target in particular.  Save the original destination IP
 * address to be able to detect DNAT afterwards. */
static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff *skb,
				      const struct net_device *in,
				      const struct net_device *out,
				      int (*okfn)(struct sk_buff *))
{
	struct net_bridge_port *p;
	struct net_bridge *br;
	__u32 len = nf_bridge_encap_header_len(skb);

	if (unlikely(!pskb_may_pull(skb, len)))
		return NF_DROP;

	p = br_port_get_rcu(in);
	if (p == NULL)
		return NF_DROP;
	br = p->br;

	if (skb->protocol == htons(ETH_P_IPV6) || IS_VLAN_IPV6(skb) ||
	    IS_PPPOE_IPV6(skb)) {
		if (!brnf_call_ip6tables && !br->nf_call_ip6tables)
			return NF_ACCEPT;

		nf_bridge_pull_encap_header_rcsum(skb);
		return br_nf_pre_routing_ipv6(hook, skb, in, out, okfn);
	}

	if (!brnf_call_iptables && !br->nf_call_iptables)
		return NF_ACCEPT;

	if (skb->protocol != htons(ETH_P_IP) && !IS_VLAN_IP(skb) &&
	    !IS_PPPOE_IP(skb))
		return NF_ACCEPT;

	nf_bridge_pull_encap_header_rcsum(skb);

	if (br_parse_ip_options(skb))
		return NF_DROP;

	nf_bridge_put(skb->nf_bridge);
	if (!nf_bridge_alloc(skb))
		return NF_DROP;
	if (!setup_pre_routing(skb))
		return NF_DROP;
	store_orig_dstaddr(skb);
	skb->protocol = htons(ETH_P_IP);

	NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, skb, skb->dev, NULL,
		br_nf_pre_routing_finish);

	return NF_STOLEN;
}
/* PF_BRIDGE/LOCAL_IN ************************************************/
/* The packet is locally destined, which requires a real
 * dst_entry, so detach the fake one.  On the way up, the
 * packet would pass through PRE_ROUTING again (which already
 * took place when the packet entered the bridge), but we
 * register an IPv4 PRE_ROUTING 'sabotage' hook that will
 * prevent this from happening. */
static unsigned int br_nf_local_in(unsigned int hook, struct sk_buff *skb,
				   const struct net_device *in,
				   const struct net_device *out,
				   int (*okfn)(struct sk_buff *))
{
	struct rtable *rt = skb_rtable(skb);

	if (rt && rt == bridge_parent_rtable(in))
		skb_dst_drop(skb);

	return NF_ACCEPT;
}
/* PF_BRIDGE/FORWARD *************************************************/
static int br_nf_forward_finish(struct sk_buff *skb)
{
	struct nf_bridge_info *nf_bridge = skb->nf_bridge;
	struct net_device *in;

	if (skb->protocol != htons(ETH_P_ARP) && !IS_VLAN_ARP(skb)) {
		in = nf_bridge->physindev;
		if (nf_bridge->mask & BRNF_PKT_TYPE) {
			skb->pkt_type = PACKET_OTHERHOST;
			nf_bridge->mask ^= BRNF_PKT_TYPE;
		}
		nf_bridge_update_protocol(skb);
	} else {
		in = *((struct net_device **)(skb->cb));
	}
	nf_bridge_push_encap_header(skb);

	NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_FORWARD, skb, in,
		       skb->dev, br_forward_finish, 1);
	return 0;
}
/* This is the 'purely bridged' case.  For IP, we pass the packet to
 * netfilter with indev and outdev set to the bridge device,
 * but we are still able to filter on the 'real' indev/outdev
 * because of the physdev module. For ARP, indev and outdev are the
 * bridge ports. */
static unsigned int br_nf_forward_ip(unsigned int hook, struct sk_buff *skb,
				     const struct net_device *in,
				     const struct net_device *out,
				     int (*okfn)(struct sk_buff *))
{
	struct nf_bridge_info *nf_bridge;
	struct net_device *parent;
	u_int8_t pf;

	if (!skb->nf_bridge)
		return NF_ACCEPT;

	/* Need exclusive nf_bridge_info since we might have multiple
	 * different physoutdevs. */
	if (!nf_bridge_unshare(skb))
		return NF_DROP;

	parent = bridge_parent(out);
	if (!parent)
		return NF_DROP;

	if (skb->protocol == htons(ETH_P_IP) || IS_VLAN_IP(skb) ||
	    IS_PPPOE_IP(skb))
		pf = PF_INET;
	else if (skb->protocol == htons(ETH_P_IPV6) || IS_VLAN_IPV6(skb) ||
		 IS_PPPOE_IPV6(skb))
		pf = PF_INET6;
	else
		return NF_ACCEPT;

	nf_bridge_pull_encap_header(skb);

	nf_bridge = skb->nf_bridge;
	if (skb->pkt_type == PACKET_OTHERHOST) {
		skb->pkt_type = PACKET_HOST;
		nf_bridge->mask |= BRNF_PKT_TYPE;
	}

	/* The physdev module checks on this */
	nf_bridge->mask |= BRNF_BRIDGED;
	nf_bridge->physoutdev = skb->dev;
	if (pf == PF_INET)
		skb->protocol = htons(ETH_P_IP);
	else
		skb->protocol = htons(ETH_P_IPV6);

	NF_HOOK(pf, NF_INET_FORWARD, skb, bridge_parent(in), parent,
		br_nf_forward_finish);

	return NF_STOLEN;
}
static unsigned int br_nf_forward_arp(unsigned int hook, struct sk_buff *skb,
				      const struct net_device *in,
				      const struct net_device *out,
				      int (*okfn)(struct sk_buff *))
{
	struct net_bridge_port *p;
	struct net_bridge *br;
	struct net_device **d = (struct net_device **)(skb->cb);

	p = br_port_get_rcu(out);
	if (p == NULL)
		return NF_ACCEPT;
	br = p->br;

	if (!brnf_call_arptables && !br->nf_call_arptables)
		return NF_ACCEPT;

	if (skb->protocol != htons(ETH_P_ARP)) {
		if (!IS_VLAN_ARP(skb))
			return NF_ACCEPT;
		nf_bridge_pull_encap_header(skb);
	}

	if (arp_hdr(skb)->ar_pln != 4) {
		if (IS_VLAN_ARP(skb))
			nf_bridge_push_encap_header(skb);
		return NF_ACCEPT;
	}
	*d = (struct net_device *)in;
	NF_HOOK(NFPROTO_ARP, NF_ARP_FORWARD, skb, (struct net_device *)in,
		(struct net_device *)out, br_nf_forward_finish);

	return NF_STOLEN;
}
#if defined(CONFIG_NF_CONNTRACK_IPV4) || defined(CONFIG_NF_CONNTRACK_IPV4_MODULE)
static int br_nf_dev_queue_xmit(struct sk_buff *skb)
{
	int ret;

	if (skb->nfct != NULL && skb->protocol == htons(ETH_P_IP) &&
	    skb->len + nf_bridge_mtu_reduction(skb) > skb->dev->mtu &&
	    !skb_is_gso(skb)) {
		if (br_parse_ip_options(skb))
			/* Drop invalid packet */
			return NF_DROP;
		ret = ip_fragment(skb, br_dev_queue_push_xmit);
	} else
		ret = br_dev_queue_push_xmit(skb);

	return ret;
}
#else
static int br_nf_dev_queue_xmit(struct sk_buff *skb)
{
	return br_dev_queue_push_xmit(skb);
}
#endif
/* PF_BRIDGE/POST_ROUTING ********************************************/
static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff *skb,
				       const struct net_device *in,
				       const struct net_device *out,
				       int (*okfn)(struct sk_buff *))
{
	struct nf_bridge_info *nf_bridge = skb->nf_bridge;
	struct net_device *realoutdev = bridge_parent(skb->dev);
	u_int8_t pf;

	if (!nf_bridge || !(nf_bridge->mask & BRNF_BRIDGED))
		return NF_ACCEPT;

	if (!realoutdev)
		return NF_DROP;

	if (skb->protocol == htons(ETH_P_IP) || IS_VLAN_IP(skb) ||
	    IS_PPPOE_IP(skb))
		pf = PF_INET;
	else if (skb->protocol == htons(ETH_P_IPV6) || IS_VLAN_IPV6(skb) ||
		 IS_PPPOE_IPV6(skb))
		pf = PF_INET6;
	else
		return NF_ACCEPT;

	/* We assume any code from br_dev_queue_push_xmit onwards doesn't care
	 * about the value of skb->pkt_type. */
	if (skb->pkt_type == PACKET_OTHERHOST) {
		skb->pkt_type = PACKET_HOST;
		nf_bridge->mask |= BRNF_PKT_TYPE;
	}

	nf_bridge_pull_encap_header(skb);
	nf_bridge_save_header(skb);
	if (pf == PF_INET)
		skb->protocol = htons(ETH_P_IP);
	else
		skb->protocol = htons(ETH_P_IPV6);

	NF_HOOK(pf, NF_INET_POST_ROUTING, skb, NULL, realoutdev,
		br_nf_dev_queue_xmit);

	return NF_ACCEPT;
}
/* IP/SABOTAGE *****************************************************/
/* Don't hand locally destined packets to PF_INET(6)/PRE_ROUTING
 * for the second time. */
static unsigned int ip_sabotage_in(unsigned int hook, struct sk_buff *skb,
				   const struct net_device *in,
				   const struct net_device *out,
				   int (*okfn)(struct sk_buff *))
{
	if (skb->nf_bridge &&
	    !(skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)) {
		return NF_STOP;
	}

	return NF_ACCEPT;
}
/* For br_nf_post_routing, we need (prio = NF_BR_PRI_LAST), because
 * br_dev_queue_push_xmit is called afterwards */
static struct nf_hook_ops br_nf_ops[] __read_mostly = {
	{
		.hook = br_nf_pre_routing,
		.owner = THIS_MODULE,
		.pf = PF_BRIDGE,
		.hooknum = NF_BR_PRE_ROUTING,
		.priority = NF_BR_PRI_BRNF,
	},
	{
		.hook = br_nf_local_in,
		.owner = THIS_MODULE,
		.pf = PF_BRIDGE,
		.hooknum = NF_BR_LOCAL_IN,
		.priority = NF_BR_PRI_BRNF,
	},
	{
		.hook = br_nf_forward_ip,
		.owner = THIS_MODULE,
		.pf = PF_BRIDGE,
		.hooknum = NF_BR_FORWARD,
		.priority = NF_BR_PRI_BRNF - 1,
	},
	{
		.hook = br_nf_forward_arp,
		.owner = THIS_MODULE,
		.pf = PF_BRIDGE,
		.hooknum = NF_BR_FORWARD,
		.priority = NF_BR_PRI_BRNF,
	},
	{
		.hook = br_nf_post_routing,
		.owner = THIS_MODULE,
		.pf = PF_BRIDGE,
		.hooknum = NF_BR_POST_ROUTING,
		.priority = NF_BR_PRI_LAST,
	},
	{
		.hook = ip_sabotage_in,
		.owner = THIS_MODULE,
		.pf = PF_INET,
		.hooknum = NF_INET_PRE_ROUTING,
		.priority = NF_IP_PRI_FIRST,
	},
	{
		.hook = ip_sabotage_in,
		.owner = THIS_MODULE,
		.pf = PF_INET6,
		.hooknum = NF_INET_PRE_ROUTING,
		.priority = NF_IP6_PRI_FIRST,
	},
};
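
/*
 * Informational note: lower priority values run earlier, so br_nf_forward_ip
 * (NF_BR_PRI_BRNF - 1) is invoked before br_nf_forward_arp on the bridge
 * FORWARD hook, and ip_sabotage_in sits at NF_IP_PRI_FIRST/NF_IP6_PRI_FIRST
 * so it can return NF_STOP before the normal PF_INET(6) PRE_ROUTING chains
 * would process an already-bridged, locally destined packet a second time.
 */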
#ifdef CONFIG_SYSCTL
static
int brnf_sysctl_call_tables(ctl_table * ctl, int write,
			    void __user * buffer, size_t * lenp, loff_t * ppos)
{
	int ret;

	ret = proc_dointvec(ctl, write, buffer, lenp, ppos);

	if (write && *(int *)(ctl->data))
		*(int *)(ctl->data) = 1;
	return ret;
}

static ctl_table brnf_table[] = {
	{
		.procname	= "bridge-nf-call-arptables",
		.data		= &brnf_call_arptables,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= brnf_sysctl_call_tables,
	},
	{
		.procname	= "bridge-nf-call-iptables",
		.data		= &brnf_call_iptables,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= brnf_sysctl_call_tables,
	},
	{
		.procname	= "bridge-nf-call-ip6tables",
		.data		= &brnf_call_ip6tables,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= brnf_sysctl_call_tables,
	},
	{
		.procname	= "bridge-nf-filter-vlan-tagged",
		.data		= &brnf_filter_vlan_tagged,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= brnf_sysctl_call_tables,
	},
	{
		.procname	= "bridge-nf-filter-pppoe-tagged",
		.data		= &brnf_filter_pppoe_tagged,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= brnf_sysctl_call_tables,
	},
	{ }
};
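
/*
 * Administration sketch (not kernel code): with CONFIG_SYSCTL enabled the
 * knobs above appear under /proc/sys/net/bridge/, e.g.
 *
 *	sysctl -w net.bridge.bridge-nf-call-iptables=1
 *	echo 0 > /proc/sys/net/bridge/bridge-nf-call-ip6tables
 *
 * Any non-zero value written is normalised to 1 by
 * brnf_sysctl_call_tables().
 */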
static struct ctl_path brnf_path[] = {
	{ .procname = "net", },
	{ .procname = "bridge", },
	{ }
};
#endif
int __init br_netfilter_init(void)
{
	int ret;

	ret = dst_entries_init(&fake_dst_ops);
	if (ret < 0)
		return ret;

	ret = nf_register_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops));
	if (ret < 0) {
		dst_entries_destroy(&fake_dst_ops);
		return ret;
	}
#ifdef CONFIG_SYSCTL
	brnf_sysctl_header = register_sysctl_paths(brnf_path, brnf_table);
	if (brnf_sysctl_header == NULL) {
		printk(KERN_WARNING
		       "br_netfilter: can't register to sysctl.\n");
		nf_unregister_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops));
		dst_entries_destroy(&fake_dst_ops);
		return -ENOMEM;
	}
#endif
	printk(KERN_NOTICE "Bridge firewalling registered\n");
	return 0;
}
void br_netfilter_fini(void)
{
	nf_unregister_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops));
#ifdef CONFIG_SYSCTL
	unregister_sysctl_table(brnf_sysctl_header);
#endif
	dst_entries_destroy(&fake_dst_ops);
}