/*
 *	Handle firewalling
 *	Linux ethernet bridge
 *
 *	Authors:
 *	Lennert Buytenhek		<buytenh@gnu.org>
 *	Bart De Schuymer		<bdschuym@pandora.be>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Lennert dedicates this file to Kerstin Wurdinger.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/if_pppox.h>
#include <linux/ppp_defs.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter_arp.h>
#include <linux/in_route.h>
#include <linux/inetdevice.h>

#include <net/ip.h>
#include <net/ipv6.h>
#include <net/route.h>

#include <asm/uaccess.h>
#include "br_private.h"
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif

#define skb_origaddr(skb)	(((struct bridge_skb_cb *) \
				 (skb->nf_bridge->data))->daddr.ipv4)
#define store_orig_dstaddr(skb)	(skb_origaddr(skb) = ip_hdr(skb)->daddr)
#define dnat_took_place(skb)	(skb_origaddr(skb) != ip_hdr(skb)->daddr)

#ifdef CONFIG_SYSCTL
static struct ctl_table_header *brnf_sysctl_header;
static int brnf_call_iptables __read_mostly = 1;
static int brnf_call_ip6tables __read_mostly = 1;
static int brnf_call_arptables __read_mostly = 1;
static int brnf_filter_vlan_tagged __read_mostly = 0;
static int brnf_filter_pppoe_tagged __read_mostly = 0;
#else
#define brnf_call_iptables 1
#define brnf_call_ip6tables 1
#define brnf_call_arptables 1
#define brnf_filter_vlan_tagged 0
#define brnf_filter_pppoe_tagged 0
#endif

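/* Return the protocol carried inside a VLAN-tagged frame (whether the tag was
 * stripped by hardware or is still in the packet), or 0 if there is no tag. */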
static inline __be16 vlan_proto(const struct sk_buff *skb)
{
	if (vlan_tx_tag_present(skb))
		return skb->protocol;
	else if (skb->protocol == htons(ETH_P_8021Q))
		return vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
	else
		return 0;
}

#define IS_VLAN_IP(skb) \
	(vlan_proto(skb) == htons(ETH_P_IP) && \
	 brnf_filter_vlan_tagged)

#define IS_VLAN_IPV6(skb) \
	(vlan_proto(skb) == htons(ETH_P_IPV6) && \
	 brnf_filter_vlan_tagged)

#define IS_VLAN_ARP(skb) \
	(vlan_proto(skb) == htons(ETH_P_ARP) && \
	 brnf_filter_vlan_tagged)

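/* Return the PPP protocol field that follows the PPPoE session header. */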
static inline __be16 pppoe_proto(const struct sk_buff *skb)
{
	return *((__be16 *)(skb_mac_header(skb) + ETH_HLEN +
			    sizeof(struct pppoe_hdr)));
}

#define IS_PPPOE_IP(skb) \
	(skb->protocol == htons(ETH_P_PPP_SES) && \
	 pppoe_proto(skb) == htons(PPP_IP) && \
	 brnf_filter_pppoe_tagged)

#define IS_PPPOE_IPV6(skb) \
	(skb->protocol == htons(ETH_P_PPP_SES) && \
	 pppoe_proto(skb) == htons(PPP_IPV6) && \
	 brnf_filter_pppoe_tagged)

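/* Fake dst_ops for the bogus route table below; PMTU updates on it are
 * simply ignored. */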
static void fake_update_pmtu(struct dst_entry *dst, u32 mtu)
{
}

static struct dst_ops fake_dst_ops = {
	.family		= AF_INET,
	.protocol	= cpu_to_be16(ETH_P_IP),
	.update_pmtu	= fake_update_pmtu,
};

/*
 * Initialize bogus route table used to keep netfilter happy.
 * Currently, we fill in the PMTU entry because netfilter
 * refragmentation needs it, and the rt_flags entry because
 * ipt_REJECT needs it.  Future netfilter modules might
 * require us to fill additional fields.
 */
void br_netfilter_rtable_init(struct net_bridge *br)
{
	struct rtable *rt = &br->fake_rtable;

	atomic_set(&rt->dst.__refcnt, 1);
	rt->dst.dev = br->dev;
	rt->dst.path = &rt->dst;
	dst_metric_set(&rt->dst, RTAX_MTU, 1500);
	rt->dst.flags = DST_NOXFRM;
	rt->dst.ops = &fake_dst_ops;
}

static inline struct rtable *bridge_parent_rtable(const struct net_device *dev)
{
	struct net_bridge_port *port;

	port = br_port_get_rcu(dev);
	return port ? &port->br->fake_rtable : NULL;
}

static inline struct net_device *bridge_parent(const struct net_device *dev)
{
	struct net_bridge_port *port;

	port = br_port_get_rcu(dev);
	return port ? port->br->dev : NULL;
}

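/* Allocate nf_bridge_info for this skb and take the initial reference. */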
static inline struct nf_bridge_info *nf_bridge_alloc(struct sk_buff *skb)
{
	skb->nf_bridge = kzalloc(sizeof(struct nf_bridge_info), GFP_ATOMIC);
	if (likely(skb->nf_bridge))
		atomic_set(&(skb->nf_bridge->use), 1);

	return skb->nf_bridge;
}

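/* Give the skb a private copy of its nf_bridge_info if it is currently
 * shared with other skbs. */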
static inline struct nf_bridge_info *nf_bridge_unshare(struct sk_buff *skb)
{
	struct nf_bridge_info *nf_bridge = skb->nf_bridge;

	if (atomic_read(&nf_bridge->use) > 1) {
		struct nf_bridge_info *tmp = nf_bridge_alloc(skb);

		if (tmp) {
			memcpy(tmp, nf_bridge, sizeof(struct nf_bridge_info));
			atomic_set(&tmp->use, 1);
		}
		nf_bridge_put(nf_bridge);
		nf_bridge = tmp;
	}

	return nf_bridge;
}

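/* The helpers below hide and restore the VLAN/PPPoE encapsulation header
 * around calls into the IP stack. */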
static inline void nf_bridge_push_encap_header(struct sk_buff *skb)
{
	unsigned int len = nf_bridge_encap_header_len(skb);

	skb_push(skb, len);
	skb->network_header -= len;
}

static inline void nf_bridge_pull_encap_header(struct sk_buff *skb)
{
	unsigned int len = nf_bridge_encap_header_len(skb);

	skb_pull(skb, len);
	skb->network_header += len;
}

static inline void nf_bridge_pull_encap_header_rcsum(struct sk_buff *skb)
{
	unsigned int len = nf_bridge_encap_header_len(skb);

	skb_pull_rcsum(skb, len);
	skb->network_header += len;
}

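/* Save the Ethernet header (plus any encapsulation header) so that
 * nf_bridge_copy_header() can restore it later. */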
static inline void nf_bridge_save_header(struct sk_buff *skb)
{
	int header_size = ETH_HLEN + nf_bridge_encap_header_len(skb);

	skb_copy_from_linear_data_offset(skb, -header_size,
					 skb->nf_bridge->data, header_size);
}

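/* Restore the encapsulation ethertype (VLAN or PPPoE) on skb->protocol. */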
static inline void nf_bridge_update_protocol(struct sk_buff *skb)
{
	if (skb->nf_bridge->mask & BRNF_8021Q)
		skb->protocol = htons(ETH_P_8021Q);
	else if (skb->nf_bridge->mask & BRNF_PPPoE)
		skb->protocol = htons(ETH_P_PPP_SES);
}

/* When handing a packet over to the IP layer
 * check whether we have a skb that is in the
 * expected format
 */
static int br_parse_ip_options(struct sk_buff *skb)
{
	struct ip_options *opt;
	struct iphdr *iph;
	struct net_device *dev = skb->dev;
	u32 len;

	iph = ip_hdr(skb);
	opt = &(IPCB(skb)->opt);

	/* Basic sanity checks */
	if (iph->ihl < 5 || iph->version != 4)
		goto inhdr_error;

	if (!pskb_may_pull(skb, iph->ihl*4))
		goto inhdr_error;

	iph = ip_hdr(skb);
	if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
		goto inhdr_error;

	len = ntohs(iph->tot_len);
	if (skb->len < len) {
		IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INTRUNCATEDPKTS);
		goto drop;
	} else if (len < (iph->ihl*4))
		goto inhdr_error;

	if (pskb_trim_rcsum(skb, len)) {
		IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INDISCARDS);
		goto drop;
	}

	/* Zero out the CB buffer if no options present */
	if (iph->ihl == 5) {
		memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
		return 0;
	}

	opt->optlen = iph->ihl*4 - sizeof(struct iphdr);
	if (ip_options_compile(dev_net(dev), opt, skb))
		goto inhdr_error;

	/* Check correct handling of SRR option */
	if (unlikely(opt->srr)) {
		struct in_device *in_dev = __in_dev_get_rcu(dev);
		if (in_dev && !IN_DEV_SOURCE_ROUTE(in_dev))
			goto drop;

		if (ip_options_rcv_srr(skb))
			goto drop;
	}

	return 0;

inhdr_error:
	IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INHDRERRORS);
drop:
	return -1;
}

/* Fill in the header for fragmented IP packets handled by
 * the IPv4 connection tracking code.
 */
int nf_bridge_copy_header(struct sk_buff *skb)
{
	int err;
	unsigned int header_size;

	nf_bridge_update_protocol(skb);
	header_size = ETH_HLEN + nf_bridge_encap_header_len(skb);
	err = skb_cow_head(skb, header_size);
	if (err)
		return err;

	skb_copy_to_linear_data_offset(skb, -header_size,
				       skb->nf_bridge->data, header_size);
	__skb_push(skb, nf_bridge_encap_header_len(skb));
	return 0;
}

/* PF_BRIDGE/PRE_ROUTING *********************************************/
/* Undo the changes made for ip6tables PREROUTING and continue the
 * bridge PRE_ROUTING hook. */
static int br_nf_pre_routing_finish_ipv6(struct sk_buff *skb)
{
	struct nf_bridge_info *nf_bridge = skb->nf_bridge;
	struct rtable *rt;

	if (nf_bridge->mask & BRNF_PKT_TYPE) {
		skb->pkt_type = PACKET_OTHERHOST;
		nf_bridge->mask ^= BRNF_PKT_TYPE;
	}
	nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING;

	rt = bridge_parent_rtable(nf_bridge->physindev);
	if (!rt) {
		kfree_skb(skb);
		return 0;
	}
	skb_dst_set_noref(skb, &rt->dst);

	skb->dev = nf_bridge->physindev;
	nf_bridge_update_protocol(skb);
	nf_bridge_push_encap_header(skb);
	NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
		       br_handle_frame_finish, 1);

	return 0;
}

/* Obtain the correct destination MAC address, while preserving the original
 * source MAC address. If we already know this address, we just copy it. If we
 * don't, we use the neighbour framework to find out. In both cases, we make
 * sure that br_handle_frame_finish() is called afterwards.
 */
static int br_nf_pre_routing_finish_bridge(struct sk_buff *skb)
{
	struct nf_bridge_info *nf_bridge = skb->nf_bridge;
	struct dst_entry *dst;

	skb->dev = bridge_parent(skb->dev);
	if (!skb->dev)
		goto free_skb;
	dst = skb_dst(skb);
	if (dst->hh) {
		neigh_hh_bridge(dst->hh, skb);
		skb->dev = nf_bridge->physindev;
		return br_handle_frame_finish(skb);
	} else if (dst->neighbour) {
		/* the neighbour function below overwrites the complete
		 * MAC header, so we save the Ethernet source address and
		 * protocol number. */
		skb_copy_from_linear_data_offset(skb, -(ETH_HLEN-ETH_ALEN),
						 skb->nf_bridge->data,
						 ETH_HLEN-ETH_ALEN);
		/* tell br_dev_xmit to continue with forwarding */
		nf_bridge->mask |= BRNF_BRIDGED_DNAT;
		return dst->neighbour->output(skb);
	}
free_skb:
	kfree_skb(skb);
	return 0;
}

/* This requires some explaining. If DNAT has taken place,
 * we will need to fix up the destination Ethernet address.
 *
 * There are two cases to consider:
 * 1. The packet was DNAT'ed to a device in the same bridge
 *    port group as it was received on. We can still bridge
 *    the packet.
 * 2. The packet was DNAT'ed to a different device, either
 *    a non-bridged device or another bridge port group.
 *    The packet will need to be routed.
 *
 * The correct way of distinguishing between these two cases is to
 * call ip_route_input() and to look at skb->dst->dev, which is
 * changed to the destination device if ip_route_input() succeeds.
 *
 * Let's first consider the case that ip_route_input() succeeds:
 *
 * If the output device equals the logical bridge device the packet
 * came in on, we can consider this bridging. The corresponding MAC
 * address will be obtained in br_nf_pre_routing_finish_bridge.
 * Otherwise, the packet is considered to be routed and we just
 * change the destination MAC address so that the packet will
 * later be passed up to the IP stack to be routed. For a redirected
 * packet, ip_route_input() will give back the localhost as output device,
 * which differs from the bridge device.
 *
 * Let's now consider the case that ip_route_input() fails:
 *
 * This can be because the destination address is martian, in which case
 * the packet will be dropped.
 * If IP forwarding is disabled, ip_route_input() will fail, while
 * ip_route_output_key() can return success. The source
 * address for ip_route_output_key() is set to zero, so ip_route_output_key()
 * thinks we're handling a locally generated packet and won't care
 * if IP forwarding is enabled. If the output device equals the logical bridge
 * device, we proceed as if ip_route_input() succeeded. If it differs from the
 * logical bridge port or if ip_route_output_key() fails we drop the packet.
 */

static int br_nf_pre_routing_finish(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct iphdr *iph = ip_hdr(skb);
	struct nf_bridge_info *nf_bridge = skb->nf_bridge;
	struct rtable *rt;
	int err;

	if (nf_bridge->mask & BRNF_PKT_TYPE) {
		skb->pkt_type = PACKET_OTHERHOST;
		nf_bridge->mask ^= BRNF_PKT_TYPE;
	}
	nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING;
	if (dnat_took_place(skb)) {
		if ((err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev))) {
			struct flowi fl = {
				.fl4_dst = iph->daddr,
				.fl4_tos = RT_TOS(iph->tos),
			};
			struct in_device *in_dev = __in_dev_get_rcu(dev);

			/* If err equals -EHOSTUNREACH the error is due to a
			 * martian destination or due to the fact that
			 * forwarding is disabled. For most martian packets,
			 * ip_route_output_key() will fail. It won't fail for 2 types of
			 * martian destinations: loopback destinations and destination
			 * 0.0.0.0. In both cases the packet will be dropped because the
			 * destination is the loopback device and not the bridge. */
			if (err != -EHOSTUNREACH || !in_dev || IN_DEV_FORWARD(in_dev))
				goto free_skb;

			if (!ip_route_output_key(dev_net(dev), &rt, &fl)) {
				/* - Bridged-and-DNAT'ed traffic doesn't
				 *   require ip_forwarding. */
				if (((struct dst_entry *)rt)->dev == dev) {
					skb_dst_set(skb, (struct dst_entry *)rt);
					goto bridged_dnat;
				}
				dst_release((struct dst_entry *)rt);
			}
free_skb:
			kfree_skb(skb);
			return 0;
		} else {
			if (skb_dst(skb)->dev == dev) {
bridged_dnat:
				skb->dev = nf_bridge->physindev;
				nf_bridge_update_protocol(skb);
				nf_bridge_push_encap_header(skb);
				NF_HOOK_THRESH(NFPROTO_BRIDGE,
					       NF_BR_PRE_ROUTING,
					       skb, skb->dev, NULL,
					       br_nf_pre_routing_finish_bridge,
					       1);
				return 0;
			}
			memcpy(eth_hdr(skb)->h_dest, dev->dev_addr, ETH_ALEN);
			skb->pkt_type = PACKET_HOST;
		}
	} else {
		rt = bridge_parent_rtable(nf_bridge->physindev);
		if (!rt) {
			kfree_skb(skb);
			return 0;
		}
		skb_dst_set_noref(skb, &rt->dst);
	}

	skb->dev = nf_bridge->physindev;
	nf_bridge_update_protocol(skb);
	nf_bridge_push_encap_header(skb);
	NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
		       br_handle_frame_finish, 1);

	return 0;
}

/* Some common code for IPv4/IPv6 */
static struct net_device *setup_pre_routing(struct sk_buff *skb)
{
	struct nf_bridge_info *nf_bridge = skb->nf_bridge;

	if (skb->pkt_type == PACKET_OTHERHOST) {
		skb->pkt_type = PACKET_HOST;
		nf_bridge->mask |= BRNF_PKT_TYPE;
	}

	nf_bridge->mask |= BRNF_NF_BRIDGE_PREROUTING;
	nf_bridge->physindev = skb->dev;
	skb->dev = bridge_parent(skb->dev);
	if (skb->protocol == htons(ETH_P_8021Q))
		nf_bridge->mask |= BRNF_8021Q;
	else if (skb->protocol == htons(ETH_P_PPP_SES))
		nf_bridge->mask |= BRNF_PPPoE;

	return skb->dev;
}

/* We only check the length. A bridge shouldn't do any hop-by-hop stuff anyway */
static int check_hbh_len(struct sk_buff *skb)
{
	unsigned char *raw = (u8 *)(ipv6_hdr(skb) + 1);
	u32 pkt_len;
	const unsigned char *nh = skb_network_header(skb);
	int off = raw - nh;
	int len = (raw[1] + 1) << 3;

	if ((raw + len) - skb->data > skb_headlen(skb))
		goto bad;

	off += 2;
	len -= 2;

	while (len > 0) {
		int optlen = nh[off + 1] + 2;

		switch (nh[off]) {
		case IPV6_TLV_PAD0:
			optlen = 1;
			break;

		case IPV6_TLV_PADN:
			break;

		case IPV6_TLV_JUMBO:
			if (nh[off + 1] != 4 || (off & 3) != 2)
				goto bad;
			pkt_len = ntohl(*(__be32 *) (nh + off + 2));
			if (pkt_len <= IPV6_MAXPLEN ||
			    ipv6_hdr(skb)->payload_len)
				goto bad;
			if (pkt_len > skb->len - sizeof(struct ipv6hdr))
				goto bad;
			if (pskb_trim_rcsum(skb,
					    pkt_len + sizeof(struct ipv6hdr)))
				goto bad;
			nh = skb_network_header(skb);
			break;
		default:
			if (optlen > len)
				goto bad;
			break;
		}
		off += optlen;
		len -= optlen;
	}
	if (len == 0)
		return 0;
bad:
	return -1;
}

/* Replicate the checks that IPv6 does on packet reception and pass the packet
 * to ip6tables, which doesn't support NAT, so things are fairly simple. */
static unsigned int br_nf_pre_routing_ipv6(unsigned int hook,
					   struct sk_buff *skb,
					   const struct net_device *in,
					   const struct net_device *out,
					   int (*okfn)(struct sk_buff *))
{
	struct ipv6hdr *hdr;
	u32 pkt_len;

	if (skb->len < sizeof(struct ipv6hdr))
		return NF_DROP;

	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
		return NF_DROP;

	hdr = ipv6_hdr(skb);

	if (hdr->version != 6)
		return NF_DROP;

	pkt_len = ntohs(hdr->payload_len);

	if (pkt_len || hdr->nexthdr != NEXTHDR_HOP) {
		if (pkt_len + sizeof(struct ipv6hdr) > skb->len)
			return NF_DROP;
		if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr)))
			return NF_DROP;
	}
	if (hdr->nexthdr == NEXTHDR_HOP && check_hbh_len(skb))
		return NF_DROP;

	nf_bridge_put(skb->nf_bridge);
	if (!nf_bridge_alloc(skb))
		return NF_DROP;
	if (!setup_pre_routing(skb))
		return NF_DROP;

	skb->protocol = htons(ETH_P_IPV6);
	NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, skb, skb->dev, NULL,
		br_nf_pre_routing_finish_ipv6);

	return NF_STOLEN;
}

/* Direct IPv6 traffic to br_nf_pre_routing_ipv6.
 * Replicate the checks that IPv4 does on packet reception.
 * Set skb->dev to the bridge device (i.e. parent of the
 * receiving device) to make netfilter happy, the REDIRECT
 * target in particular.  Save the original destination IP
 * address to be able to detect DNAT afterwards. */
static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff *skb,
				      const struct net_device *in,
				      const struct net_device *out,
				      int (*okfn)(struct sk_buff *))
{
	struct net_bridge_port *p;
	struct net_bridge *br;
	__u32 len = nf_bridge_encap_header_len(skb);

	if (unlikely(!pskb_may_pull(skb, len)))
		return NF_DROP;

	p = br_port_get_rcu(in);
	if (p == NULL)
		return NF_DROP;
	br = p->br;

	if (skb->protocol == htons(ETH_P_IPV6) || IS_VLAN_IPV6(skb) ||
	    IS_PPPOE_IPV6(skb)) {
		if (!brnf_call_ip6tables && !br->nf_call_ip6tables)
			return NF_ACCEPT;

		nf_bridge_pull_encap_header_rcsum(skb);
		return br_nf_pre_routing_ipv6(hook, skb, in, out, okfn);
	}

	if (!brnf_call_iptables && !br->nf_call_iptables)
		return NF_ACCEPT;

	if (skb->protocol != htons(ETH_P_IP) && !IS_VLAN_IP(skb) &&
	    !IS_PPPOE_IP(skb))
		return NF_ACCEPT;

	nf_bridge_pull_encap_header_rcsum(skb);

	if (br_parse_ip_options(skb))
		return NF_DROP;

	nf_bridge_put(skb->nf_bridge);
	if (!nf_bridge_alloc(skb))
		return NF_DROP;
	if (!setup_pre_routing(skb))
		return NF_DROP;
	store_orig_dstaddr(skb);
	skb->protocol = htons(ETH_P_IP);

	NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, skb, skb->dev, NULL,
		br_nf_pre_routing_finish);

	return NF_STOLEN;
}

/* PF_BRIDGE/LOCAL_IN ************************************************/
/* The packet is locally destined, which requires a real
 * dst_entry, so detach the fake one.  On the way up, the
 * packet would pass through PRE_ROUTING again (which already
 * took place when the packet entered the bridge), but we
 * register an IPv4 PRE_ROUTING 'sabotage' hook that will
 * prevent this from happening. */
static unsigned int br_nf_local_in(unsigned int hook, struct sk_buff *skb,
				   const struct net_device *in,
				   const struct net_device *out,
				   int (*okfn)(struct sk_buff *))
{
	struct rtable *rt = skb_rtable(skb);

	if (rt && rt == bridge_parent_rtable(in))
		skb_dst_drop(skb);

	return NF_ACCEPT;
}

/* PF_BRIDGE/FORWARD *************************************************/
static int br_nf_forward_finish(struct sk_buff *skb)
{
	struct nf_bridge_info *nf_bridge = skb->nf_bridge;
	struct net_device *in;

	if (skb->protocol != htons(ETH_P_ARP) && !IS_VLAN_ARP(skb)) {
		in = nf_bridge->physindev;
		if (nf_bridge->mask & BRNF_PKT_TYPE) {
			skb->pkt_type = PACKET_OTHERHOST;
			nf_bridge->mask ^= BRNF_PKT_TYPE;
		}
		nf_bridge_update_protocol(skb);
	} else {
		in = *((struct net_device **)(skb->cb));
	}
	nf_bridge_push_encap_header(skb);

	NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_FORWARD, skb, in,
		       skb->dev, br_forward_finish, 1);
	return 0;
}

/* This is the 'purely bridged' case.  For IP, we pass the packet to
 * netfilter with indev and outdev set to the bridge device,
 * but we are still able to filter on the 'real' indev/outdev
 * because of the physdev module. For ARP, indev and outdev are the
 * bridge ports. */
static unsigned int br_nf_forward_ip(unsigned int hook, struct sk_buff *skb,
				     const struct net_device *in,
				     const struct net_device *out,
				     int (*okfn)(struct sk_buff *))
{
	struct nf_bridge_info *nf_bridge;
	struct net_device *parent;
	u_int8_t pf;

	if (!skb->nf_bridge)
		return NF_ACCEPT;

	/* Need exclusive nf_bridge_info since we might have multiple
	 * different physoutdevs. */
	if (!nf_bridge_unshare(skb))
		return NF_DROP;

	parent = bridge_parent(out);
	if (!parent)
		return NF_DROP;

	if (skb->protocol == htons(ETH_P_IP) || IS_VLAN_IP(skb) ||
	    IS_PPPOE_IP(skb))
		pf = PF_INET;
	else if (skb->protocol == htons(ETH_P_IPV6) || IS_VLAN_IPV6(skb) ||
		 IS_PPPOE_IPV6(skb))
		pf = PF_INET6;
	else
		return NF_ACCEPT;

	nf_bridge_pull_encap_header(skb);

	nf_bridge = skb->nf_bridge;
	if (skb->pkt_type == PACKET_OTHERHOST) {
		skb->pkt_type = PACKET_HOST;
		nf_bridge->mask |= BRNF_PKT_TYPE;
	}

	/* The physdev module checks on this */
	nf_bridge->mask |= BRNF_BRIDGED;
	nf_bridge->physoutdev = skb->dev;
	if (pf == PF_INET)
		skb->protocol = htons(ETH_P_IP);
	else
		skb->protocol = htons(ETH_P_IPV6);

	NF_HOOK(pf, NF_INET_FORWARD, skb, bridge_parent(in), parent,
		br_nf_forward_finish);

	return NF_STOLEN;
}

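/* Pass bridged ARP traffic (IPv4 ARP only) to the arptables FORWARD hook. */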
static unsigned int br_nf_forward_arp(unsigned int hook, struct sk_buff *skb,
				      const struct net_device *in,
				      const struct net_device *out,
				      int (*okfn)(struct sk_buff *))
{
	struct net_bridge_port *p;
	struct net_bridge *br;
	struct net_device **d = (struct net_device **)(skb->cb);

	p = br_port_get_rcu(out);
	if (p == NULL)
		return NF_ACCEPT;
	br = p->br;

	if (!brnf_call_arptables && !br->nf_call_arptables)
		return NF_ACCEPT;

	if (skb->protocol != htons(ETH_P_ARP)) {
		if (!IS_VLAN_ARP(skb))
			return NF_ACCEPT;
		nf_bridge_pull_encap_header(skb);
	}

	if (arp_hdr(skb)->ar_pln != 4) {
		if (IS_VLAN_ARP(skb))
			nf_bridge_push_encap_header(skb);
		return NF_ACCEPT;
	}
	*d = (struct net_device *)in;
	NF_HOOK(NFPROTO_ARP, NF_ARP_FORWARD, skb, (struct net_device *)in,
		(struct net_device *)out, br_nf_forward_finish);

	return NF_STOLEN;
}

#if defined(CONFIG_NF_CONNTRACK_IPV4) || defined(CONFIG_NF_CONNTRACK_IPV4_MODULE)
static int br_nf_dev_queue_xmit(struct sk_buff *skb)
{
	int ret;

	if (skb->nfct != NULL && skb->protocol == htons(ETH_P_IP) &&
	    skb->len + nf_bridge_mtu_reduction(skb) > skb->dev->mtu &&
	    !skb_is_gso(skb)) {
		if (br_parse_ip_options(skb))
			/* Drop invalid packet */
			return NF_DROP;
		ret = ip_fragment(skb, br_dev_queue_push_xmit);
	} else
		ret = br_dev_queue_push_xmit(skb);

	return ret;
}
#else
static int br_nf_dev_queue_xmit(struct sk_buff *skb)
{
	return br_dev_queue_push_xmit(skb);
}
#endif

/* PF_BRIDGE/POST_ROUTING ********************************************/
static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff *skb,
				       const struct net_device *in,
				       const struct net_device *out,
				       int (*okfn)(struct sk_buff *))
{
	struct nf_bridge_info *nf_bridge = skb->nf_bridge;
	struct net_device *realoutdev = bridge_parent(skb->dev);
	u_int8_t pf;

	if (!nf_bridge || !(nf_bridge->mask & BRNF_BRIDGED))
		return NF_ACCEPT;

	if (!realoutdev)
		return NF_DROP;

	if (skb->protocol == htons(ETH_P_IP) || IS_VLAN_IP(skb) ||
	    IS_PPPOE_IP(skb))
		pf = PF_INET;
	else if (skb->protocol == htons(ETH_P_IPV6) || IS_VLAN_IPV6(skb) ||
		 IS_PPPOE_IPV6(skb))
		pf = PF_INET6;
	else
		return NF_ACCEPT;

	/* We assume any code from br_dev_queue_push_xmit onwards doesn't care
	 * about the value of skb->pkt_type. */
	if (skb->pkt_type == PACKET_OTHERHOST) {
		skb->pkt_type = PACKET_HOST;
		nf_bridge->mask |= BRNF_PKT_TYPE;
	}

	nf_bridge_pull_encap_header(skb);
	nf_bridge_save_header(skb);
	if (pf == PF_INET)
		skb->protocol = htons(ETH_P_IP);
	else
		skb->protocol = htons(ETH_P_IPV6);

	NF_HOOK(pf, NF_INET_POST_ROUTING, skb, NULL, realoutdev,
		br_nf_dev_queue_xmit);

	return NF_STOLEN;
}

/* IP/SABOTAGE *****************************************************/
/* Don't hand locally destined packets to PF_INET(6)/PRE_ROUTING
 * for the second time. */
static unsigned int ip_sabotage_in(unsigned int hook, struct sk_buff *skb,
				   const struct net_device *in,
				   const struct net_device *out,
				   int (*okfn)(struct sk_buff *))
{
	if (skb->nf_bridge &&
	    !(skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)) {
		return NF_STOP;
	}

	return NF_ACCEPT;
}

/* For br_nf_post_routing, we need (prio = NF_BR_PRI_LAST), because
 * br_dev_queue_push_xmit is called afterwards */
static struct nf_hook_ops br_nf_ops[] __read_mostly = {
	{
		.hook = br_nf_pre_routing,
		.owner = THIS_MODULE,
		.pf = PF_BRIDGE,
		.hooknum = NF_BR_PRE_ROUTING,
		.priority = NF_BR_PRI_BRNF,
	},
	{
		.hook = br_nf_local_in,
		.owner = THIS_MODULE,
		.pf = PF_BRIDGE,
		.hooknum = NF_BR_LOCAL_IN,
		.priority = NF_BR_PRI_BRNF,
	},
	{
		.hook = br_nf_forward_ip,
		.owner = THIS_MODULE,
		.pf = PF_BRIDGE,
		.hooknum = NF_BR_FORWARD,
		.priority = NF_BR_PRI_BRNF - 1,
	},
	{
		.hook = br_nf_forward_arp,
		.owner = THIS_MODULE,
		.pf = PF_BRIDGE,
		.hooknum = NF_BR_FORWARD,
		.priority = NF_BR_PRI_BRNF,
	},
	{
		.hook = br_nf_post_routing,
		.owner = THIS_MODULE,
		.pf = PF_BRIDGE,
		.hooknum = NF_BR_POST_ROUTING,
		.priority = NF_BR_PRI_LAST,
	},
	{
		.hook = ip_sabotage_in,
		.owner = THIS_MODULE,
		.pf = PF_INET,
		.hooknum = NF_INET_PRE_ROUTING,
		.priority = NF_IP_PRI_FIRST,
	},
	{
		.hook = ip_sabotage_in,
		.owner = THIS_MODULE,
		.pf = PF_INET6,
		.hooknum = NF_INET_PRE_ROUTING,
		.priority = NF_IP6_PRI_FIRST,
	},
};

#ifdef CONFIG_SYSCTL
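/* Any nonzero value written to these sysctls is normalized to 1. */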
static
int brnf_sysctl_call_tables(ctl_table * ctl, int write,
			    void __user * buffer, size_t * lenp, loff_t * ppos)
{
	int ret;

	ret = proc_dointvec(ctl, write, buffer, lenp, ppos);

	if (write && *(int *)(ctl->data))
		*(int *)(ctl->data) = 1;
	return ret;
}

static ctl_table brnf_table[] = {
	{
		.procname	= "bridge-nf-call-arptables",
		.data		= &brnf_call_arptables,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= brnf_sysctl_call_tables,
	},
	{
		.procname	= "bridge-nf-call-iptables",
		.data		= &brnf_call_iptables,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= brnf_sysctl_call_tables,
	},
	{
		.procname	= "bridge-nf-call-ip6tables",
		.data		= &brnf_call_ip6tables,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= brnf_sysctl_call_tables,
	},
	{
		.procname	= "bridge-nf-filter-vlan-tagged",
		.data		= &brnf_filter_vlan_tagged,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= brnf_sysctl_call_tables,
	},
	{
		.procname	= "bridge-nf-filter-pppoe-tagged",
		.data		= &brnf_filter_pppoe_tagged,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= brnf_sysctl_call_tables,
	},
	{ }
};

static struct ctl_path brnf_path[] = {
	{ .procname = "net", },
	{ .procname = "bridge", },
	{ }
};
#endif

int __init br_netfilter_init(void)
{
	int ret;

	ret = dst_entries_init(&fake_dst_ops);
	if (ret < 0)
		return ret;

	ret = nf_register_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops));
	if (ret < 0) {
		dst_entries_destroy(&fake_dst_ops);
		return ret;
	}
#ifdef CONFIG_SYSCTL
	brnf_sysctl_header = register_sysctl_paths(brnf_path, brnf_table);
	if (brnf_sysctl_header == NULL) {
		printk(KERN_WARNING
		       "br_netfilter: can't register to sysctl.\n");
		nf_unregister_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops));
		dst_entries_destroy(&fake_dst_ops);
		return -ENOMEM;
	}
#endif
	printk(KERN_NOTICE "Bridge firewalling registered\n");
	return 0;
}

void br_netfilter_fini(void)
{
	nf_unregister_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops));
#ifdef CONFIG_SYSCTL
	unregister_sysctl_table(brnf_sysctl_header);
#endif
	dst_entries_destroy(&fake_dst_ops);
}