net/bridge/br_multicast.c

/*
 * Bridge multicast support.
 *
 * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

#include <linux/err.h>
#include <linux/if_ether.h>
#include <linux/igmp.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/netdevice.h>
#include <linux/netfilter_bridge.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <net/ip.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/mld.h>
#include <net/addrconf.h>
#include <net/ip6_checksum.h>
#endif

#include "br_private.h"
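
/*
 * Readers on the forwarding path access br->mdb and the per-group port
 * lists under RCU; writers serialize on br->multicast_lock.  The macro
 * below encodes that convention so lockdep can verify updater-side
 * dereferences.
 */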

#define mlock_dereference(X, br) \
	rcu_dereference_protected(X, lockdep_is_held(&br->multicast_lock))

#if IS_ENABLED(CONFIG_IPV6)
static inline int ipv6_is_transient_multicast(const struct in6_addr *addr)
{
	if (ipv6_addr_is_multicast(addr) && IPV6_ADDR_MC_FLAG_TRANSIENT(addr))
		return 1;
	return 0;
}
#endif

static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b)
{
	if (a->proto != b->proto)
		return 0;
	switch (a->proto) {
	case htons(ETH_P_IP):
		return a->u.ip4 == b->u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		return ipv6_addr_equal(&a->u.ip6, &b->u.ip6);
#endif
	}
	return 0;
}

static inline int __br_ip4_hash(struct net_bridge_mdb_htable *mdb, __be32 ip)
{
	return jhash_1word(mdb->secret, (__force u32)ip) & (mdb->max - 1);
}

#if IS_ENABLED(CONFIG_IPV6)
static inline int __br_ip6_hash(struct net_bridge_mdb_htable *mdb,
				const struct in6_addr *ip)
{
	return jhash2((__force u32 *)ip->s6_addr32, 4, mdb->secret) & (mdb->max - 1);
}
#endif
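
/*
 * Both hash helpers mix the group address with a per-table random
 * secret via jhash and mask with (mdb->max - 1); the table size is
 * always a power of two.  A fresh secret is drawn whenever the table
 * is rehashed with a non-zero elasticity, so crafted collision chains
 * do not survive a rehash.
 */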

static inline int br_ip_hash(struct net_bridge_mdb_htable *mdb,
			     struct br_ip *ip)
{
	switch (ip->proto) {
	case htons(ETH_P_IP):
		return __br_ip4_hash(mdb, ip->u.ip4);
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		return __br_ip6_hash(mdb, &ip->u.ip6);
#endif
	}
	return 0;
}

static struct net_bridge_mdb_entry *__br_mdb_ip_get(
	struct net_bridge_mdb_htable *mdb, struct br_ip *dst, int hash)
{
	struct net_bridge_mdb_entry *mp;
	struct hlist_node *p;

	hlist_for_each_entry_rcu(mp, p, &mdb->mhash[hash], hlist[mdb->ver]) {
		if (br_ip_equal(&mp->addr, dst))
			return mp;
	}

	return NULL;
}

static struct net_bridge_mdb_entry *br_mdb_ip_get(
	struct net_bridge_mdb_htable *mdb, struct br_ip *dst)
{
	if (!mdb)
		return NULL;

	return __br_mdb_ip_get(mdb, dst, br_ip_hash(mdb, dst));
}

static struct net_bridge_mdb_entry *br_mdb_ip4_get(
	struct net_bridge_mdb_htable *mdb, __be32 dst)
{
	struct br_ip br_dst;

	br_dst.u.ip4 = dst;
	br_dst.proto = htons(ETH_P_IP);

	return br_mdb_ip_get(mdb, &br_dst);
}

#if IS_ENABLED(CONFIG_IPV6)
static struct net_bridge_mdb_entry *br_mdb_ip6_get(
	struct net_bridge_mdb_htable *mdb, const struct in6_addr *dst)
{
	struct br_ip br_dst;

	br_dst.u.ip6 = *dst;
	br_dst.proto = htons(ETH_P_IPV6);

	return br_mdb_ip_get(mdb, &br_dst);
}
#endif

struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
					struct sk_buff *skb)
{
	struct net_bridge_mdb_htable *mdb = rcu_dereference(br->mdb);
	struct br_ip ip;

	if (br->multicast_disabled)
		return NULL;

	if (BR_INPUT_SKB_CB(skb)->igmp)
		return NULL;

	ip.proto = skb->protocol;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ip.u.ip4 = ip_hdr(skb)->daddr;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ip.u.ip6 = ipv6_hdr(skb)->daddr;
		break;
#endif
	default:
		return NULL;
	}

	return br_mdb_ip_get(mdb, &ip);
}

static void br_mdb_free(struct rcu_head *head)
{
	struct net_bridge_mdb_htable *mdb =
		container_of(head, struct net_bridge_mdb_htable, rcu);
	struct net_bridge_mdb_htable *old = mdb->old;

	mdb->old = NULL;
	kfree(old->mhash);
	kfree(old);
}

static int br_mdb_copy(struct net_bridge_mdb_htable *new,
		       struct net_bridge_mdb_htable *old,
		       int elasticity)
{
	struct net_bridge_mdb_entry *mp;
	struct hlist_node *p;
	int maxlen;
	int len;
	int i;

	for (i = 0; i < old->max; i++)
		hlist_for_each_entry(mp, p, &old->mhash[i], hlist[old->ver])
			hlist_add_head(&mp->hlist[new->ver],
				       &new->mhash[br_ip_hash(new, &mp->addr)]);

	if (!elasticity)
		return 0;

	maxlen = 0;
	for (i = 0; i < new->max; i++) {
		len = 0;
		hlist_for_each_entry(mp, p, &new->mhash[i], hlist[new->ver])
			len++;
		if (len > maxlen)
			maxlen = len;
	}

	return maxlen > elasticity ? -EINVAL : 0;
}
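
/*
 * Every mdb entry carries two list linkages (hlist[2]).  br_mdb_copy()
 * threads all entries onto the new table under the flipped version
 * index while RCU readers continue walking the old one.  With a
 * non-zero elasticity the copy is rejected if any resulting chain
 * exceeds that bound.
 */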

static void br_multicast_free_pg(struct rcu_head *head)
{
	struct net_bridge_port_group *p =
		container_of(head, struct net_bridge_port_group, rcu);

	kfree(p);
}

static void br_multicast_free_group(struct rcu_head *head)
{
	struct net_bridge_mdb_entry *mp =
		container_of(head, struct net_bridge_mdb_entry, rcu);

	kfree(mp);
}

static void br_multicast_group_expired(unsigned long data)
{
	struct net_bridge_mdb_entry *mp = (void *)data;
	struct net_bridge *br = mp->br;
	struct net_bridge_mdb_htable *mdb;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || timer_pending(&mp->timer))
		goto out;

	mp->mglist = false;

	if (mp->ports)
		goto out;

	mdb = mlock_dereference(br->mdb, br);

	hlist_del_rcu(&mp->hlist[mdb->ver]);
	mdb->size--;

	call_rcu_bh(&mp->rcu, br_multicast_free_group);

out:
	spin_unlock(&br->multicast_lock);
}

static void br_multicast_del_pg(struct net_bridge *br,
				struct net_bridge_port_group *pg)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;

	mdb = mlock_dereference(br->mdb, br);

	mp = br_mdb_ip_get(mdb, &pg->addr);
	if (WARN_ON(!mp))
		return;

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p != pg)
			continue;

		rcu_assign_pointer(*pp, p->next);
		hlist_del_init(&p->mglist);
		del_timer(&p->timer);
		call_rcu_bh(&p->rcu, br_multicast_free_pg);

		if (!mp->ports && !mp->mglist &&
		    netif_running(br->dev))
			mod_timer(&mp->timer, jiffies);

		return;
	}

	WARN_ON(1);
}

static void br_multicast_port_group_expired(unsigned long data)
{
	struct net_bridge_port_group *pg = (void *)data;
	struct net_bridge *br = pg->port->br;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || timer_pending(&pg->timer) ||
	    hlist_unhashed(&pg->mglist))
		goto out;

	br_multicast_del_pg(br, pg);

out:
	spin_unlock(&br->multicast_lock);
}

static int br_mdb_rehash(struct net_bridge_mdb_htable __rcu **mdbp, int max,
			 int elasticity)
{
	struct net_bridge_mdb_htable *old = rcu_dereference_protected(*mdbp, 1);
	struct net_bridge_mdb_htable *mdb;
	int err;

	mdb = kmalloc(sizeof(*mdb), GFP_ATOMIC);
	if (!mdb)
		return -ENOMEM;

	mdb->max = max;
	mdb->old = old;

	mdb->mhash = kzalloc(max * sizeof(*mdb->mhash), GFP_ATOMIC);
	if (!mdb->mhash) {
		kfree(mdb);
		return -ENOMEM;
	}

	mdb->size = old ? old->size : 0;
	mdb->ver = old ? old->ver ^ 1 : 0;

	if (!old || elasticity)
		get_random_bytes(&mdb->secret, sizeof(mdb->secret));
	else
		mdb->secret = old->secret;

	if (!old)
		goto out;

	err = br_mdb_copy(mdb, old, elasticity);
	if (err) {
		kfree(mdb->mhash);
		kfree(mdb);
		return err;
	}

	call_rcu_bh(&mdb->rcu, br_mdb_free);

out:
	rcu_assign_pointer(*mdbp, mdb);

	return 0;
}
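
/*
 * The new table is published with rcu_assign_pointer() and keeps a
 * reference to its predecessor in mdb->old; br_mdb_free() runs after a
 * grace period and releases the old bucket array once no reader can
 * still be traversing it.
 */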

static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge *br,
						    __be32 group)
{
	struct sk_buff *skb;
	struct igmphdr *ih;
	struct ethhdr *eth;
	struct iphdr *iph;

	skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*iph) +
						 sizeof(*ih) + 4);
	if (!skb)
		goto out;

	skb->protocol = htons(ETH_P_IP);

	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	memcpy(eth->h_source, br->dev->dev_addr, 6);
	eth->h_dest[0] = 1;
	eth->h_dest[1] = 0;
	eth->h_dest[2] = 0x5e;
	eth->h_dest[3] = 0;
	eth->h_dest[4] = 0;
	eth->h_dest[5] = 1;
	eth->h_proto = htons(ETH_P_IP);
	skb_put(skb, sizeof(*eth));

	skb_set_network_header(skb, skb->len);
	iph = ip_hdr(skb);

	iph->version = 4;
	iph->ihl = 6;
	iph->tos = 0xc0;
	iph->tot_len = htons(sizeof(*iph) + sizeof(*ih) + 4);
	iph->id = 0;
	iph->frag_off = htons(IP_DF);
	iph->ttl = 1;
	iph->protocol = IPPROTO_IGMP;
	iph->saddr = 0;
	iph->daddr = htonl(INADDR_ALLHOSTS_GROUP);
	((u8 *)&iph[1])[0] = IPOPT_RA;
	((u8 *)&iph[1])[1] = 4;
	((u8 *)&iph[1])[2] = 0;
	((u8 *)&iph[1])[3] = 0;
	ip_send_check(iph);
	skb_put(skb, 24);

	skb_set_transport_header(skb, skb->len);
	ih = igmp_hdr(skb);
	ih->type = IGMP_HOST_MEMBERSHIP_QUERY;
	ih->code = (group ? br->multicast_last_member_interval :
			    br->multicast_query_response_interval) /
		   (HZ / IGMP_TIMER_SCALE);
	ih->group = group;
	ih->csum = 0;
	ih->csum = ip_compute_csum((void *)ih, sizeof(struct igmphdr));
	skb_put(skb, sizeof(*ih));

	__skb_pull(skb, sizeof(*eth));

out:
	return skb;
}
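
/*
 * The skb built above is a complete IGMP query: destination 224.0.0.1
 * (MAC 01:00:5e:00:00:01), TTL 1, and a 24-byte IP header (ihl = 6)
 * whose extra word carries the Router Alert option that RFC 2236
 * requires on IGMPv2 messages.
 */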

#if IS_ENABLED(CONFIG_IPV6)
static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
						    const struct in6_addr *group)
{
	struct sk_buff *skb;
	struct ipv6hdr *ip6h;
	struct mld_msg *mldq;
	struct ethhdr *eth;
	u8 *hopopt;
	unsigned long interval;

	skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*ip6h) +
						 8 + sizeof(*mldq));
	if (!skb)
		goto out;

	skb->protocol = htons(ETH_P_IPV6);

	/* Ethernet header */
	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	memcpy(eth->h_source, br->dev->dev_addr, 6);
	eth->h_proto = htons(ETH_P_IPV6);
	skb_put(skb, sizeof(*eth));

	/* IPv6 header + HbH option */
	skb_set_network_header(skb, skb->len);
	ip6h = ipv6_hdr(skb);

	*(__force __be32 *)ip6h = htonl(0x60000000);
	ip6h->payload_len = htons(8 + sizeof(*mldq));
	ip6h->nexthdr = IPPROTO_HOPOPTS;
	ip6h->hop_limit = 1;
	ipv6_addr_set(&ip6h->daddr, htonl(0xff020000), 0, 0, htonl(1));
	if (ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0,
			       &ip6h->saddr)) {
		kfree_skb(skb);
		return NULL;
	}
	ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);

	hopopt = (u8 *)(ip6h + 1);
	hopopt[0] = IPPROTO_ICMPV6;		/* next hdr */
	hopopt[1] = 0;				/* length of HbH */
	hopopt[2] = IPV6_TLV_ROUTERALERT;	/* Router Alert */
	hopopt[3] = 2;				/* Length of RA Option */
	hopopt[4] = 0;				/* Type = 0x0000 (MLD) */
	hopopt[5] = 0;
	hopopt[6] = IPV6_TLV_PAD0;		/* Pad0 */
	hopopt[7] = IPV6_TLV_PAD0;		/* Pad0 */

	skb_put(skb, sizeof(*ip6h) + 8);

	/* ICMPv6 */
	skb_set_transport_header(skb, skb->len);
	mldq = (struct mld_msg *) icmp6_hdr(skb);

	interval = ipv6_addr_any(group) ? br->multicast_last_member_interval :
					  br->multicast_query_response_interval;

	mldq->mld_type = ICMPV6_MGM_QUERY;
	mldq->mld_code = 0;
	mldq->mld_cksum = 0;
	mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval));
	mldq->mld_reserved = 0;
	mldq->mld_mca = *group;

	/* checksum */
	mldq->mld_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
					  sizeof(*mldq), IPPROTO_ICMPV6,
					  csum_partial(mldq,
						       sizeof(*mldq), 0));
	skb_put(skb, sizeof(*mldq));

	__skb_pull(skb, sizeof(*eth));

out:
	return skb;
}
#endif

static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br,
						struct br_ip *addr)
{
	switch (addr->proto) {
	case htons(ETH_P_IP):
		return br_ip4_multicast_alloc_query(br, addr->u.ip4);
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		return br_ip6_multicast_alloc_query(br, &addr->u.ip6);
#endif
	}
	return NULL;
}

static struct net_bridge_mdb_entry *br_multicast_get_group(
	struct net_bridge *br, struct net_bridge_port *port,
	struct br_ip *group, int hash)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	struct hlist_node *p;
	unsigned count = 0;
	unsigned max;
	int elasticity;
	int err;

	mdb = rcu_dereference_protected(br->mdb, 1);
	hlist_for_each_entry(mp, p, &mdb->mhash[hash], hlist[mdb->ver]) {
		count++;
		if (unlikely(br_ip_equal(group, &mp->addr)))
			return mp;
	}

	elasticity = 0;
	max = mdb->max;

	if (unlikely(count > br->hash_elasticity && count)) {
		if (net_ratelimit())
			br_info(br, "Multicast hash table "
				"chain limit reached: %s\n",
				port ? port->dev->name : br->dev->name);

		elasticity = br->hash_elasticity;
	}

	if (mdb->size >= max) {
		max *= 2;
		if (unlikely(max >= br->hash_max)) {
			br_warn(br, "Multicast hash table maximum "
				"reached, disabling snooping: %s, %d\n",
				port ? port->dev->name : br->dev->name, max);
			err = -E2BIG;
disable:
			br->multicast_disabled = 1;
			goto err;
		}
	}

	if (max > mdb->max || elasticity) {
		if (mdb->old) {
			if (net_ratelimit())
				br_info(br, "Multicast hash table "
					"on fire: %s\n",
					port ? port->dev->name : br->dev->name);
			err = -EEXIST;
			goto err;
		}

		err = br_mdb_rehash(&br->mdb, max, elasticity);
		if (err) {
			br_warn(br, "Cannot rehash multicast "
				"hash table, disabling snooping: %s, %d, %d\n",
				port ? port->dev->name : br->dev->name,
				mdb->size, err);
			goto disable;
		}

		err = -EAGAIN;
		goto err;
	}

	return NULL;

err:
	mp = ERR_PTR(err);
	return mp;
}
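
/*
 * br_multicast_get_group() returns the existing entry, NULL when the
 * caller may allocate one, or an ERR_PTR: -EAGAIN after a successful
 * rehash (recompute the hash against the new table), -EEXIST while a
 * previous rehash is still pending, and -E2BIG once the configured
 * maximum is hit and snooping has been disabled.
 */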

static struct net_bridge_mdb_entry *br_multicast_new_group(
	struct net_bridge *br, struct net_bridge_port *port,
	struct br_ip *group)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	int hash;
	int err;

	mdb = rcu_dereference_protected(br->mdb, 1);
	if (!mdb) {
		err = br_mdb_rehash(&br->mdb, BR_HASH_SIZE, 0);
		if (err)
			return ERR_PTR(err);
		goto rehash;
	}

	hash = br_ip_hash(mdb, group);
	mp = br_multicast_get_group(br, port, group, hash);
	switch (PTR_ERR(mp)) {
	case 0:
		break;

	case -EAGAIN:
rehash:
		mdb = rcu_dereference_protected(br->mdb, 1);
		hash = br_ip_hash(mdb, group);
		break;

	default:
		goto out;
	}

	mp = kzalloc(sizeof(*mp), GFP_ATOMIC);
	if (unlikely(!mp))
		return ERR_PTR(-ENOMEM);

	mp->br = br;
	mp->addr = *group;
	setup_timer(&mp->timer, br_multicast_group_expired,
		    (unsigned long)mp);

	hlist_add_head_rcu(&mp->hlist[mdb->ver], &mdb->mhash[hash]);
	mdb->size++;

out:
	return mp;
}

static int br_multicast_add_group(struct net_bridge *br,
				  struct net_bridge_port *port,
				  struct br_ip *group)
{
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	unsigned long now = jiffies;
	int err;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	mp = br_multicast_new_group(br, port, group);
	err = PTR_ERR(mp);
	if (IS_ERR(mp))
		goto err;

	if (!port) {
		mp->mglist = true;
		mod_timer(&mp->timer, now + br->multicast_membership_interval);
		goto out;
	}

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p->port == port)
			goto found;
		if ((unsigned long)p->port < (unsigned long)port)
			break;
	}

	p = kzalloc(sizeof(*p), GFP_ATOMIC);
	err = -ENOMEM;
	if (unlikely(!p))
		goto err;

	p->addr = *group;
	p->port = port;
	p->next = *pp;
	hlist_add_head(&p->mglist, &port->mglist);
	setup_timer(&p->timer, br_multicast_port_group_expired,
		    (unsigned long)p);

	rcu_assign_pointer(*pp, p);

found:
	mod_timer(&p->timer, now + br->multicast_membership_interval);
out:
	err = 0;

err:
	spin_unlock(&br->multicast_lock);
	return err;
}
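
/*
 * Port groups hang off mp->ports ordered by descending port pointer
 * value; the walk above either refreshes the timer of an existing
 * entry or splices the new one in at the position that preserves the
 * ordering.
 */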

static int br_ip4_multicast_add_group(struct net_bridge *br,
				      struct net_bridge_port *port,
				      __be32 group)
{
	struct br_ip br_group;

	if (ipv4_is_local_multicast(group))
		return 0;

	br_group.u.ip4 = group;
	br_group.proto = htons(ETH_P_IP);

	return br_multicast_add_group(br, port, &br_group);
}

#if IS_ENABLED(CONFIG_IPV6)
static int br_ip6_multicast_add_group(struct net_bridge *br,
				      struct net_bridge_port *port,
				      const struct in6_addr *group)
{
	struct br_ip br_group;

	if (!ipv6_is_transient_multicast(group))
		return 0;

	br_group.u.ip6 = *group;
	br_group.proto = htons(ETH_P_IPV6);

	return br_multicast_add_group(br, port, &br_group);
}
#endif

static void br_multicast_router_expired(unsigned long data)
{
	struct net_bridge_port *port = (void *)data;
	struct net_bridge *br = port->br;

	spin_lock(&br->multicast_lock);
	if (port->multicast_router != 1 ||
	    timer_pending(&port->multicast_router_timer) ||
	    hlist_unhashed(&port->rlist))
		goto out;

	hlist_del_init_rcu(&port->rlist);

out:
	spin_unlock(&br->multicast_lock);
}

static void br_multicast_local_router_expired(unsigned long data)
{
}

static void __br_multicast_send_query(struct net_bridge *br,
				      struct net_bridge_port *port,
				      struct br_ip *ip)
{
	struct sk_buff *skb;

	skb = br_multicast_alloc_query(br, ip);
	if (!skb)
		return;

	if (port) {
		__skb_push(skb, sizeof(struct ethhdr));
		skb->dev = port->dev;
		NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
			dev_queue_xmit);
	} else
		netif_rx(skb);
}

static void br_multicast_send_query(struct net_bridge *br,
				    struct net_bridge_port *port, u32 sent)
{
	unsigned long time;
	struct br_ip br_group;

	if (!netif_running(br->dev) || br->multicast_disabled ||
	    timer_pending(&br->multicast_querier_timer))
		return;

	memset(&br_group.u, 0, sizeof(br_group.u));

	br_group.proto = htons(ETH_P_IP);
	__br_multicast_send_query(br, port, &br_group);

#if IS_ENABLED(CONFIG_IPV6)
	br_group.proto = htons(ETH_P_IPV6);
	__br_multicast_send_query(br, port, &br_group);
#endif

	time = jiffies;
	time += sent < br->multicast_startup_query_count ?
			br->multicast_startup_query_interval :
			br->multicast_query_interval;
	mod_timer(port ? &port->multicast_query_timer :
			 &br->multicast_query_timer, time);
}
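
/*
 * Queries are only originated while no foreign querier is known, i.e.
 * while multicast_querier_timer is not pending.  The first
 * multicast_startup_query_count queries use the shorter startup
 * interval before settling into multicast_query_interval.
 */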

static void br_multicast_port_query_expired(unsigned long data)
{
	struct net_bridge_port *port = (void *)data;
	struct net_bridge *br = port->br;

	spin_lock(&br->multicast_lock);
	if (port->state == BR_STATE_DISABLED ||
	    port->state == BR_STATE_BLOCKING)
		goto out;

	if (port->multicast_startup_queries_sent <
	    br->multicast_startup_query_count)
		port->multicast_startup_queries_sent++;

	br_multicast_send_query(port->br, port,
				port->multicast_startup_queries_sent);

out:
	spin_unlock(&br->multicast_lock);
}

void br_multicast_add_port(struct net_bridge_port *port)
{
	port->multicast_router = 1;

	setup_timer(&port->multicast_router_timer, br_multicast_router_expired,
		    (unsigned long)port);
	setup_timer(&port->multicast_query_timer,
		    br_multicast_port_query_expired, (unsigned long)port);
}

void br_multicast_del_port(struct net_bridge_port *port)
{
	del_timer_sync(&port->multicast_router_timer);
}

static void __br_multicast_enable_port(struct net_bridge_port *port)
{
	port->multicast_startup_queries_sent = 0;

	if (try_to_del_timer_sync(&port->multicast_query_timer) >= 0 ||
	    del_timer(&port->multicast_query_timer))
		mod_timer(&port->multicast_query_timer, jiffies);
}

void br_multicast_enable_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;

	spin_lock(&br->multicast_lock);
	if (br->multicast_disabled || !netif_running(br->dev))
		goto out;

	__br_multicast_enable_port(port);

out:
	spin_unlock(&br->multicast_lock);
}

void br_multicast_disable_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;
	struct net_bridge_port_group *pg;
	struct hlist_node *p, *n;

	spin_lock(&br->multicast_lock);
	hlist_for_each_entry_safe(pg, p, n, &port->mglist, mglist)
		br_multicast_del_pg(br, pg);

	if (!hlist_unhashed(&port->rlist))
		hlist_del_init_rcu(&port->rlist);
	del_timer(&port->multicast_router_timer);
	del_timer(&port->multicast_query_timer);
	spin_unlock(&br->multicast_lock);
}

static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
					 struct net_bridge_port *port,
					 struct sk_buff *skb)
{
	struct igmpv3_report *ih;
	struct igmpv3_grec *grec;
	int i;
	int len;
	int num;
	int type;
	int err = 0;
	__be32 group;

	if (!pskb_may_pull(skb, sizeof(*ih)))
		return -EINVAL;

	ih = igmpv3_report_hdr(skb);
	num = ntohs(ih->ngrec);
	len = sizeof(*ih);

	for (i = 0; i < num; i++) {
		len += sizeof(*grec);
		if (!pskb_may_pull(skb, len))
			return -EINVAL;

		grec = (void *)(skb->data + len - sizeof(*grec));
		group = grec->grec_mca;
		type = grec->grec_type;

		len += ntohs(grec->grec_nsrcs) * 4;
		if (!pskb_may_pull(skb, len))
			return -EINVAL;

		/* We treat this as an IGMPv2 report for now. */
		switch (type) {
		case IGMPV3_MODE_IS_INCLUDE:
		case IGMPV3_MODE_IS_EXCLUDE:
		case IGMPV3_CHANGE_TO_INCLUDE:
		case IGMPV3_CHANGE_TO_EXCLUDE:
		case IGMPV3_ALLOW_NEW_SOURCES:
		case IGMPV3_BLOCK_OLD_SOURCES:
			break;

		default:
			continue;
		}

		err = br_ip4_multicast_add_group(br, port, group);
		if (err)
			break;
	}

	return err;
}
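
/*
 * Source lists are ignored: every record type accepted above is
 * handled as a plain IGMPv2-style join, so INCLUDE/EXCLUDE filtering
 * remains the host's job and snooping only tracks group membership
 * per port.
 */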

#if IS_ENABLED(CONFIG_IPV6)
static int br_ip6_multicast_mld2_report(struct net_bridge *br,
					struct net_bridge_port *port,
					struct sk_buff *skb)
{
	struct icmp6hdr *icmp6h;
	struct mld2_grec *grec;
	int i;
	int len;
	int num;
	int err = 0;

	if (!pskb_may_pull(skb, sizeof(*icmp6h)))
		return -EINVAL;

	icmp6h = icmp6_hdr(skb);
	num = ntohs(icmp6h->icmp6_dataun.un_data16[1]);
	len = sizeof(*icmp6h);

	for (i = 0; i < num; i++) {
		__be16 *nsrcs, _nsrcs;

		nsrcs = skb_header_pointer(skb,
					   len + offsetof(struct mld2_grec,
							  grec_nsrcs),
					   sizeof(_nsrcs), &_nsrcs);
		if (!nsrcs)
			return -EINVAL;

		if (!pskb_may_pull(skb,
				   len + sizeof(*grec) +
				   sizeof(struct in6_addr) * ntohs(*nsrcs)))
			return -EINVAL;

		grec = (struct mld2_grec *)(skb->data + len);
		len += sizeof(*grec) +
		       sizeof(struct in6_addr) * ntohs(*nsrcs);

		/* We treat these as MLDv1 reports for now. */
		switch (grec->grec_type) {
		case MLD2_MODE_IS_INCLUDE:
		case MLD2_MODE_IS_EXCLUDE:
		case MLD2_CHANGE_TO_INCLUDE:
		case MLD2_CHANGE_TO_EXCLUDE:
		case MLD2_ALLOW_NEW_SOURCES:
		case MLD2_BLOCK_OLD_SOURCES:
			break;

		default:
			continue;
		}

		err = br_ip6_multicast_add_group(br, port, &grec->grec_mca);
		if (err)
			break;
	}

	return err;
}
#endif

/*
 * Add port to router_list
 *  list is maintained ordered by pointer value
 *  and locked by br->multicast_lock and RCU
 */
static void br_multicast_add_router(struct net_bridge *br,
				    struct net_bridge_port *port)
{
	struct net_bridge_port *p;
	struct hlist_node *n, *slot = NULL;

	hlist_for_each_entry(p, n, &br->router_list, rlist) {
		if ((unsigned long) port >= (unsigned long) p)
			break;
		slot = n;
	}

	if (slot)
		hlist_add_after_rcu(slot, &port->rlist);
	else
		hlist_add_head_rcu(&port->rlist, &br->router_list);
}

static void br_multicast_mark_router(struct net_bridge *br,
				     struct net_bridge_port *port)
{
	unsigned long now = jiffies;

	if (!port) {
		if (br->multicast_router == 1)
			mod_timer(&br->multicast_router_timer,
				  now + br->multicast_querier_interval);
		return;
	}

	if (port->multicast_router != 1)
		return;

	if (!hlist_unhashed(&port->rlist))
		goto timer;

	br_multicast_add_router(br, port);

timer:
	mod_timer(&port->multicast_router_timer,
		  now + br->multicast_querier_interval);
}

static void br_multicast_query_received(struct net_bridge *br,
					struct net_bridge_port *port,
					int saddr)
{
	if (saddr)
		mod_timer(&br->multicast_querier_timer,
			  jiffies + br->multicast_querier_interval);
	else if (timer_pending(&br->multicast_querier_timer))
		return;

	br_multicast_mark_router(br, port);
}
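
/*
 * A query with a non-zero source address re-arms the
 * other-querier-present timer and marks the ingress port as a router
 * port; a query from the unspecified address performs the marking
 * only while no elected querier is currently known.
 */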

static int br_ip4_multicast_query(struct net_bridge *br,
				  struct net_bridge_port *port,
				  struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct igmphdr *ih = igmp_hdr(skb);
	struct net_bridge_mdb_entry *mp;
	struct igmpv3_query *ih3;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	unsigned long max_delay;
	unsigned long now = jiffies;
	__be32 group;
	int err = 0;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	br_multicast_query_received(br, port, !!iph->saddr);

	group = ih->group;

	if (skb->len == sizeof(*ih)) {
		max_delay = ih->code * (HZ / IGMP_TIMER_SCALE);

		if (!max_delay) {
			max_delay = 10 * HZ;
			group = 0;
		}
	} else {
		if (!pskb_may_pull(skb, sizeof(struct igmpv3_query))) {
			err = -EINVAL;
			goto out;
		}

		ih3 = igmpv3_query_hdr(skb);
		if (ih3->nsrcs)
			goto out;

		max_delay = ih3->code ?
			    IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
	}

	if (!group)
		goto out;

	mp = br_mdb_ip4_get(mlock_dereference(br->mdb, br), group);
	if (!mp)
		goto out;

	max_delay *= br->multicast_last_member_count;

	if (mp->mglist &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(&p->timer) >= 0)
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&br->multicast_lock);
	return err;
}

#if IS_ENABLED(CONFIG_IPV6)
static int br_ip6_multicast_query(struct net_bridge *br,
				  struct net_bridge_port *port,
				  struct sk_buff *skb)
{
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	struct mld_msg *mld = (struct mld_msg *) icmp6_hdr(skb);
	struct net_bridge_mdb_entry *mp;
	struct mld2_query *mld2q;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	unsigned long max_delay;
	unsigned long now = jiffies;
	const struct in6_addr *group = NULL;
	int err = 0;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	br_multicast_query_received(br, port, !ipv6_addr_any(&ip6h->saddr));

	if (skb->len == sizeof(*mld)) {
		if (!pskb_may_pull(skb, sizeof(*mld))) {
			err = -EINVAL;
			goto out;
		}
		mld = (struct mld_msg *) icmp6_hdr(skb);
		max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay));
		if (max_delay)
			group = &mld->mld_mca;
	} else if (skb->len >= sizeof(*mld2q)) {
		if (!pskb_may_pull(skb, sizeof(*mld2q))) {
			err = -EINVAL;
			goto out;
		}
		mld2q = (struct mld2_query *)icmp6_hdr(skb);
		if (!mld2q->mld2q_nsrcs)
			group = &mld2q->mld2q_mca;
		max_delay = mld2q->mld2q_mrc ? MLDV2_MRC(mld2q->mld2q_mrc) : 1;
	}

	if (!group)
		goto out;

	mp = br_mdb_ip6_get(mlock_dereference(br->mdb, br), group);
	if (!mp)
		goto out;

	max_delay *= br->multicast_last_member_count;
	if (mp->mglist &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(&p->timer) >= 0)
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&br->multicast_lock);
	return err;
}
#endif

static void br_multicast_leave_group(struct net_bridge *br,
				     struct net_bridge_port *port,
				     struct br_ip *group)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	unsigned long now;
	unsigned long time;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED) ||
	    timer_pending(&br->multicast_querier_timer))
		goto out;

	mdb = mlock_dereference(br->mdb, br);
	mp = br_mdb_ip_get(mdb, group);
	if (!mp)
		goto out;

	now = jiffies;
	time = now + br->multicast_last_member_count *
		     br->multicast_last_member_interval;

	if (!port) {
		if (mp->mglist &&
		    (timer_pending(&mp->timer) ?
		     time_after(mp->timer.expires, time) :
		     try_to_del_timer_sync(&mp->timer) >= 0)) {
			mod_timer(&mp->timer, time);
		}

		goto out;
	}

	for (p = mlock_dereference(mp->ports, br);
	     p != NULL;
	     p = mlock_dereference(p->next, br)) {
		if (p->port != port)
			continue;

		if (!hlist_unhashed(&p->mglist) &&
		    (timer_pending(&p->timer) ?
		     time_after(p->timer.expires, time) :
		     try_to_del_timer_sync(&p->timer) >= 0)) {
			mod_timer(&p->timer, time);
		}

		break;
	}

out:
	spin_unlock(&br->multicast_lock);
}
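
/*
 * A leave does not remove the group immediately; while this bridge is
 * the active querier, the affected timer is merely pulled forward to
 * last_member_count * last_member_interval so that remaining members
 * still have time to answer the group-specific query.
 */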

static void br_ip4_multicast_leave_group(struct net_bridge *br,
					 struct net_bridge_port *port,
					 __be32 group)
{
	struct br_ip br_group;

	if (ipv4_is_local_multicast(group))
		return;

	br_group.u.ip4 = group;
	br_group.proto = htons(ETH_P_IP);

	br_multicast_leave_group(br, port, &br_group);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_leave_group(struct net_bridge *br,
					 struct net_bridge_port *port,
					 const struct in6_addr *group)
{
	struct br_ip br_group;

	if (!ipv6_is_transient_multicast(group))
		return;

	br_group.u.ip6 = *group;
	br_group.proto = htons(ETH_P_IPV6);

	br_multicast_leave_group(br, port, &br_group);
}
#endif

static int br_multicast_ipv4_rcv(struct net_bridge *br,
				 struct net_bridge_port *port,
				 struct sk_buff *skb)
{
	struct sk_buff *skb2 = skb;
	const struct iphdr *iph;
	struct igmphdr *ih;
	unsigned len;
	unsigned offset;
	int err;

	/* We treat OOM as packet loss for now. */
	if (!pskb_may_pull(skb, sizeof(*iph)))
		return -EINVAL;

	iph = ip_hdr(skb);

	if (iph->ihl < 5 || iph->version != 4)
		return -EINVAL;

	if (!pskb_may_pull(skb, ip_hdrlen(skb)))
		return -EINVAL;

	iph = ip_hdr(skb);

	if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
		return -EINVAL;

	if (iph->protocol != IPPROTO_IGMP) {
		if ((iph->daddr & IGMP_LOCAL_GROUP_MASK) != IGMP_LOCAL_GROUP)
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		return 0;
	}

	len = ntohs(iph->tot_len);
	if (skb->len < len || len < ip_hdrlen(skb))
		return -EINVAL;

	if (skb->len > len) {
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (!skb2)
			return -ENOMEM;

		err = pskb_trim_rcsum(skb2, len);
		if (err)
			goto err_out;
	}

	len -= ip_hdrlen(skb2);
	offset = skb_network_offset(skb2) + ip_hdrlen(skb2);
	__skb_pull(skb2, offset);
	skb_reset_transport_header(skb2);

	err = -EINVAL;
	if (!pskb_may_pull(skb2, sizeof(*ih)))
		goto out;

	switch (skb2->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!csum_fold(skb2->csum))
			break;
		/* fall through */
	case CHECKSUM_NONE:
		skb2->csum = 0;
		if (skb_checksum_complete(skb2))
			goto out;
	}

	err = 0;

	BR_INPUT_SKB_CB(skb)->igmp = 1;
	ih = igmp_hdr(skb2);

	switch (ih->type) {
	case IGMP_HOST_MEMBERSHIP_REPORT:
	case IGMPV2_HOST_MEMBERSHIP_REPORT:
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip4_multicast_add_group(br, port, ih->group);
		break;
	case IGMPV3_HOST_MEMBERSHIP_REPORT:
		err = br_ip4_multicast_igmp3_report(br, port, skb2);
		break;
	case IGMP_HOST_MEMBERSHIP_QUERY:
		err = br_ip4_multicast_query(br, port, skb2);
		break;
	case IGMP_HOST_LEAVE_MESSAGE:
		br_ip4_multicast_leave_group(br, port, ih->group);
		break;
	}

out:
	__skb_push(skb2, offset);
err_out:
	if (skb2 != skb)
		kfree_skb(skb2);
	return err;
}
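
/*
 * Note that ordinary multicast data (non-IGMP) is flagged
 * mrouters_only unless it falls into the link-local 224.0.0.0/24
 * range, which the forwarding path always floods.
 */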

#if IS_ENABLED(CONFIG_IPV6)
static int br_multicast_ipv6_rcv(struct net_bridge *br,
				 struct net_bridge_port *port,
				 struct sk_buff *skb)
{
	struct sk_buff *skb2;
	const struct ipv6hdr *ip6h;
	u8 icmp6_type;
	u8 nexthdr;
	__be16 frag_off;
	unsigned len;
	int offset;
	int err;

	if (!pskb_may_pull(skb, sizeof(*ip6h)))
		return -EINVAL;

	ip6h = ipv6_hdr(skb);

	/*
	 * We're interested in MLD messages only.
	 *  - Version is 6
	 *  - MLD has always Router Alert hop-by-hop option
	 *  - But we do not support jumbograms.
	 */
	if (ip6h->version != 6 ||
	    ip6h->nexthdr != IPPROTO_HOPOPTS ||
	    ip6h->payload_len == 0)
		return 0;

	len = ntohs(ip6h->payload_len) + sizeof(*ip6h);
	if (skb->len < len)
		return -EINVAL;

	nexthdr = ip6h->nexthdr;
	offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr, &frag_off);

	if (offset < 0 || nexthdr != IPPROTO_ICMPV6)
		return 0;

	/* Okay, we found ICMPv6 header */
	skb2 = skb_clone(skb, GFP_ATOMIC);
	if (!skb2)
		return -ENOMEM;

	err = -EINVAL;
	if (!pskb_may_pull(skb2, offset + sizeof(struct icmp6hdr)))
		goto out;

	len -= offset - skb_network_offset(skb2);

	__skb_pull(skb2, offset);
	skb_reset_transport_header(skb2);
	skb_postpull_rcsum(skb2, skb_network_header(skb2),
			   skb_network_header_len(skb2));

	icmp6_type = icmp6_hdr(skb2)->icmp6_type;

	switch (icmp6_type) {
	case ICMPV6_MGM_QUERY:
	case ICMPV6_MGM_REPORT:
	case ICMPV6_MGM_REDUCTION:
	case ICMPV6_MLD2_REPORT:
		break;
	default:
		err = 0;
		goto out;
	}

	/* Okay, we found MLD message. Check further. */
	if (skb2->len > len) {
		err = pskb_trim_rcsum(skb2, len);
		if (err)
			goto out;
		err = -EINVAL;
	}

	ip6h = ipv6_hdr(skb2);

	switch (skb2->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, skb2->len,
				     IPPROTO_ICMPV6, skb2->csum))
			break;
		/*FALLTHROUGH*/
	case CHECKSUM_NONE:
		skb2->csum = ~csum_unfold(csum_ipv6_magic(&ip6h->saddr,
							  &ip6h->daddr,
							  skb2->len,
							  IPPROTO_ICMPV6, 0));
		if (__skb_checksum_complete(skb2))
			goto out;
	}

	err = 0;

	BR_INPUT_SKB_CB(skb)->igmp = 1;

	switch (icmp6_type) {
	case ICMPV6_MGM_REPORT:
	    {
		struct mld_msg *mld;
		if (!pskb_may_pull(skb2, sizeof(*mld))) {
			err = -EINVAL;
			goto out;
		}
		mld = (struct mld_msg *)skb_transport_header(skb2);
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip6_multicast_add_group(br, port, &mld->mld_mca);
		break;
	    }
	case ICMPV6_MLD2_REPORT:
		err = br_ip6_multicast_mld2_report(br, port, skb2);
		break;
	case ICMPV6_MGM_QUERY:
		err = br_ip6_multicast_query(br, port, skb2);
		break;
	case ICMPV6_MGM_REDUCTION:
	    {
		struct mld_msg *mld;
		if (!pskb_may_pull(skb2, sizeof(*mld))) {
			err = -EINVAL;
			goto out;
		}
		mld = (struct mld_msg *)skb_transport_header(skb2);
		br_ip6_multicast_leave_group(br, port, &mld->mld_mca);
	    }
	}

out:
	kfree_skb(skb2);
	return err;
}
#endif

int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
		     struct sk_buff *skb)
{
	BR_INPUT_SKB_CB(skb)->igmp = 0;
	BR_INPUT_SKB_CB(skb)->mrouters_only = 0;

	if (br->multicast_disabled)
		return 0;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		return br_multicast_ipv4_rcv(br, port, skb);
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		return br_multicast_ipv6_rcv(br, port, skb);
#endif
	}

	return 0;
}

static void br_multicast_query_expired(unsigned long data)
{
	struct net_bridge *br = (void *)data;

	spin_lock(&br->multicast_lock);
	if (br->multicast_startup_queries_sent <
	    br->multicast_startup_query_count)
		br->multicast_startup_queries_sent++;

	br_multicast_send_query(br, NULL, br->multicast_startup_queries_sent);

	spin_unlock(&br->multicast_lock);
}

void br_multicast_init(struct net_bridge *br)
{
	br->hash_elasticity = 4;
	br->hash_max = 512;

	br->multicast_router = 1;
	br->multicast_last_member_count = 2;
	br->multicast_startup_query_count = 2;

	br->multicast_last_member_interval = HZ;
	br->multicast_query_response_interval = 10 * HZ;
	br->multicast_startup_query_interval = 125 * HZ / 4;
	br->multicast_query_interval = 125 * HZ;
	br->multicast_querier_interval = 255 * HZ;
	br->multicast_membership_interval = 260 * HZ;

	spin_lock_init(&br->multicast_lock);
	setup_timer(&br->multicast_router_timer,
		    br_multicast_local_router_expired, 0);
	setup_timer(&br->multicast_querier_timer,
		    br_multicast_local_router_expired, 0);
	setup_timer(&br->multicast_query_timer, br_multicast_query_expired,
		    (unsigned long)br);
}
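
/*
 * These defaults mirror RFC 2236: a 125 s query interval, a 10 s query
 * response interval, a robustness count of 2, a 260 s membership
 * interval (2 * 125 + 10) and a 255 s other-querier-present interval
 * (2 * 125 + 10 / 2).
 */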

void br_multicast_open(struct net_bridge *br)
{
	br->multicast_startup_queries_sent = 0;

	if (br->multicast_disabled)
		return;

	mod_timer(&br->multicast_query_timer, jiffies);
}

void br_multicast_stop(struct net_bridge *br)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	struct hlist_node *p, *n;
	u32 ver;
	int i;

	del_timer_sync(&br->multicast_router_timer);
	del_timer_sync(&br->multicast_querier_timer);
	del_timer_sync(&br->multicast_query_timer);

	spin_lock_bh(&br->multicast_lock);
	mdb = mlock_dereference(br->mdb, br);
	if (!mdb)
		goto out;

	br->mdb = NULL;

	ver = mdb->ver;
	for (i = 0; i < mdb->max; i++) {
		hlist_for_each_entry_safe(mp, p, n, &mdb->mhash[i],
					  hlist[ver]) {
			del_timer(&mp->timer);
			call_rcu_bh(&mp->rcu, br_multicast_free_group);
		}
	}

	if (mdb->old) {
		spin_unlock_bh(&br->multicast_lock);
		rcu_barrier_bh();
		spin_lock_bh(&br->multicast_lock);
		WARN_ON(mdb->old);
	}

	mdb->old = mdb;
	call_rcu_bh(&mdb->rcu, br_mdb_free);

out:
	spin_unlock_bh(&br->multicast_lock);
}
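
/*
 * Tear-down reuses the rehash free path for the last table: pointing
 * mdb->old at the table itself makes br_mdb_free() release both the
 * bucket array and the table once the RCU grace period has elapsed.
 */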

int br_multicast_set_router(struct net_bridge *br, unsigned long val)
{
	int err = -ENOENT;

	spin_lock_bh(&br->multicast_lock);
	if (!netif_running(br->dev))
		goto unlock;

	switch (val) {
	case 0:
	case 2:
		del_timer(&br->multicast_router_timer);
		/* fall through */
	case 1:
		br->multicast_router = val;
		err = 0;
		break;

	default:
		err = -EINVAL;
		break;
	}

unlock:
	spin_unlock_bh(&br->multicast_lock);

	return err;
}

int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val)
{
	struct net_bridge *br = p->br;
	int err = -ENOENT;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || p->state == BR_STATE_DISABLED)
		goto unlock;

	switch (val) {
	case 0:
	case 1:
	case 2:
		p->multicast_router = val;
		err = 0;

		if (val < 2 && !hlist_unhashed(&p->rlist))
			hlist_del_init_rcu(&p->rlist);

		if (val == 1)
			break;

		del_timer(&p->multicast_router_timer);

		if (val == 0)
			break;

		br_multicast_add_router(br, p);
		break;

	default:
		err = -EINVAL;
		break;
	}

unlock:
	spin_unlock(&br->multicast_lock);

	return err;
}
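
/*
 * For both setters above: 0 disables router behaviour, 1 (the default)
 * relies on query-driven detection backed by the router timer, and 2
 * marks a permanent multicast router with no timer.  These knobs are
 * exposed through the bridge sysfs attributes (multicast_router and
 * the per-port brport equivalent), assuming the standard br_sysfs
 * bindings.
 */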

int br_multicast_toggle(struct net_bridge *br, unsigned long val)
{
	struct net_bridge_port *port;
	int err = 0;
	struct net_bridge_mdb_htable *mdb;

	spin_lock_bh(&br->multicast_lock);
	if (br->multicast_disabled == !val)
		goto unlock;

	br->multicast_disabled = !val;
	if (br->multicast_disabled)
		goto unlock;

	if (!netif_running(br->dev))
		goto unlock;

	mdb = mlock_dereference(br->mdb, br);
	if (mdb) {
		if (mdb->old) {
			err = -EEXIST;
rollback:
			br->multicast_disabled = !!val;
			goto unlock;
		}

		err = br_mdb_rehash(&br->mdb, mdb->max,
				    br->hash_elasticity);
		if (err)
			goto rollback;
	}

	br_multicast_open(br);
	list_for_each_entry(port, &br->port_list, list) {
		if (port->state == BR_STATE_DISABLED ||
		    port->state == BR_STATE_BLOCKING)
			continue;

		__br_multicast_enable_port(port);
	}

unlock:
	spin_unlock_bh(&br->multicast_lock);

	return err;
}

int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val)
{
	int err = -ENOENT;
	u32 old;
	struct net_bridge_mdb_htable *mdb;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev))
		goto unlock;

	err = -EINVAL;
	if (!is_power_of_2(val))
		goto unlock;

	mdb = mlock_dereference(br->mdb, br);
	if (mdb && val < mdb->size)
		goto unlock;

	err = 0;

	old = br->hash_max;
	br->hash_max = val;

	if (mdb) {
		if (mdb->old) {
			err = -EEXIST;
rollback:
			br->hash_max = old;
			goto unlock;
		}

		err = br_mdb_rehash(&br->mdb, br->hash_max,
				    br->hash_elasticity);
		if (err)
			goto rollback;
	}

unlock:
	spin_unlock(&br->multicast_lock);

	return err;
}