// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Bridge multicast support.
 *
 * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
 */

#include <linux/export.h>
#include <linux/if_ether.h>
#include <linux/igmp.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/netdevice.h>
#include <linux/netfilter_bridge.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/inetdevice.h>
#include <linux/mroute.h>
#include <net/ip.h>
#include <net/switchdev.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <linux/icmpv6.h>
#include <net/ipv6.h>
#include <net/mld.h>
#include <net/ip6_checksum.h>
#include <net/addrconf.h>
#endif

#include "br_private.h"
static const struct rhashtable_params br_mdb_rht_params = {
	.head_offset = offsetof(struct net_bridge_mdb_entry, rhnode),
	.key_offset = offsetof(struct net_bridge_mdb_entry, addr),
	.key_len = sizeof(struct br_ip),
	.automatic_shrinking = true,
};

static const struct rhashtable_params br_sg_port_rht_params = {
	.head_offset = offsetof(struct net_bridge_port_group, rhnode),
	.key_offset = offsetof(struct net_bridge_port_group, key),
	.key_len = sizeof(struct net_bridge_port_group_sg_key),
	.automatic_shrinking = true,
};
static void br_multicast_start_querier(struct net_bridge *br,
				       struct bridge_mcast_own_query *query);
static void br_multicast_add_router(struct net_bridge *br,
				    struct net_bridge_port *port);
static void br_ip4_multicast_leave_group(struct net_bridge *br,
					 struct net_bridge_port *port,
					 __be32 group,
					 __u16 vid,
					 const unsigned char *src);
static void br_multicast_port_group_rexmit(struct timer_list *t);

static void __del_port_router(struct net_bridge_port *p);
#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_leave_group(struct net_bridge *br,
					 struct net_bridge_port *port,
					 const struct in6_addr *group,
					 __u16 vid, const unsigned char *src);
#endif
static struct net_bridge_port_group *
__br_multicast_add_group(struct net_bridge *br,
			 struct net_bridge_port *port,
			 struct br_ip *group,
			 const unsigned char *src,
			 u8 filter_mode,
			 bool igmpv2_mldv1,
			 bool blocked);
static void br_multicast_find_del_pg(struct net_bridge *br,
				     struct net_bridge_port_group *pg);
static struct net_bridge_port_group *
br_sg_port_find(struct net_bridge *br,
		struct net_bridge_port_group_sg_key *sg_p)
{
	lockdep_assert_held_once(&br->multicast_lock);

	return rhashtable_lookup_fast(&br->sg_port_tbl, sg_p,
				      br_sg_port_rht_params);
}

static struct net_bridge_mdb_entry *br_mdb_ip_get_rcu(struct net_bridge *br,
						      struct br_ip *dst)
{
	return rhashtable_lookup(&br->mdb_hash_tbl, dst, br_mdb_rht_params);
}

struct net_bridge_mdb_entry *br_mdb_ip_get(struct net_bridge *br,
					   struct br_ip *dst)
{
	struct net_bridge_mdb_entry *ent;

	lockdep_assert_held_once(&br->multicast_lock);

	rcu_read_lock();
	ent = rhashtable_lookup(&br->mdb_hash_tbl, dst, br_mdb_rht_params);
	rcu_read_unlock();

	return ent;
}

static struct net_bridge_mdb_entry *br_mdb_ip4_get(struct net_bridge *br,
						   __be32 dst, __u16 vid)
{
	struct br_ip br_dst;

	memset(&br_dst, 0, sizeof(br_dst));
	br_dst.dst.ip4 = dst;
	br_dst.proto = htons(ETH_P_IP);
	br_dst.vid = vid;

	return br_mdb_ip_get(br, &br_dst);
}

#if IS_ENABLED(CONFIG_IPV6)
static struct net_bridge_mdb_entry *br_mdb_ip6_get(struct net_bridge *br,
						   const struct in6_addr *dst,
						   __u16 vid)
{
	struct br_ip br_dst;

	memset(&br_dst, 0, sizeof(br_dst));
	br_dst.dst.ip6 = *dst;
	br_dst.proto = htons(ETH_P_IPV6);
	br_dst.vid = vid;

	return br_mdb_ip_get(br, &br_dst);
}
#endif
struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
					struct sk_buff *skb, u16 vid)
{
	struct br_ip ip;

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return NULL;

	if (BR_INPUT_SKB_CB(skb)->igmp)
		return NULL;

	memset(&ip, 0, sizeof(ip));
	ip.proto = skb->protocol;
	ip.vid = vid;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ip.dst.ip4 = ip_hdr(skb)->daddr;
		if (br->multicast_igmp_version == 3) {
			struct net_bridge_mdb_entry *mdb;

			ip.src.ip4 = ip_hdr(skb)->saddr;
			mdb = br_mdb_ip_get_rcu(br, &ip);
			if (mdb)
				return mdb;
			ip.src.ip4 = 0;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ip.dst.ip6 = ipv6_hdr(skb)->daddr;
		if (br->multicast_mld_version == 2) {
			struct net_bridge_mdb_entry *mdb;

			ip.src.ip6 = ipv6_hdr(skb)->saddr;
			mdb = br_mdb_ip_get_rcu(br, &ip);
			if (mdb)
				return mdb;
			memset(&ip.src.ip6, 0, sizeof(ip.src.ip6));
		}
		break;
#endif
	default:
		ip.proto = 0;
		ether_addr_copy(ip.dst.mac_addr, eth_hdr(skb)->h_dest);
	}

	return br_mdb_ip_get_rcu(br, &ip);
}
static bool br_port_group_equal(struct net_bridge_port_group *p,
				struct net_bridge_port *port,
				const unsigned char *src)
{
	if (p->key.port != port)
		return false;

	if (!(port->flags & BR_MULTICAST_TO_UNICAST))
		return true;

	return ether_addr_equal(src, p->eth_addr);
}
static void __fwd_add_star_excl(struct net_bridge_port_group *pg,
				struct br_ip *sg_ip)
{
	struct net_bridge_port_group_sg_key sg_key;
	struct net_bridge *br = pg->key.port->br;
	struct net_bridge_port_group *src_pg;

	memset(&sg_key, 0, sizeof(sg_key));
	sg_key.port = pg->key.port;
	sg_key.addr = *sg_ip;
	if (br_sg_port_find(br, &sg_key))
		return;

	src_pg = __br_multicast_add_group(br, pg->key.port, sg_ip, pg->eth_addr,
					  MCAST_INCLUDE, false, false);
	if (IS_ERR_OR_NULL(src_pg) ||
	    src_pg->rt_protocol != RTPROT_KERNEL)
		return;

	src_pg->flags |= MDB_PG_FLAGS_STAR_EXCL;
}
static void __fwd_del_star_excl(struct net_bridge_port_group *pg,
				struct br_ip *sg_ip)
{
	struct net_bridge_port_group_sg_key sg_key;
	struct net_bridge *br = pg->key.port->br;
	struct net_bridge_port_group *src_pg;

	memset(&sg_key, 0, sizeof(sg_key));
	sg_key.port = pg->key.port;
	sg_key.addr = *sg_ip;
	src_pg = br_sg_port_find(br, &sg_key);
	if (!src_pg || !(src_pg->flags & MDB_PG_FLAGS_STAR_EXCL) ||
	    src_pg->rt_protocol != RTPROT_KERNEL)
		return;

	br_multicast_find_del_pg(br, src_pg);
}
/* When a port group transitions to (or is added as) EXCLUDE we need to add it
 * to all other ports' S,G entries which are not blocked by the current group
 * for proper replication, the assumption is that any S,G blocked entries
 * are already added so the S,G,port lookup should skip them.
 * When a port group transitions from EXCLUDE -> INCLUDE mode or is being
 * deleted we need to remove it from all ports' S,G entries where it was
 * automatically installed before (i.e. where it's MDB_PG_FLAGS_STAR_EXCL).
 */
void br_multicast_star_g_handle_mode(struct net_bridge_port_group *pg,
				     u8 filter_mode)
{
	struct net_bridge *br = pg->key.port->br;
	struct net_bridge_port_group *pg_lst;
	struct net_bridge_mdb_entry *mp;
	struct br_ip sg_ip;

	if (WARN_ON(!br_multicast_is_star_g(&pg->key.addr)))
		return;

	mp = br_mdb_ip_get(br, &pg->key.addr);
	if (!mp)
		return;

	memset(&sg_ip, 0, sizeof(sg_ip));
	sg_ip = pg->key.addr;
	for (pg_lst = mlock_dereference(mp->ports, br);
	     pg_lst;
	     pg_lst = mlock_dereference(pg_lst->next, br)) {
		struct net_bridge_group_src *src_ent;

		if (pg_lst == pg)
			continue;
		hlist_for_each_entry(src_ent, &pg_lst->src_list, node) {
			if (!(src_ent->flags & BR_SGRP_F_INSTALLED))
				continue;
			sg_ip.src = src_ent->addr.src;
			switch (filter_mode) {
			case MCAST_INCLUDE:
				__fwd_del_star_excl(pg, &sg_ip);
				break;
			case MCAST_EXCLUDE:
				__fwd_add_star_excl(pg, &sg_ip);
				break;
			}
		}
	}
}
/* called when adding a new S,G with host_joined == false by default */
static void br_multicast_sg_host_state(struct net_bridge_mdb_entry *star_mp,
				       struct net_bridge_port_group *sg)
{
	struct net_bridge_mdb_entry *sg_mp;

	if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr)))
		return;
	if (!star_mp->host_joined)
		return;

	sg_mp = br_mdb_ip_get(star_mp->br, &sg->key.addr);
	if (!sg_mp)
		return;
	sg_mp->host_joined = true;
}
/* set the host_joined state of all of *,G's S,G entries */
static void br_multicast_star_g_host_state(struct net_bridge_mdb_entry *star_mp)
{
	struct net_bridge *br = star_mp->br;
	struct net_bridge_mdb_entry *sg_mp;
	struct net_bridge_port_group *pg;
	struct br_ip sg_ip;

	if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr)))
		return;

	memset(&sg_ip, 0, sizeof(sg_ip));
	sg_ip = star_mp->addr;
	for (pg = mlock_dereference(star_mp->ports, br);
	     pg;
	     pg = mlock_dereference(pg->next, br)) {
		struct net_bridge_group_src *src_ent;

		hlist_for_each_entry(src_ent, &pg->src_list, node) {
			if (!(src_ent->flags & BR_SGRP_F_INSTALLED))
				continue;
			sg_ip.src = src_ent->addr.src;
			sg_mp = br_mdb_ip_get(br, &sg_ip);
			if (!sg_mp)
				continue;
			sg_mp->host_joined = star_mp->host_joined;
		}
	}
}
static void br_multicast_sg_del_exclude_ports(struct net_bridge_mdb_entry *sgmp)
{
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p;

	/* *,G exclude ports are only added to S,G entries */
	if (WARN_ON(br_multicast_is_star_g(&sgmp->addr)))
		return;

	/* we need the STAR_EXCLUDE ports if there are non-STAR_EXCLUDE ports
	 * we should ignore perm entries since they're managed by user-space
	 */
	for (pp = &sgmp->ports;
	     (p = mlock_dereference(*pp, sgmp->br)) != NULL;
	     pp = &p->next)
		if (!(p->flags & (MDB_PG_FLAGS_STAR_EXCL |
				  MDB_PG_FLAGS_PERMANENT)))
			return;

	/* currently the host can only have joined the *,G which means
	 * we treat it as EXCLUDE {}, so for an S,G it's considered a
	 * STAR_EXCLUDE entry and we can safely leave it
	 */
	sgmp->host_joined = false;

	for (pp = &sgmp->ports;
	     (p = mlock_dereference(*pp, sgmp->br)) != NULL;) {
		if (!(p->flags & MDB_PG_FLAGS_PERMANENT))
			br_multicast_del_pg(sgmp, p, pp);
		else
			pp = &p->next;
	}
}
void br_multicast_sg_add_exclude_ports(struct net_bridge_mdb_entry *star_mp,
				       struct net_bridge_port_group *sg)
{
	struct net_bridge_port_group_sg_key sg_key;
	struct net_bridge *br = star_mp->br;
	struct net_bridge_port_group *pg;

	if (WARN_ON(br_multicast_is_star_g(&sg->key.addr)))
		return;
	if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr)))
		return;

	br_multicast_sg_host_state(star_mp, sg);
	memset(&sg_key, 0, sizeof(sg_key));
	sg_key.addr = sg->key.addr;
	/* we need to add all exclude ports to the S,G */
	for (pg = mlock_dereference(star_mp->ports, br);
	     pg;
	     pg = mlock_dereference(pg->next, br)) {
		struct net_bridge_port_group *src_pg;

		if (pg == sg || pg->filter_mode == MCAST_INCLUDE)
			continue;

		sg_key.port = pg->key.port;
		if (br_sg_port_find(br, &sg_key))
			continue;

		src_pg = __br_multicast_add_group(br, pg->key.port,
						  &sg->key.addr,
						  sg->eth_addr,
						  MCAST_INCLUDE, false, false);
		if (IS_ERR_OR_NULL(src_pg) ||
		    src_pg->rt_protocol != RTPROT_KERNEL)
			continue;
		src_pg->flags |= MDB_PG_FLAGS_STAR_EXCL;
	}
}
static void br_multicast_fwd_src_add(struct net_bridge_group_src *src)
{
	struct net_bridge_mdb_entry *star_mp;
	struct net_bridge_port_group *sg;
	struct br_ip sg_ip;

	if (src->flags & BR_SGRP_F_INSTALLED)
		return;

	memset(&sg_ip, 0, sizeof(sg_ip));
	sg_ip = src->pg->key.addr;
	sg_ip.src = src->addr.src;
	sg = __br_multicast_add_group(src->br, src->pg->key.port, &sg_ip,
				      src->pg->eth_addr, MCAST_INCLUDE, false,
				      !timer_pending(&src->timer));
	if (IS_ERR_OR_NULL(sg))
		return;
	src->flags |= BR_SGRP_F_INSTALLED;
	sg->flags &= ~MDB_PG_FLAGS_STAR_EXCL;

	/* if it was added by user-space as perm we can skip next steps */
	if (sg->rt_protocol != RTPROT_KERNEL &&
	    (sg->flags & MDB_PG_FLAGS_PERMANENT))
		return;

	/* the kernel is now responsible for removing this S,G */
	del_timer(&sg->timer);
	star_mp = br_mdb_ip_get(src->br, &src->pg->key.addr);
	if (!star_mp)
		return;

	br_multicast_sg_add_exclude_ports(star_mp, sg);
}
static void br_multicast_fwd_src_remove(struct net_bridge_group_src *src)
{
	struct net_bridge_port_group *p, *pg = src->pg;
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_mdb_entry *mp;
	struct br_ip sg_ip;

	memset(&sg_ip, 0, sizeof(sg_ip));
	sg_ip = pg->key.addr;
	sg_ip.src = src->addr.src;

	mp = br_mdb_ip_get(src->br, &sg_ip);
	if (!mp)
		return;

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, src->br)) != NULL;
	     pp = &p->next) {
		if (!br_port_group_equal(p, pg->key.port, pg->eth_addr))
			continue;

		if (p->rt_protocol != RTPROT_KERNEL &&
		    (p->flags & MDB_PG_FLAGS_PERMANENT))
			break;

		br_multicast_del_pg(mp, p, pp);
		break;
	}
	src->flags &= ~BR_SGRP_F_INSTALLED;
}
/* install S,G and based on src's timer enable or disable forwarding */
static void br_multicast_fwd_src_handle(struct net_bridge_group_src *src)
{
	struct net_bridge_port_group_sg_key sg_key;
	struct net_bridge_port_group *sg;
	u8 old_flags;

	br_multicast_fwd_src_add(src);

	memset(&sg_key, 0, sizeof(sg_key));
	sg_key.addr = src->pg->key.addr;
	sg_key.addr.src = src->addr.src;
	sg_key.port = src->pg->key.port;

	sg = br_sg_port_find(src->br, &sg_key);
	if (!sg || (sg->flags & MDB_PG_FLAGS_PERMANENT))
		return;

	old_flags = sg->flags;
	if (timer_pending(&src->timer))
		sg->flags &= ~MDB_PG_FLAGS_BLOCKED;
	else
		sg->flags |= MDB_PG_FLAGS_BLOCKED;

	if (old_flags != sg->flags) {
		struct net_bridge_mdb_entry *sg_mp;

		sg_mp = br_mdb_ip_get(src->br, &sg_key.addr);
		if (!sg_mp)
			return;
		br_mdb_notify(src->br->dev, sg_mp, sg, RTM_NEWMDB);
	}
}
static void br_multicast_destroy_mdb_entry(struct net_bridge_mcast_gc *gc)
{
	struct net_bridge_mdb_entry *mp;

	mp = container_of(gc, struct net_bridge_mdb_entry, mcast_gc);
	WARN_ON(!hlist_unhashed(&mp->mdb_node));
	WARN_ON(mp->ports);

	del_timer_sync(&mp->timer);
	kfree_rcu(mp, rcu);
}

static void br_multicast_del_mdb_entry(struct net_bridge_mdb_entry *mp)
{
	struct net_bridge *br = mp->br;

	rhashtable_remove_fast(&br->mdb_hash_tbl, &mp->rhnode,
			       br_mdb_rht_params);
	hlist_del_init_rcu(&mp->mdb_node);
	hlist_add_head(&mp->mcast_gc.gc_node, &br->mcast_gc_list);
	queue_work(system_long_wq, &br->mcast_gc_work);
}
static void br_multicast_group_expired(struct timer_list *t)
{
	struct net_bridge_mdb_entry *mp = from_timer(mp, t, timer);
	struct net_bridge *br = mp->br;

	spin_lock(&br->multicast_lock);
	if (hlist_unhashed(&mp->mdb_node) || !netif_running(br->dev) ||
	    timer_pending(&mp->timer))
		goto out;

	br_multicast_host_leave(mp, true);
	if (mp->ports)
		goto out;
	br_multicast_del_mdb_entry(mp);
out:
	spin_unlock(&br->multicast_lock);
}
static void br_multicast_destroy_group_src(struct net_bridge_mcast_gc *gc)
{
	struct net_bridge_group_src *src;

	src = container_of(gc, struct net_bridge_group_src, mcast_gc);
	WARN_ON(!hlist_unhashed(&src->node));

	del_timer_sync(&src->timer);
	kfree_rcu(src, rcu);
}

static void br_multicast_del_group_src(struct net_bridge_group_src *src)
{
	struct net_bridge *br = src->pg->key.port->br;

	br_multicast_fwd_src_remove(src);
	hlist_del_init_rcu(&src->node);
	src->pg->src_ents--;
	hlist_add_head(&src->mcast_gc.gc_node, &br->mcast_gc_list);
	queue_work(system_long_wq, &br->mcast_gc_work);
}
static void br_multicast_destroy_port_group(struct net_bridge_mcast_gc *gc)
{
	struct net_bridge_port_group *pg;

	pg = container_of(gc, struct net_bridge_port_group, mcast_gc);
	WARN_ON(!hlist_unhashed(&pg->mglist));
	WARN_ON(!hlist_empty(&pg->src_list));

	del_timer_sync(&pg->rexmit_timer);
	del_timer_sync(&pg->timer);
	kfree_rcu(pg, rcu);
}
void br_multicast_del_pg(struct net_bridge_mdb_entry *mp,
			 struct net_bridge_port_group *pg,
			 struct net_bridge_port_group __rcu **pp)
{
	struct net_bridge *br = pg->key.port->br;
	struct net_bridge_group_src *ent;
	struct hlist_node *tmp;

	rcu_assign_pointer(*pp, pg->next);
	hlist_del_init(&pg->mglist);
	hlist_for_each_entry_safe(ent, tmp, &pg->src_list, node)
		br_multicast_del_group_src(ent);
	br_mdb_notify(br->dev, mp, pg, RTM_DELMDB);
	if (!br_multicast_is_star_g(&mp->addr)) {
		rhashtable_remove_fast(&br->sg_port_tbl, &pg->rhnode,
				       br_sg_port_rht_params);
		br_multicast_sg_del_exclude_ports(mp);
	} else {
		br_multicast_star_g_handle_mode(pg, MCAST_INCLUDE);
	}
	hlist_add_head(&pg->mcast_gc.gc_node, &br->mcast_gc_list);
	queue_work(system_long_wq, &br->mcast_gc_work);

	if (!mp->ports && !mp->host_joined && netif_running(br->dev))
		mod_timer(&mp->timer, jiffies);
}
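/* Note: when the last port group of an MDB entry goes away and the host has
 * not joined, the group timer is armed with 'jiffies' so that
 * br_multicast_group_expired() tears down the now-empty entry shortly after.
 */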
static void br_multicast_find_del_pg(struct net_bridge *br,
				     struct net_bridge_port_group *pg)
{
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;

	mp = br_mdb_ip_get(br, &pg->key.addr);
	if (!mp)
		return;

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p != pg)
			continue;

		br_multicast_del_pg(mp, pg, pp);
		break;
	}
}
static void br_multicast_port_group_expired(struct timer_list *t)
{
	struct net_bridge_port_group *pg = from_timer(pg, t, timer);
	struct net_bridge_group_src *src_ent;
	struct net_bridge *br = pg->key.port->br;
	struct hlist_node *tmp;
	bool changed;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || timer_pending(&pg->timer) ||
	    hlist_unhashed(&pg->mglist) || pg->flags & MDB_PG_FLAGS_PERMANENT)
		goto out;

	changed = !!(pg->filter_mode == MCAST_EXCLUDE);
	pg->filter_mode = MCAST_INCLUDE;
	hlist_for_each_entry_safe(src_ent, tmp, &pg->src_list, node) {
		if (!timer_pending(&src_ent->timer)) {
			br_multicast_del_group_src(src_ent);
			changed = true;
		}
	}

	if (hlist_empty(&pg->src_list)) {
		br_multicast_find_del_pg(br, pg);
	} else if (changed) {
		struct net_bridge_mdb_entry *mp = br_mdb_ip_get(br, &pg->key.addr);

		if (changed && br_multicast_is_star_g(&pg->key.addr))
			br_multicast_star_g_handle_mode(pg, MCAST_INCLUDE);

		if (WARN_ON(!mp))
			goto out;
		br_mdb_notify(br->dev, mp, pg, RTM_NEWMDB);
	}
out:
	spin_unlock(&br->multicast_lock);
}
static void br_multicast_gc(struct hlist_head *head)
{
	struct net_bridge_mcast_gc *gcent;
	struct hlist_node *tmp;

	hlist_for_each_entry_safe(gcent, tmp, head, gc_node) {
		hlist_del_init(&gcent->gc_node);
		gcent->destroy(gcent);
	}
}
static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge *br,
						    struct net_bridge_port_group *pg,
						    __be32 ip_dst, __be32 group,
						    bool with_srcs, bool over_lmqt,
						    u8 sflag, u8 *igmp_type,
						    bool *need_rexmit)
{
	struct net_bridge_port *p = pg ? pg->key.port : NULL;
	struct net_bridge_group_src *ent;
	size_t pkt_size, igmp_hdr_size;
	unsigned long now = jiffies;
	struct igmpv3_query *ihv3;
	void *csum_start = NULL;
	__sum16 *csum = NULL;
	struct sk_buff *skb;
	struct igmphdr *ih;
	struct ethhdr *eth;
	unsigned long lmqt;
	struct iphdr *iph;
	u16 lmqt_srcs = 0;

	igmp_hdr_size = sizeof(*ih);
	if (br->multicast_igmp_version == 3) {
		igmp_hdr_size = sizeof(*ihv3);
		if (pg && with_srcs) {
			lmqt = now + (br->multicast_last_member_interval *
				      br->multicast_last_member_count);
			hlist_for_each_entry(ent, &pg->src_list, node) {
				if (over_lmqt == time_after(ent->timer.expires,
							    lmqt) &&
				    ent->src_query_rexmit_cnt > 0)
					lmqt_srcs++;
			}

			if (!lmqt_srcs)
				return NULL;
			igmp_hdr_size += lmqt_srcs * sizeof(__be32);
		}
	}

	pkt_size = sizeof(*eth) + sizeof(*iph) + 4 + igmp_hdr_size;
	if ((p && pkt_size > p->dev->mtu) ||
	    pkt_size > br->dev->mtu)
		return NULL;

	skb = netdev_alloc_skb_ip_align(br->dev, pkt_size);
	if (!skb)
		goto out;

	skb->protocol = htons(ETH_P_IP);

	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	ether_addr_copy(eth->h_source, br->dev->dev_addr);
	ip_eth_mc_map(ip_dst, eth->h_dest);
	eth->h_proto = htons(ETH_P_IP);
	skb_put(skb, sizeof(*eth));

	skb_set_network_header(skb, skb->len);
	iph = ip_hdr(skb);
	iph->tot_len = htons(pkt_size - sizeof(*eth));

	iph->version = 4;
	iph->ihl = 6;
	iph->tos = 0xc0;
	iph->id = 0;
	iph->frag_off = htons(IP_DF);
	iph->ttl = 1;
	iph->protocol = IPPROTO_IGMP;
	iph->saddr = br_opt_get(br, BROPT_MULTICAST_QUERY_USE_IFADDR) ?
		     inet_select_addr(br->dev, 0, RT_SCOPE_LINK) : 0;
	iph->daddr = ip_dst;
	((u8 *)&iph[1])[0] = IPOPT_RA;
	((u8 *)&iph[1])[1] = 4;
	((u8 *)&iph[1])[2] = 0;
	((u8 *)&iph[1])[3] = 0;
	ip_send_check(iph);
	skb_put(skb, 24);

	skb_set_transport_header(skb, skb->len);
	*igmp_type = IGMP_HOST_MEMBERSHIP_QUERY;

	switch (br->multicast_igmp_version) {
	case 2:
		ih = igmp_hdr(skb);
		ih->type = IGMP_HOST_MEMBERSHIP_QUERY;
		ih->code = (group ? br->multicast_last_member_interval :
				    br->multicast_query_response_interval) /
			   (HZ / IGMP_TIMER_SCALE);
		ih->group = group;
		ih->csum = 0;
		csum = &ih->csum;
		csum_start = (void *)ih;
		break;
	case 3:
		ihv3 = igmpv3_query_hdr(skb);
		ihv3->type = IGMP_HOST_MEMBERSHIP_QUERY;
		ihv3->code = (group ? br->multicast_last_member_interval :
				      br->multicast_query_response_interval) /
			     (HZ / IGMP_TIMER_SCALE);
		ihv3->group = group;
		ihv3->qqic = br->multicast_query_interval / HZ;
		ihv3->nsrcs = htons(lmqt_srcs);
		ihv3->resv = 0;
		ihv3->suppress = sflag;
		ihv3->qrv = 2;
		ihv3->csum = 0;
		csum = &ihv3->csum;
		csum_start = (void *)ihv3;
		if (!pg || !with_srcs)
			break;

		lmqt_srcs = 0;
		hlist_for_each_entry(ent, &pg->src_list, node) {
			if (over_lmqt == time_after(ent->timer.expires,
						    lmqt) &&
			    ent->src_query_rexmit_cnt > 0) {
				ihv3->srcs[lmqt_srcs++] = ent->addr.src.ip4;
				ent->src_query_rexmit_cnt--;
				if (need_rexmit && ent->src_query_rexmit_cnt)
					*need_rexmit = true;
			}
		}

		if (WARN_ON(lmqt_srcs != ntohs(ihv3->nsrcs))) {
			kfree_skb(skb);
			return NULL;
		}
		break;
	}

	if (WARN_ON(!csum || !csum_start)) {
		kfree_skb(skb);
		return NULL;
	}

	*csum = ip_compute_csum(csum_start, igmp_hdr_size);
	skb_put(skb, igmp_hdr_size);
	__skb_pull(skb, sizeof(*eth));

out:
	return skb;
}
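/* The query skb built above is an Ethernet header plus an IPv4 header that
 * carries a 4-byte Router Alert option (the "+ 4" in pkt_size), followed by
 * an IGMPv2 or IGMPv3 query; the IGMP checksum is computed over
 * igmp_hdr_size bytes and the Ethernet header is pulled again before the
 * skb is handed back to the caller.
 */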
#if IS_ENABLED(CONFIG_IPV6)
static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
						    struct net_bridge_port_group *pg,
						    const struct in6_addr *ip6_dst,
						    const struct in6_addr *group,
						    bool with_srcs, bool over_llqt,
						    u8 sflag, u8 *igmp_type,
						    bool *need_rexmit)
{
	struct net_bridge_port *p = pg ? pg->key.port : NULL;
	struct net_bridge_group_src *ent;
	size_t pkt_size, mld_hdr_size;
	unsigned long now = jiffies;
	struct mld2_query *mld2q;
	void *csum_start = NULL;
	unsigned long interval;
	__sum16 *csum = NULL;
	struct ipv6hdr *ip6h;
	struct mld_msg *mldq;
	struct sk_buff *skb;
	unsigned long llqt;
	struct ethhdr *eth;
	u16 llqt_srcs = 0;
	u8 *hopopt;

	mld_hdr_size = sizeof(*mldq);
	if (br->multicast_mld_version == 2) {
		mld_hdr_size = sizeof(*mld2q);
		if (pg && with_srcs) {
			llqt = now + (br->multicast_last_member_interval *
				      br->multicast_last_member_count);
			hlist_for_each_entry(ent, &pg->src_list, node) {
				if (over_llqt == time_after(ent->timer.expires,
							    llqt) &&
				    ent->src_query_rexmit_cnt > 0)
					llqt_srcs++;
			}

			if (!llqt_srcs)
				return NULL;
			mld_hdr_size += llqt_srcs * sizeof(struct in6_addr);
		}
	}

	pkt_size = sizeof(*eth) + sizeof(*ip6h) + 8 + mld_hdr_size;
	if ((p && pkt_size > p->dev->mtu) ||
	    pkt_size > br->dev->mtu)
		return NULL;

	skb = netdev_alloc_skb_ip_align(br->dev, pkt_size);
	if (!skb)
		goto out;

	skb->protocol = htons(ETH_P_IPV6);

	/* Ethernet header */
	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	ether_addr_copy(eth->h_source, br->dev->dev_addr);
	eth->h_proto = htons(ETH_P_IPV6);
	skb_put(skb, sizeof(*eth));

	/* IPv6 header + HbH option */
	skb_set_network_header(skb, skb->len);
	ip6h = ipv6_hdr(skb);

	*(__force __be32 *)ip6h = htonl(0x60000000);
	ip6h->payload_len = htons(8 + mld_hdr_size);
	ip6h->nexthdr = IPPROTO_HOPOPTS;
	ip6h->hop_limit = 1;
	ip6h->daddr = *ip6_dst;
	if (ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0,
			       &ip6h->saddr)) {
		kfree_skb(skb);
		br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, false);
		return NULL;
	}

	br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, true);
	ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);

	hopopt = (u8 *)(ip6h + 1);
	hopopt[0] = IPPROTO_ICMPV6;		/* next hdr */
	hopopt[1] = 0;				/* length of HbH */
	hopopt[2] = IPV6_TLV_ROUTERALERT;	/* Router Alert */
	hopopt[3] = 2;				/* Length of RA Option */
	hopopt[4] = 0;				/* Type = 0x0000 (MLD) */
	hopopt[5] = 0;
	hopopt[6] = IPV6_TLV_PAD1;		/* Pad1 */
	hopopt[7] = IPV6_TLV_PAD1;		/* Pad1 */

	skb_put(skb, sizeof(*ip6h) + 8);

	/* ICMPv6 */
	skb_set_transport_header(skb, skb->len);
	interval = ipv6_addr_any(group) ?
			br->multicast_query_response_interval :
			br->multicast_last_member_interval;
	*igmp_type = ICMPV6_MGM_QUERY;
	switch (br->multicast_mld_version) {
	case 1:
		mldq = (struct mld_msg *)icmp6_hdr(skb);
		mldq->mld_type = ICMPV6_MGM_QUERY;
		mldq->mld_code = 0;
		mldq->mld_cksum = 0;
		mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval));
		mldq->mld_reserved = 0;
		mldq->mld_mca = *group;
		csum = &mldq->mld_cksum;
		csum_start = (void *)mldq;
		break;
	case 2:
		mld2q = (struct mld2_query *)icmp6_hdr(skb);
		mld2q->mld2q_mrc = htons((u16)jiffies_to_msecs(interval));
		mld2q->mld2q_type = ICMPV6_MGM_QUERY;
		mld2q->mld2q_code = 0;
		mld2q->mld2q_cksum = 0;
		mld2q->mld2q_resv1 = 0;
		mld2q->mld2q_resv2 = 0;
		mld2q->mld2q_suppress = sflag;
		mld2q->mld2q_qrv = 2;
		mld2q->mld2q_nsrcs = htons(llqt_srcs);
		mld2q->mld2q_qqic = br->multicast_query_interval / HZ;
		mld2q->mld2q_mca = *group;
		csum = &mld2q->mld2q_cksum;
		csum_start = (void *)mld2q;
		if (!pg || !with_srcs)
			break;

		llqt_srcs = 0;
		hlist_for_each_entry(ent, &pg->src_list, node) {
			if (over_llqt == time_after(ent->timer.expires,
						    llqt) &&
			    ent->src_query_rexmit_cnt > 0) {
				mld2q->mld2q_srcs[llqt_srcs++] = ent->addr.src.ip6;
				ent->src_query_rexmit_cnt--;
				if (need_rexmit && ent->src_query_rexmit_cnt)
					*need_rexmit = true;
			}
		}

		if (WARN_ON(llqt_srcs != ntohs(mld2q->mld2q_nsrcs))) {
			kfree_skb(skb);
			return NULL;
		}
		break;
	}

	if (WARN_ON(!csum || !csum_start)) {
		kfree_skb(skb);
		return NULL;
	}

	*csum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, mld_hdr_size,
				IPPROTO_ICMPV6,
				csum_partial(csum_start, mld_hdr_size, 0));
	skb_put(skb, mld_hdr_size);
	__skb_pull(skb, sizeof(*eth));

out:
	return skb;
}
#endif
static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br,
						struct net_bridge_port_group *pg,
						struct br_ip *ip_dst,
						struct br_ip *group,
						bool with_srcs, bool over_lmqt,
						u8 sflag, u8 *igmp_type,
						bool *need_rexmit)
{
	__be32 ip4_dst;

	switch (group->proto) {
	case htons(ETH_P_IP):
		ip4_dst = ip_dst ? ip_dst->dst.ip4 : htonl(INADDR_ALLHOSTS_GROUP);
		return br_ip4_multicast_alloc_query(br, pg,
						    ip4_dst, group->dst.ip4,
						    with_srcs, over_lmqt,
						    sflag, igmp_type,
						    need_rexmit);
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6): {
		struct in6_addr ip6_dst;

		if (ip_dst)
			ip6_dst = ip_dst->dst.ip6;
		else
			ipv6_addr_set(&ip6_dst, htonl(0xff020000), 0, 0,
				      htonl(1));

		return br_ip6_multicast_alloc_query(br, pg,
						    &ip6_dst, &group->dst.ip6,
						    with_srcs, over_lmqt,
						    sflag, igmp_type,
						    need_rexmit);
	}
#endif
	}
	return NULL;
}
*br_multicast_new_group(struct net_bridge
*br
,
1034 struct br_ip
*group
)
1036 struct net_bridge_mdb_entry
*mp
;
1039 mp
= br_mdb_ip_get(br
, group
);
1043 if (atomic_read(&br
->mdb_hash_tbl
.nelems
) >= br
->hash_max
) {
1044 br_opt_toggle(br
, BROPT_MULTICAST_ENABLED
, false);
1045 return ERR_PTR(-E2BIG
);
1048 mp
= kzalloc(sizeof(*mp
), GFP_ATOMIC
);
1050 return ERR_PTR(-ENOMEM
);
1054 mp
->mcast_gc
.destroy
= br_multicast_destroy_mdb_entry
;
1055 timer_setup(&mp
->timer
, br_multicast_group_expired
, 0);
1056 err
= rhashtable_lookup_insert_fast(&br
->mdb_hash_tbl
, &mp
->rhnode
,
1062 hlist_add_head_rcu(&mp
->mdb_node
, &br
->mdb_list
);
static void br_multicast_group_src_expired(struct timer_list *t)
{
	struct net_bridge_group_src *src = from_timer(src, t, timer);
	struct net_bridge_port_group *pg;
	struct net_bridge *br = src->br;

	spin_lock(&br->multicast_lock);
	if (hlist_unhashed(&src->node) || !netif_running(br->dev) ||
	    timer_pending(&src->timer))
		goto out;

	pg = src->pg;
	if (pg->filter_mode == MCAST_INCLUDE) {
		br_multicast_del_group_src(src);
		if (!hlist_empty(&pg->src_list))
			goto out;
		br_multicast_find_del_pg(br, pg);
	} else {
		br_multicast_fwd_src_handle(src);
	}

out:
	spin_unlock(&br->multicast_lock);
}
static struct net_bridge_group_src *
br_multicast_find_group_src(struct net_bridge_port_group *pg, struct br_ip *ip)
{
	struct net_bridge_group_src *ent;

	switch (ip->proto) {
	case htons(ETH_P_IP):
		hlist_for_each_entry(ent, &pg->src_list, node)
			if (ip->src.ip4 == ent->addr.src.ip4)
				return ent;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		hlist_for_each_entry(ent, &pg->src_list, node)
			if (!ipv6_addr_cmp(&ent->addr.src.ip6, &ip->src.ip6))
				return ent;
		break;
#endif
	}

	return NULL;
}
static struct net_bridge_group_src *
br_multicast_new_group_src(struct net_bridge_port_group *pg, struct br_ip *src_ip)
{
	struct net_bridge_group_src *grp_src;

	if (unlikely(pg->src_ents >= PG_SRC_ENT_LIMIT))
		return NULL;

	switch (src_ip->proto) {
	case htons(ETH_P_IP):
		if (ipv4_is_zeronet(src_ip->src.ip4) ||
		    ipv4_is_multicast(src_ip->src.ip4))
			return NULL;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		if (ipv6_addr_any(&src_ip->src.ip6) ||
		    ipv6_addr_is_multicast(&src_ip->src.ip6))
			return NULL;
		break;
#endif
	}

	grp_src = kzalloc(sizeof(*grp_src), GFP_ATOMIC);
	if (unlikely(!grp_src))
		return NULL;

	grp_src->pg = pg;
	grp_src->br = pg->key.port->br;
	grp_src->addr = *src_ip;
	grp_src->mcast_gc.destroy = br_multicast_destroy_group_src;
	timer_setup(&grp_src->timer, br_multicast_group_src_expired, 0);

	hlist_add_head_rcu(&grp_src->node, &pg->src_list);
	pg->src_ents++;

	return grp_src;
}
struct net_bridge_port_group *br_multicast_new_port_group(
			struct net_bridge_port *port,
			struct br_ip *group,
			struct net_bridge_port_group __rcu *next,
			unsigned char flags,
			const unsigned char *src,
			u8 filter_mode,
			u8 rt_protocol)
{
	struct net_bridge_port_group *p;

	p = kzalloc(sizeof(*p), GFP_ATOMIC);
	if (unlikely(!p))
		return NULL;

	p->key.addr = *group;
	p->key.port = port;
	p->flags = flags;
	p->filter_mode = filter_mode;
	p->rt_protocol = rt_protocol;
	p->mcast_gc.destroy = br_multicast_destroy_port_group;
	INIT_HLIST_HEAD(&p->src_list);

	if (!br_multicast_is_star_g(group) &&
	    rhashtable_lookup_insert_fast(&port->br->sg_port_tbl, &p->rhnode,
					  br_sg_port_rht_params)) {
		kfree(p);
		return NULL;
	}

	rcu_assign_pointer(p->next, next);
	timer_setup(&p->timer, br_multicast_port_group_expired, 0);
	timer_setup(&p->rexmit_timer, br_multicast_port_group_rexmit, 0);
	hlist_add_head(&p->mglist, &port->mglist);

	if (src)
		memcpy(p->eth_addr, src, ETH_ALEN);
	else
		eth_broadcast_addr(p->eth_addr);

	return p;
}
void br_multicast_host_join(struct net_bridge_mdb_entry *mp, bool notify)
{
	if (!mp->host_joined) {
		mp->host_joined = true;
		if (br_multicast_is_star_g(&mp->addr))
			br_multicast_star_g_host_state(mp);
		if (notify)
			br_mdb_notify(mp->br->dev, mp, NULL, RTM_NEWMDB);
	}

	if (br_group_is_l2(&mp->addr))
		return;

	mod_timer(&mp->timer, jiffies + mp->br->multicast_membership_interval);
}

void br_multicast_host_leave(struct net_bridge_mdb_entry *mp, bool notify)
{
	if (!mp->host_joined)
		return;

	mp->host_joined = false;
	if (br_multicast_is_star_g(&mp->addr))
		br_multicast_star_g_host_state(mp);
	if (notify)
		br_mdb_notify(mp->br->dev, mp, NULL, RTM_DELMDB);
}
static struct net_bridge_port_group *
__br_multicast_add_group(struct net_bridge *br,
			 struct net_bridge_port *port,
			 struct br_ip *group,
			 const unsigned char *src,
			 u8 filter_mode,
			 bool igmpv2_mldv1,
			 bool blocked)
{
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p = NULL;
	struct net_bridge_mdb_entry *mp;
	unsigned long now = jiffies;

	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	mp = br_multicast_new_group(br, group);
	if (IS_ERR(mp))
		return ERR_PTR(PTR_ERR(mp));

	if (!port) {
		br_multicast_host_join(mp, true);
		goto out;
	}

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (br_port_group_equal(p, port, src))
			goto found;
		if ((unsigned long)p->key.port < (unsigned long)port)
			break;
	}

	p = br_multicast_new_port_group(port, group, *pp, 0, src,
					filter_mode, RTPROT_KERNEL);
	if (unlikely(!p)) {
		p = ERR_PTR(-ENOMEM);
		goto out;
	}
	rcu_assign_pointer(*pp, p);
	if (blocked)
		p->flags |= MDB_PG_FLAGS_BLOCKED;
	br_mdb_notify(br->dev, mp, p, RTM_NEWMDB);

found:
	if (igmpv2_mldv1)
		mod_timer(&p->timer, now + br->multicast_membership_interval);

out:
	return p;
}
static int br_multicast_add_group(struct net_bridge *br,
				  struct net_bridge_port *port,
				  struct br_ip *group,
				  const unsigned char *src,
				  u8 filter_mode,
				  bool igmpv2_mldv1)
{
	struct net_bridge_port_group *pg;
	int err;

	spin_lock(&br->multicast_lock);
	pg = __br_multicast_add_group(br, port, group, src, filter_mode,
				      igmpv2_mldv1, false);
	/* NULL is considered valid for host joined groups */
	err = IS_ERR(pg) ? PTR_ERR(pg) : 0;
	spin_unlock(&br->multicast_lock);

	return err;
}
static int br_ip4_multicast_add_group(struct net_bridge *br,
				      struct net_bridge_port *port,
				      __be32 group,
				      __u16 vid,
				      const unsigned char *src,
				      bool igmpv2)
{
	struct br_ip br_group;
	u8 filter_mode;

	if (ipv4_is_local_multicast(group))
		return 0;

	memset(&br_group, 0, sizeof(br_group));
	br_group.dst.ip4 = group;
	br_group.proto = htons(ETH_P_IP);
	br_group.vid = vid;
	filter_mode = igmpv2 ? MCAST_EXCLUDE : MCAST_INCLUDE;

	return br_multicast_add_group(br, port, &br_group, src, filter_mode,
				      igmpv2);
}
#if IS_ENABLED(CONFIG_IPV6)
static int br_ip6_multicast_add_group(struct net_bridge *br,
				      struct net_bridge_port *port,
				      const struct in6_addr *group,
				      __u16 vid,
				      const unsigned char *src,
				      bool mldv1)
{
	struct br_ip br_group;
	u8 filter_mode;

	if (ipv6_addr_is_ll_all_nodes(group))
		return 0;

	memset(&br_group, 0, sizeof(br_group));
	br_group.dst.ip6 = *group;
	br_group.proto = htons(ETH_P_IPV6);
	br_group.vid = vid;
	filter_mode = mldv1 ? MCAST_EXCLUDE : MCAST_INCLUDE;

	return br_multicast_add_group(br, port, &br_group, src, filter_mode,
				      mldv1);
}
#endif
static void br_multicast_router_expired(struct timer_list *t)
{
	struct net_bridge_port *port =
			from_timer(port, t, multicast_router_timer);
	struct net_bridge *br = port->br;

	spin_lock(&br->multicast_lock);
	if (port->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    port->multicast_router == MDB_RTR_TYPE_PERM ||
	    timer_pending(&port->multicast_router_timer))
		goto out;

	__del_port_router(port);
out:
	spin_unlock(&br->multicast_lock);
}
static void br_mc_router_state_change(struct net_bridge *p,
				      bool is_mc_router)
{
	struct switchdev_attr attr = {
		.orig_dev = p->dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_MROUTER,
		.flags = SWITCHDEV_F_DEFER,
		.u.mrouter = is_mc_router,
	};

	switchdev_port_attr_set(p->dev, &attr);
}
static void br_multicast_local_router_expired(struct timer_list *t)
{
	struct net_bridge *br = from_timer(br, t, multicast_router_timer);

	spin_lock(&br->multicast_lock);
	if (br->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    br->multicast_router == MDB_RTR_TYPE_PERM ||
	    timer_pending(&br->multicast_router_timer))
		goto out;

	br_mc_router_state_change(br, false);
out:
	spin_unlock(&br->multicast_lock);
}
static void br_multicast_querier_expired(struct net_bridge *br,
					 struct bridge_mcast_own_query *query)
{
	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || !br_opt_get(br, BROPT_MULTICAST_ENABLED))
		goto out;

	br_multicast_start_querier(br, query);

out:
	spin_unlock(&br->multicast_lock);
}
static void br_ip4_multicast_querier_expired(struct timer_list *t)
{
	struct net_bridge *br = from_timer(br, t, ip4_other_query.timer);

	br_multicast_querier_expired(br, &br->ip4_own_query);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_querier_expired(struct timer_list *t)
{
	struct net_bridge *br = from_timer(br, t, ip6_other_query.timer);

	br_multicast_querier_expired(br, &br->ip6_own_query);
}
#endif
static void br_multicast_select_own_querier(struct net_bridge *br,
					    struct br_ip *ip,
					    struct sk_buff *skb)
{
	if (ip->proto == htons(ETH_P_IP))
		br->ip4_querier.addr.src.ip4 = ip_hdr(skb)->saddr;
#if IS_ENABLED(CONFIG_IPV6)
	else
		br->ip6_querier.addr.src.ip6 = ipv6_hdr(skb)->saddr;
#endif
}
static void __br_multicast_send_query(struct net_bridge *br,
				      struct net_bridge_port *port,
				      struct net_bridge_port_group *pg,
				      struct br_ip *ip_dst,
				      struct br_ip *group,
				      bool with_srcs,
				      u8 sflag,
				      bool *need_rexmit)
{
	bool over_lmqt = !!sflag;
	struct sk_buff *skb;
	u8 igmp_type;

again_under_lmqt:
	skb = br_multicast_alloc_query(br, pg, ip_dst, group, with_srcs,
				       over_lmqt, sflag, &igmp_type,
				       need_rexmit);
	if (!skb)
		return;

	if (port) {
		skb->dev = port->dev;
		br_multicast_count(br, port, skb, igmp_type,
				   BR_MCAST_DIR_TX);
		NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT,
			dev_net(port->dev), NULL, skb, NULL, skb->dev,
			br_dev_queue_push_xmit);

		if (over_lmqt && with_srcs && sflag) {
			over_lmqt = false;
			goto again_under_lmqt;
		}
	} else {
		br_multicast_select_own_querier(br, group, skb);
		br_multicast_count(br, port, skb, igmp_type,
				   BR_MCAST_DIR_RX);
		netif_rx(skb);
	}
}
static void br_multicast_send_query(struct net_bridge *br,
				    struct net_bridge_port *port,
				    struct bridge_mcast_own_query *own_query)
{
	struct bridge_mcast_other_query *other_query = NULL;
	struct br_ip br_group;
	unsigned long time;

	if (!netif_running(br->dev) ||
	    !br_opt_get(br, BROPT_MULTICAST_ENABLED) ||
	    !br_opt_get(br, BROPT_MULTICAST_QUERIER))
		return;

	memset(&br_group.dst, 0, sizeof(br_group.dst));

	if (port ? (own_query == &port->ip4_own_query) :
		   (own_query == &br->ip4_own_query)) {
		other_query = &br->ip4_other_query;
		br_group.proto = htons(ETH_P_IP);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		other_query = &br->ip6_other_query;
		br_group.proto = htons(ETH_P_IPV6);
#endif
	}

	if (!other_query || timer_pending(&other_query->timer))
		return;

	__br_multicast_send_query(br, port, NULL, NULL, &br_group, false, 0,
				  NULL);

	time = jiffies;
	time += own_query->startup_sent < br->multicast_startup_query_count ?
		br->multicast_startup_query_interval :
		br->multicast_query_interval;
	mod_timer(&own_query->timer, time);
}

static void
br_multicast_port_query_expired(struct net_bridge_port *port,
				struct bridge_mcast_own_query *query)
{
	struct net_bridge *br = port->br;

	spin_lock(&br->multicast_lock);
	if (port->state == BR_STATE_DISABLED ||
	    port->state == BR_STATE_BLOCKING)
		goto out;

	if (query->startup_sent < br->multicast_startup_query_count)
		query->startup_sent++;

	br_multicast_send_query(port->br, port, query);

out:
	spin_unlock(&br->multicast_lock);
}

static void br_ip4_multicast_port_query_expired(struct timer_list *t)
{
	struct net_bridge_port *port = from_timer(port, t, ip4_own_query.timer);

	br_multicast_port_query_expired(port, &port->ip4_own_query);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_port_query_expired(struct timer_list *t)
{
	struct net_bridge_port *port = from_timer(port, t, ip6_own_query.timer);

	br_multicast_port_query_expired(port, &port->ip6_own_query);
}
#endif
static void br_multicast_port_group_rexmit(struct timer_list *t)
{
	struct net_bridge_port_group *pg = from_timer(pg, t, rexmit_timer);
	struct bridge_mcast_other_query *other_query = NULL;
	struct net_bridge *br = pg->key.port->br;
	bool need_rexmit = false;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || hlist_unhashed(&pg->mglist) ||
	    !br_opt_get(br, BROPT_MULTICAST_ENABLED) ||
	    !br_opt_get(br, BROPT_MULTICAST_QUERIER))
		goto out;

	if (pg->key.addr.proto == htons(ETH_P_IP))
		other_query = &br->ip4_other_query;
#if IS_ENABLED(CONFIG_IPV6)
	else
		other_query = &br->ip6_other_query;
#endif

	if (!other_query || timer_pending(&other_query->timer))
		goto out;

	if (pg->grp_query_rexmit_cnt) {
		pg->grp_query_rexmit_cnt--;
		__br_multicast_send_query(br, pg->key.port, pg, &pg->key.addr,
					  &pg->key.addr, false, 1, NULL);
	}
	__br_multicast_send_query(br, pg->key.port, pg, &pg->key.addr,
				  &pg->key.addr, true, 0, &need_rexmit);

	if (pg->grp_query_rexmit_cnt || need_rexmit)
		mod_timer(&pg->rexmit_timer, jiffies +
					     br->multicast_last_member_interval);
out:
	spin_unlock(&br->multicast_lock);
}
static void br_mc_disabled_update(struct net_device *dev, bool value)
{
	struct switchdev_attr attr = {
		.orig_dev = dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED,
		.flags = SWITCHDEV_F_DEFER,
		.u.mc_disabled = !value,
	};

	switchdev_port_attr_set(dev, &attr);
}
int br_multicast_add_port(struct net_bridge_port *port)
{
	port->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;

	timer_setup(&port->multicast_router_timer,
		    br_multicast_router_expired, 0);
	timer_setup(&port->ip4_own_query.timer,
		    br_ip4_multicast_port_query_expired, 0);
#if IS_ENABLED(CONFIG_IPV6)
	timer_setup(&port->ip6_own_query.timer,
		    br_ip6_multicast_port_query_expired, 0);
#endif
	br_mc_disabled_update(port->dev,
			      br_opt_get(port->br, BROPT_MULTICAST_ENABLED));

	port->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
	if (!port->mcast_stats)
		return -ENOMEM;

	return 0;
}
void br_multicast_del_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;
	struct net_bridge_port_group *pg;
	HLIST_HEAD(deleted_head);
	struct hlist_node *n;

	/* Take care of the remaining groups, only perm ones should be left */
	spin_lock_bh(&br->multicast_lock);
	hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
		br_multicast_find_del_pg(br, pg);
	hlist_move_list(&br->mcast_gc_list, &deleted_head);
	spin_unlock_bh(&br->multicast_lock);
	br_multicast_gc(&deleted_head);
	del_timer_sync(&port->multicast_router_timer);
	free_percpu(port->mcast_stats);
}
static void br_multicast_enable(struct bridge_mcast_own_query *query)
{
	query->startup_sent = 0;

	if (try_to_del_timer_sync(&query->timer) >= 0 ||
	    del_timer(&query->timer))
		mod_timer(&query->timer, jiffies);
}
static void __br_multicast_enable_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED) || !netif_running(br->dev))
		return;

	br_multicast_enable(&port->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
	br_multicast_enable(&port->ip6_own_query);
#endif
	if (port->multicast_router == MDB_RTR_TYPE_PERM &&
	    hlist_unhashed(&port->rlist))
		br_multicast_add_router(br, port);
}

void br_multicast_enable_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;

	spin_lock(&br->multicast_lock);
	__br_multicast_enable_port(port);
	spin_unlock(&br->multicast_lock);
}
void br_multicast_disable_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;
	struct net_bridge_port_group *pg;
	struct hlist_node *n;

	spin_lock(&br->multicast_lock);
	hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
		if (!(pg->flags & MDB_PG_FLAGS_PERMANENT))
			br_multicast_find_del_pg(br, pg);

	__del_port_router(port);

	del_timer(&port->multicast_router_timer);
	del_timer(&port->ip4_own_query.timer);
#if IS_ENABLED(CONFIG_IPV6)
	del_timer(&port->ip6_own_query.timer);
#endif
	spin_unlock(&br->multicast_lock);
}
static int __grp_src_delete_marked(struct net_bridge_port_group *pg)
{
	struct net_bridge_group_src *ent;
	struct hlist_node *tmp;
	int deleted = 0;

	hlist_for_each_entry_safe(ent, tmp, &pg->src_list, node)
		if (ent->flags & BR_SGRP_F_DELETE) {
			br_multicast_del_group_src(ent);
			deleted++;
		}

	return deleted;
}

static void __grp_src_mod_timer(struct net_bridge_group_src *src,
				unsigned long expires)
{
	mod_timer(&src->timer, expires);
	br_multicast_fwd_src_handle(src);
}
static void __grp_src_query_marked_and_rexmit(struct net_bridge_port_group *pg)
{
	struct bridge_mcast_other_query *other_query = NULL;
	struct net_bridge *br = pg->key.port->br;
	u32 lmqc = br->multicast_last_member_count;
	unsigned long lmqt, lmi, now = jiffies;
	struct net_bridge_group_src *ent;

	if (!netif_running(br->dev) ||
	    !br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return;

	if (pg->key.addr.proto == htons(ETH_P_IP))
		other_query = &br->ip4_other_query;
#if IS_ENABLED(CONFIG_IPV6)
	else
		other_query = &br->ip6_other_query;
#endif

	lmqt = now + br_multicast_lmqt(br);
	hlist_for_each_entry(ent, &pg->src_list, node) {
		if (ent->flags & BR_SGRP_F_SEND) {
			ent->flags &= ~BR_SGRP_F_SEND;
			if (ent->timer.expires > lmqt) {
				if (br_opt_get(br, BROPT_MULTICAST_QUERIER) &&
				    other_query &&
				    !timer_pending(&other_query->timer))
					ent->src_query_rexmit_cnt = lmqc;
				__grp_src_mod_timer(ent, lmqt);
			}
		}
	}

	if (!br_opt_get(br, BROPT_MULTICAST_QUERIER) ||
	    !other_query || timer_pending(&other_query->timer))
		return;

	__br_multicast_send_query(br, pg->key.port, pg, &pg->key.addr,
				  &pg->key.addr, true, 1, NULL);

	lmi = now + br->multicast_last_member_interval;
	if (!timer_pending(&pg->rexmit_timer) ||
	    time_after(pg->rexmit_timer.expires, lmi))
		mod_timer(&pg->rexmit_timer, lmi);
}
static void __grp_send_query_and_rexmit(struct net_bridge_port_group *pg)
{
	struct bridge_mcast_other_query *other_query = NULL;
	struct net_bridge *br = pg->key.port->br;
	unsigned long now = jiffies, lmi;

	if (!netif_running(br->dev) ||
	    !br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return;

	if (pg->key.addr.proto == htons(ETH_P_IP))
		other_query = &br->ip4_other_query;
#if IS_ENABLED(CONFIG_IPV6)
	else
		other_query = &br->ip6_other_query;
#endif

	if (br_opt_get(br, BROPT_MULTICAST_QUERIER) &&
	    other_query && !timer_pending(&other_query->timer)) {
		lmi = now + br->multicast_last_member_interval;
		pg->grp_query_rexmit_cnt = br->multicast_last_member_count - 1;
		__br_multicast_send_query(br, pg->key.port, pg, &pg->key.addr,
					  &pg->key.addr, false, 0, NULL);
		if (!timer_pending(&pg->rexmit_timer) ||
		    time_after(pg->rexmit_timer.expires, lmi))
			mod_timer(&pg->rexmit_timer, lmi);
	}

	if (pg->filter_mode == MCAST_EXCLUDE &&
	    (!timer_pending(&pg->timer) ||
	     time_after(pg->timer.expires, now + br_multicast_lmqt(br))))
		mod_timer(&pg->timer, now + br_multicast_lmqt(br));
}
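/* The __grp_src_* / br_multicast_* helpers below implement the per-record
 * source-list handling of IGMPv3 (RFC 3376) and MLDv2 (RFC 3810) reports.
 * In the state tables: GMI is the group membership interval
 * (br_multicast_gmi()), LMQT the last member query time (br_multicast_lmqt())
 * and Q(G)/Q(G,A) the group resp. group-and-source specific queries emitted
 * via __grp_send_query_and_rexmit() and __grp_src_query_marked_and_rexmit().
 */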
/* State          Msg type      New state         Actions
 * INCLUDE (A)    IS_IN (B)     INCLUDE (A+B)     (B)=GMI
 * INCLUDE (A)    ALLOW (B)     INCLUDE (A+B)     (B)=GMI
 * EXCLUDE (X,Y)  ALLOW (A)     EXCLUDE (X+A,Y-A) (A)=GMI
 */
static bool br_multicast_isinc_allow(struct net_bridge_port_group *pg,
				     void *srcs, u32 nsrcs, size_t src_size)
{
	struct net_bridge *br = pg->key.port->br;
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	bool changed = false;
	struct br_ip src_ip;
	u32 src_idx;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs, src_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (!ent) {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent)
				changed = true;
		}

		if (ent)
			__grp_src_mod_timer(ent, now + br_multicast_gmi(br));
		srcs += src_size;
	}

	return changed;
}
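/* br_multicast_isinc_allow() is shared by the ALLOW_NEW_SOURCES and
 * MODE_IS_INCLUDE record types: in both cases the listed sources are merged
 * into the source list and their timers refreshed to GMI, and the return
 * value reports whether a new source entry was actually created.
 */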
/* State          Msg type      New state         Actions
 * INCLUDE (A)    IS_EX (B)     EXCLUDE (A*B,B-A) (B-A)=0
 *                                                Delete (A-B)
 *                                                Group Timer=GMI
 */
static void __grp_src_isexc_incl(struct net_bridge_port_group *pg,
				 void *srcs, u32 nsrcs, size_t src_size)
{
	struct net_bridge_group_src *ent;
	struct br_ip src_ip;
	u32 src_idx;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags |= BR_SGRP_F_DELETE;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs, src_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent)
			ent->flags &= ~BR_SGRP_F_DELETE;
		else
			ent = br_multicast_new_group_src(pg, &src_ip);
		if (ent)
			br_multicast_fwd_src_handle(ent);
		srcs += src_size;
	}

	__grp_src_delete_marked(pg);
}
/* State          Msg type      New state         Actions
 * EXCLUDE (X,Y)  IS_EX (A)     EXCLUDE (A-Y,Y*A) (A-X-Y)=GMI
 *                                                Delete (X-A)
 *                                                Delete (Y-A)
 *                                                Group Timer=GMI
 */
static bool __grp_src_isexc_excl(struct net_bridge_port_group *pg,
				 void *srcs, u32 nsrcs, size_t src_size)
{
	struct net_bridge *br = pg->key.port->br;
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	bool changed = false;
	struct br_ip src_ip;
	u32 src_idx;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags |= BR_SGRP_F_DELETE;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs, src_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			ent->flags &= ~BR_SGRP_F_DELETE;
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent) {
				__grp_src_mod_timer(ent,
						    now + br_multicast_gmi(br));
				changed = true;
			}
		}
		srcs += src_size;
	}

	if (__grp_src_delete_marked(pg))
		changed = true;

	return changed;
}
static bool br_multicast_isexc(struct net_bridge_port_group *pg,
			       void *srcs, u32 nsrcs, size_t src_size)
{
	struct net_bridge *br = pg->key.port->br;
	bool changed = false;

	switch (pg->filter_mode) {
	case MCAST_INCLUDE:
		__grp_src_isexc_incl(pg, srcs, nsrcs, src_size);
		br_multicast_star_g_handle_mode(pg, MCAST_EXCLUDE);
		changed = true;
		break;
	case MCAST_EXCLUDE:
		changed = __grp_src_isexc_excl(pg, srcs, nsrcs, src_size);
		break;
	}

	pg->filter_mode = MCAST_EXCLUDE;
	mod_timer(&pg->timer, jiffies + br_multicast_gmi(br));

	return changed;
}
/* State          Msg type      New state         Actions
 * INCLUDE (A)    TO_IN (B)     INCLUDE (A+B)     (B)=GMI
 *                                                Send Q(G,A-B)
 */
static bool __grp_src_toin_incl(struct net_bridge_port_group *pg,
				void *srcs, u32 nsrcs, size_t src_size)
{
	struct net_bridge *br = pg->key.port->br;
	u32 src_idx, to_send = pg->src_ents;
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	bool changed = false;
	struct br_ip src_ip;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags |= BR_SGRP_F_SEND;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs, src_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			ent->flags &= ~BR_SGRP_F_SEND;
			to_send--;
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent)
				changed = true;
		}
		if (ent)
			__grp_src_mod_timer(ent, now + br_multicast_gmi(br));
		srcs += src_size;
	}

	if (to_send)
		__grp_src_query_marked_and_rexmit(pg);

	return changed;
}
/* State          Msg type      New state         Actions
 * EXCLUDE (X,Y)  TO_IN (A)     EXCLUDE (X+A,Y-A) (A)=GMI
 *                                                Send Q(G,X-A)
 *                                                Send Q(G)
 */
static bool __grp_src_toin_excl(struct net_bridge_port_group *pg,
				void *srcs, u32 nsrcs, size_t src_size)
{
	struct net_bridge *br = pg->key.port->br;
	u32 src_idx, to_send = pg->src_ents;
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	bool changed = false;
	struct br_ip src_ip;

	hlist_for_each_entry(ent, &pg->src_list, node)
		if (timer_pending(&ent->timer))
			ent->flags |= BR_SGRP_F_SEND;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs, src_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			if (timer_pending(&ent->timer)) {
				ent->flags &= ~BR_SGRP_F_SEND;
				to_send--;
			}
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent)
				changed = true;
		}
		if (ent)
			__grp_src_mod_timer(ent, now + br_multicast_gmi(br));
		srcs += src_size;
	}

	if (to_send)
		__grp_src_query_marked_and_rexmit(pg);

	__grp_send_query_and_rexmit(pg);

	return changed;
}
static bool br_multicast_toin(struct net_bridge_port_group *pg,
			      void *srcs, u32 nsrcs, size_t src_size)
{
	bool changed = false;

	switch (pg->filter_mode) {
	case MCAST_INCLUDE:
		changed = __grp_src_toin_incl(pg, srcs, nsrcs, src_size);
		break;
	case MCAST_EXCLUDE:
		changed = __grp_src_toin_excl(pg, srcs, nsrcs, src_size);
		break;
	}

	return changed;
}
/* State          Msg type      New state         Actions
 * INCLUDE (A)    TO_EX (B)     EXCLUDE (A*B,B-A) (B-A)=0
 *                                                Delete (A-B)
 *                                                Send Q(G,A*B)
 *                                                Group Timer=GMI
 */
static void __grp_src_toex_incl(struct net_bridge_port_group *pg,
				void *srcs, u32 nsrcs, size_t src_size)
{
	struct net_bridge_group_src *ent;
	u32 src_idx, to_send = 0;
	struct br_ip src_ip;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags = (ent->flags & ~BR_SGRP_F_SEND) | BR_SGRP_F_DELETE;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs, src_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			ent->flags = (ent->flags & ~BR_SGRP_F_DELETE) |
				     BR_SGRP_F_SEND;
			to_send++;
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
		}
		if (ent)
			br_multicast_fwd_src_handle(ent);
		srcs += src_size;
	}

	__grp_src_delete_marked(pg);
	if (to_send)
		__grp_src_query_marked_and_rexmit(pg);
}
/* State          Msg type      New state         Actions
 * EXCLUDE (X,Y)  TO_EX (A)     EXCLUDE (A-Y,Y*A) (A-X-Y)=Group Timer
 *                                                Delete (X-A)
 *                                                Delete (Y-A)
 *                                                Send Q(G,A-Y)
 *                                                Group Timer=GMI
 */
static bool __grp_src_toex_excl(struct net_bridge_port_group *pg,
				void *srcs, u32 nsrcs, size_t src_size)
{
	struct net_bridge_group_src *ent;
	u32 src_idx, to_send = 0;
	bool changed = false;
	struct br_ip src_ip;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags = (ent->flags & ~BR_SGRP_F_SEND) | BR_SGRP_F_DELETE;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs, src_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			ent->flags &= ~BR_SGRP_F_DELETE;
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent) {
				__grp_src_mod_timer(ent, pg->timer.expires);
				changed = true;
			}
		}
		if (ent && timer_pending(&ent->timer)) {
			ent->flags |= BR_SGRP_F_SEND;
			to_send++;
		}
		srcs += src_size;
	}

	if (__grp_src_delete_marked(pg))
		changed = true;
	if (to_send)
		__grp_src_query_marked_and_rexmit(pg);

	return changed;
}
static bool br_multicast_toex(struct net_bridge_port_group *pg,
			      void *srcs, u32 nsrcs, size_t src_size)
{
	struct net_bridge *br = pg->key.port->br;
	bool changed = false;

	switch (pg->filter_mode) {
	case MCAST_INCLUDE:
		__grp_src_toex_incl(pg, srcs, nsrcs, src_size);
		br_multicast_star_g_handle_mode(pg, MCAST_EXCLUDE);
		changed = true;
		break;
	case MCAST_EXCLUDE:
		changed = __grp_src_toex_excl(pg, srcs, nsrcs, src_size);
		break;
	}

	pg->filter_mode = MCAST_EXCLUDE;
	mod_timer(&pg->timer, jiffies + br_multicast_gmi(br));

	return changed;
}
/* State          Msg type      New state         Actions
 * INCLUDE (A)    BLOCK (B)     INCLUDE (A)       Send Q(G,A*B)
 */
static void __grp_src_block_incl(struct net_bridge_port_group *pg,
				 void *srcs, u32 nsrcs, size_t src_size)
{
	struct net_bridge_group_src *ent;
	u32 src_idx, to_send = 0;
	struct br_ip src_ip;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags &= ~BR_SGRP_F_SEND;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs, src_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			ent->flags |= BR_SGRP_F_SEND;
			to_send++;
		}
		srcs += src_size;
	}

	if (to_send)
		__grp_src_query_marked_and_rexmit(pg);

	if (pg->filter_mode == MCAST_INCLUDE && hlist_empty(&pg->src_list))
		br_multicast_find_del_pg(pg->key.port->br, pg);
}
/* State          Msg type      New state         Actions
 * EXCLUDE (X,Y)  BLOCK (A)     EXCLUDE (X+(A-Y),Y) (A-X-Y)=Group Timer
 *                                                  Send Q(G,A-Y)
 */
static bool __grp_src_block_excl(struct net_bridge_port_group *pg,
				 void *srcs, u32 nsrcs, size_t src_size)
{
	struct net_bridge_group_src *ent;
	u32 src_idx, to_send = 0;
	bool changed = false;
	struct br_ip src_ip;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags &= ~BR_SGRP_F_SEND;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs, src_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (!ent) {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent) {
				__grp_src_mod_timer(ent, pg->timer.expires);
				changed = true;
			}
		}
		if (ent && timer_pending(&ent->timer)) {
			ent->flags |= BR_SGRP_F_SEND;
			to_send++;
		}
		srcs += src_size;
	}

	if (to_send)
		__grp_src_query_marked_and_rexmit(pg);

	return changed;
}
static bool br_multicast_block(struct net_bridge_port_group *pg,
			       void *srcs, u32 nsrcs, size_t src_size)
{
	bool changed = false;

	switch (pg->filter_mode) {
	case MCAST_INCLUDE:
		__grp_src_block_incl(pg, srcs, nsrcs, src_size);
		break;
	case MCAST_EXCLUDE:
		changed = __grp_src_block_excl(pg, srcs, nsrcs, src_size);
		break;
	}

	return changed;
}
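/* The record-type handlers above return true when the port group's source
 * list or filter mode actually changed; the IGMPv3/MLDv2 report handlers use
 * that to decide whether to emit an RTM_NEWMDB notification for the entry.
 */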
static struct net_bridge_port_group *
br_multicast_find_port(struct net_bridge_mdb_entry *mp,
		       struct net_bridge_port *p,
		       const unsigned char *src)
{
	struct net_bridge *br __maybe_unused = mp->br;
	struct net_bridge_port_group *pg;

	for (pg = mlock_dereference(mp->ports, br);
	     pg;
	     pg = mlock_dereference(pg->next, br))
		if (br_port_group_equal(pg, p, src))
			return pg;

	return NULL;
}
static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
					 struct net_bridge_port *port,
					 struct sk_buff *skb,
					 u16 vid)
{
	bool igmpv2 = br->multicast_igmp_version == 2;
	struct net_bridge_mdb_entry *mdst;
	struct net_bridge_port_group *pg;
	const unsigned char *src;
	struct igmpv3_report *ih;
	struct igmpv3_grec *grec;
	int i, len, num, type;
	bool changed = false;
	__be32 group;
	int err = 0;
	u16 nsrcs;

	ih = igmpv3_report_hdr(skb);
	num = ntohs(ih->ngrec);
	len = skb_transport_offset(skb) + sizeof(*ih);

	for (i = 0; i < num; i++) {
		len += sizeof(*grec);
		if (!ip_mc_may_pull(skb, len))
			return -EINVAL;

		grec = (void *)(skb->data + len - sizeof(*grec));
		group = grec->grec_mca;
		type = grec->grec_type;
		nsrcs = ntohs(grec->grec_nsrcs);

		len += nsrcs * 4;
		if (!ip_mc_may_pull(skb, len))
			return -EINVAL;

		switch (type) {
		case IGMPV3_MODE_IS_INCLUDE:
		case IGMPV3_MODE_IS_EXCLUDE:
		case IGMPV3_CHANGE_TO_INCLUDE:
		case IGMPV3_CHANGE_TO_EXCLUDE:
		case IGMPV3_ALLOW_NEW_SOURCES:
		case IGMPV3_BLOCK_OLD_SOURCES:
			break;

		default:
			continue;
		}

		src = eth_hdr(skb)->h_source;
		if (nsrcs == 0 &&
		    (type == IGMPV3_CHANGE_TO_INCLUDE ||
		     type == IGMPV3_MODE_IS_INCLUDE)) {
			if (!port || igmpv2) {
				br_ip4_multicast_leave_group(br, port, group, vid, src);
				continue;
			}
		} else {
			err = br_ip4_multicast_add_group(br, port, group, vid,
							 src, igmpv2);
			if (err)
				break;
		}

		if (!port || igmpv2)
			continue;

		spin_lock_bh(&br->multicast_lock);
		mdst = br_mdb_ip4_get(br, group, vid);
		if (!mdst)
			goto unlock_continue;
		pg = br_multicast_find_port(mdst, port, src);
		if (!pg || (pg->flags & MDB_PG_FLAGS_PERMANENT))
			goto unlock_continue;
		/* reload grec */
		grec = (void *)(skb->data + len - sizeof(*grec) - (nsrcs * 4));
		switch (type) {
		case IGMPV3_ALLOW_NEW_SOURCES:
			changed = br_multicast_isinc_allow(pg, grec->grec_src,
							   nsrcs, sizeof(__be32));
			break;
		case IGMPV3_MODE_IS_INCLUDE:
			changed = br_multicast_isinc_allow(pg, grec->grec_src, nsrcs,
							   sizeof(__be32));
			break;
		case IGMPV3_MODE_IS_EXCLUDE:
			changed = br_multicast_isexc(pg, grec->grec_src, nsrcs,
						     sizeof(__be32));
			break;
		case IGMPV3_CHANGE_TO_INCLUDE:
			changed = br_multicast_toin(pg, grec->grec_src, nsrcs,
						    sizeof(__be32));
			break;
		case IGMPV3_CHANGE_TO_EXCLUDE:
			changed = br_multicast_toex(pg, grec->grec_src, nsrcs,
						    sizeof(__be32));
			break;
		case IGMPV3_BLOCK_OLD_SOURCES:
			changed = br_multicast_block(pg, grec->grec_src, nsrcs,
						     sizeof(__be32));
			break;
		}
		if (changed)
			br_mdb_notify(br->dev, mdst, pg, RTM_NEWMDB);
unlock_continue:
		spin_unlock_bh(&br->multicast_lock);
	}

	return err;
}
#if IS_ENABLED(CONFIG_IPV6)
static int br_ip6_multicast_mld2_report(struct net_bridge *br,
					struct net_bridge_port *port,
					struct sk_buff *skb,
					u16 vid)
{
	bool mldv1 = br->multicast_mld_version == 1;
	struct net_bridge_mdb_entry *mdst;
	struct net_bridge_port_group *pg;
	unsigned int nsrcs_offset;
	const unsigned char *src;
	struct icmp6hdr *icmp6h;
	struct mld2_grec *grec;
	unsigned int grec_len;
	bool changed = false;
	int i, len, num;
	int err = 0;

	if (!ipv6_mc_may_pull(skb, sizeof(*icmp6h)))
		return -EINVAL;

	icmp6h = icmp6_hdr(skb);
	num = ntohs(icmp6h->icmp6_dataun.un_data16[1]);
	len = skb_transport_offset(skb) + sizeof(*icmp6h);

	for (i = 0; i < num; i++) {
		__be16 *_nsrcs, __nsrcs;
		u16 nsrcs;

		nsrcs_offset = len + offsetof(struct mld2_grec, grec_nsrcs);

		if (skb_transport_offset(skb) + ipv6_transport_len(skb) <
		    nsrcs_offset + sizeof(__nsrcs))
			return -EINVAL;

		_nsrcs = skb_header_pointer(skb, nsrcs_offset,
					    sizeof(__nsrcs), &__nsrcs);
		if (!_nsrcs)
			return -EINVAL;

		nsrcs = ntohs(*_nsrcs);
		grec_len = struct_size(grec, grec_src, nsrcs);

		if (!ipv6_mc_may_pull(skb, len + grec_len))
			return -EINVAL;

		grec = (struct mld2_grec *)(skb->data + len);
		len += grec_len;

		switch (grec->grec_type) {
		case MLD2_MODE_IS_INCLUDE:
		case MLD2_MODE_IS_EXCLUDE:
		case MLD2_CHANGE_TO_INCLUDE:
		case MLD2_CHANGE_TO_EXCLUDE:
		case MLD2_ALLOW_NEW_SOURCES:
		case MLD2_BLOCK_OLD_SOURCES:
			break;

		default:
			continue;
		}

		src = eth_hdr(skb)->h_source;
		if ((grec->grec_type == MLD2_CHANGE_TO_INCLUDE ||
		     grec->grec_type == MLD2_MODE_IS_INCLUDE) &&
		    nsrcs == 0) {
			if (!port || mldv1) {
				br_ip6_multicast_leave_group(br, port,
							     &grec->grec_mca,
							     vid, src);
				continue;
			}
		} else {
			err = br_ip6_multicast_add_group(br, port,
							 &grec->grec_mca, vid,
							 src, mldv1);
			if (err)
				break;
		}

		if (!port || mldv1)
			continue;

		spin_lock_bh(&br->multicast_lock);
		mdst = br_mdb_ip6_get(br, &grec->grec_mca, vid);
		if (!mdst)
			goto unlock_continue;
		pg = br_multicast_find_port(mdst, port, src);
		if (!pg || (pg->flags & MDB_PG_FLAGS_PERMANENT))
			goto unlock_continue;
		switch (grec->grec_type) {
		case MLD2_ALLOW_NEW_SOURCES:
			changed = br_multicast_isinc_allow(pg, grec->grec_src,
							   nsrcs,
							   sizeof(struct in6_addr));
			break;
		case MLD2_MODE_IS_INCLUDE:
			changed = br_multicast_isinc_allow(pg, grec->grec_src, nsrcs,
							   sizeof(struct in6_addr));
			break;
		case MLD2_MODE_IS_EXCLUDE:
			changed = br_multicast_isexc(pg, grec->grec_src, nsrcs,
						     sizeof(struct in6_addr));
			break;
		case MLD2_CHANGE_TO_INCLUDE:
			changed = br_multicast_toin(pg, grec->grec_src, nsrcs,
						    sizeof(struct in6_addr));
			break;
		case MLD2_CHANGE_TO_EXCLUDE:
			changed = br_multicast_toex(pg, grec->grec_src, nsrcs,
						    sizeof(struct in6_addr));
			break;
		case MLD2_BLOCK_OLD_SOURCES:
			changed = br_multicast_block(pg, grec->grec_src, nsrcs,
						     sizeof(struct in6_addr));
			break;
		}
		if (changed)
			br_mdb_notify(br->dev, mdst, pg, RTM_NEWMDB);
unlock_continue:
		spin_unlock_bh(&br->multicast_lock);
	}

	return err;
}
#endif
static bool br_ip4_multicast_select_querier(struct net_bridge *br,
					    struct net_bridge_port *port,
					    __be32 saddr)
{
	if (!timer_pending(&br->ip4_own_query.timer) &&
	    !timer_pending(&br->ip4_other_query.timer))
		goto update;

	if (!br->ip4_querier.addr.src.ip4)
		goto update;

	if (ntohl(saddr) <= ntohl(br->ip4_querier.addr.src.ip4))
		goto update;

	return false;

update:
	br->ip4_querier.addr.src.ip4 = saddr;

	/* update protected by general multicast_lock by caller */
	rcu_assign_pointer(br->ip4_querier.port, port);

	return true;
}

#if IS_ENABLED(CONFIG_IPV6)
static bool br_ip6_multicast_select_querier(struct net_bridge *br,
					    struct net_bridge_port *port,
					    struct in6_addr *saddr)
{
	if (!timer_pending(&br->ip6_own_query.timer) &&
	    !timer_pending(&br->ip6_other_query.timer))
		goto update;

	if (ipv6_addr_cmp(saddr, &br->ip6_querier.addr.src.ip6) <= 0)
		goto update;

	return false;

update:
	br->ip6_querier.addr.src.ip6 = *saddr;

	/* update protected by general multicast_lock by caller */
	rcu_assign_pointer(br->ip6_querier.port, port);

	return true;
}
#endif

static bool br_multicast_select_querier(struct net_bridge *br,
					struct net_bridge_port *port,
					struct br_ip *saddr)
{
	switch (saddr->proto) {
	case htons(ETH_P_IP):
		return br_ip4_multicast_select_querier(br, port, saddr->src.ip4);
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		return br_ip6_multicast_select_querier(br, port, &saddr->src.ip6);
#endif
	}

	return false;
}

static void
br_multicast_update_query_timer(struct net_bridge *br,
				struct bridge_mcast_other_query *query,
				unsigned long max_delay)
{
	if (!timer_pending(&query->timer))
		query->delay_time = jiffies + max_delay;

	mod_timer(&query->timer, jiffies + br->multicast_querier_interval);
}
static void br_port_mc_router_state_change(struct net_bridge_port *p,
					   bool is_mc_router)
{
	struct switchdev_attr attr = {
		.orig_dev = p->dev,
		.id = SWITCHDEV_ATTR_ID_PORT_MROUTER,
		.flags = SWITCHDEV_F_DEFER,
		.u.mrouter = is_mc_router,
	};

	switchdev_port_attr_set(p->dev, &attr);
}

/* Add port to router_list
 *  list is maintained ordered by pointer value
 *  and locked by br->multicast_lock and RCU
 */
static void br_multicast_add_router(struct net_bridge *br,
				    struct net_bridge_port *port)
{
	struct net_bridge_port *p;
	struct hlist_node *slot = NULL;

	if (!hlist_unhashed(&port->rlist))
		return;

	hlist_for_each_entry(p, &br->router_list, rlist) {
		if ((unsigned long) port >= (unsigned long) p)
			break;
		slot = &p->rlist;
	}

	if (slot)
		hlist_add_behind_rcu(&port->rlist, slot);
	else
		hlist_add_head_rcu(&port->rlist, &br->router_list);
	br_rtr_notify(br->dev, port, RTM_NEWMDB);
	br_port_mc_router_state_change(port, true);
}

static void br_multicast_mark_router(struct net_bridge *br,
				     struct net_bridge_port *port)
{
	unsigned long now = jiffies;

	if (!port) {
		if (br->multicast_router == MDB_RTR_TYPE_TEMP_QUERY) {
			if (!timer_pending(&br->multicast_router_timer))
				br_mc_router_state_change(br, true);
			mod_timer(&br->multicast_router_timer,
				  now + br->multicast_querier_interval);
		}
		return;
	}

	if (port->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    port->multicast_router == MDB_RTR_TYPE_PERM)
		return;

	br_multicast_add_router(br, port);

	mod_timer(&port->multicast_router_timer,
		  now + br->multicast_querier_interval);
}

static void br_multicast_query_received(struct net_bridge *br,
					struct net_bridge_port *port,
					struct bridge_mcast_other_query *query,
					struct br_ip *saddr,
					unsigned long max_delay)
{
	if (!br_multicast_select_querier(br, port, saddr))
		return;

	br_multicast_update_query_timer(br, query, max_delay);
	br_multicast_mark_router(br, port);
}
static void br_ip4_multicast_query(struct net_bridge *br,
				   struct net_bridge_port *port,
				   struct sk_buff *skb,
				   u16 vid)
{
	unsigned int transport_len = ip_transport_len(skb);
	const struct iphdr *iph = ip_hdr(skb);
	struct igmphdr *ih = igmp_hdr(skb);
	struct net_bridge_mdb_entry *mp;
	struct igmpv3_query *ih3;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip saddr;
	unsigned long max_delay;
	unsigned long now = jiffies;
	__be32 group;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	group = ih->group;

	if (transport_len == sizeof(*ih)) {
		max_delay = ih->code * (HZ / IGMP_TIMER_SCALE);

		if (!max_delay) {
			max_delay = 10 * HZ;
			group = 0;
		}
	} else if (transport_len >= sizeof(*ih3)) {
		ih3 = igmpv3_query_hdr(skb);
		if (ih3->nsrcs ||
		    (br->multicast_igmp_version == 3 && group && ih3->suppress))
			goto out;

		max_delay = ih3->code ?
			    IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
	} else {
		goto out;
	}

	if (!group) {
		saddr.proto = htons(ETH_P_IP);
		saddr.src.ip4 = iph->saddr;

		br_multicast_query_received(br, port, &br->ip4_other_query,
					    &saddr, max_delay);
		goto out;
	}

	mp = br_mdb_ip4_get(br, group, vid);
	if (!mp)
		goto out;

	max_delay *= br->multicast_last_member_count;

	if (mp->host_joined &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(&p->timer) >= 0 &&
		    (br->multicast_igmp_version == 2 ||
		     p->filter_mode == MCAST_EXCLUDE))
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&br->multicast_lock);
}
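/* Worked example for the max_delay arithmetic above (assuming HZ=250;
 * IGMP_TIMER_SCALE is 10): an IGMPv2 query with code 100 (units of 1/10 s)
 * gives max_delay = 100 * (250/10) = 2500 jiffies = 10 s. A zero IGMPv2 code
 * is treated as 10 s and forces a general query, while a zero IGMPv3 code
 * yields 1 jiffy; exponent-coded IGMPv3 values are first expanded by
 * IGMPV3_MRC() and then scaled the same way. For a group-specific query the
 * result is further multiplied by multicast_last_member_count (2 by default)
 * before the entry timers are lowered.
 */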
#if IS_ENABLED(CONFIG_IPV6)
static int br_ip6_multicast_query(struct net_bridge *br,
				  struct net_bridge_port *port,
				  struct sk_buff *skb,
				  u16 vid)
{
	unsigned int transport_len = ipv6_transport_len(skb);
	struct mld_msg *mld;
	struct net_bridge_mdb_entry *mp;
	struct mld2_query *mld2q;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip saddr;
	unsigned long max_delay;
	unsigned long now = jiffies;
	unsigned int offset = skb_transport_offset(skb);
	const struct in6_addr *group = NULL;
	bool is_general_query;
	int err = 0;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	if (transport_len == sizeof(*mld)) {
		if (!pskb_may_pull(skb, offset + sizeof(*mld))) {
			err = -EINVAL;
			goto out;
		}
		mld = (struct mld_msg *) icmp6_hdr(skb);
		max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay));
		if (max_delay)
			group = &mld->mld_mca;
	} else {
		if (!pskb_may_pull(skb, offset + sizeof(*mld2q))) {
			err = -EINVAL;
			goto out;
		}
		mld2q = (struct mld2_query *)icmp6_hdr(skb);
		if (!mld2q->mld2q_nsrcs)
			group = &mld2q->mld2q_mca;
		if (br->multicast_mld_version == 2 &&
		    !ipv6_addr_any(&mld2q->mld2q_mca) &&
		    mld2q->mld2q_suppress)
			goto out;

		max_delay = max(msecs_to_jiffies(mldv2_mrc(mld2q)), 1UL);
	}

	is_general_query = group && ipv6_addr_any(group);

	if (is_general_query) {
		saddr.proto = htons(ETH_P_IPV6);
		saddr.src.ip6 = ipv6_hdr(skb)->saddr;

		br_multicast_query_received(br, port, &br->ip6_other_query,
					    &saddr, max_delay);
		goto out;
	} else if (!group) {
		goto out;
	}

	mp = br_mdb_ip6_get(br, group, vid);
	if (!mp)
		goto out;

	max_delay *= br->multicast_last_member_count;
	if (mp->host_joined &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(&p->timer) >= 0 &&
		    (br->multicast_mld_version == 1 ||
		     p->filter_mode == MCAST_EXCLUDE))
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&br->multicast_lock);
	return err;
}
#endif
static void
br_multicast_leave_group(struct net_bridge *br,
			 struct net_bridge_port *port,
			 struct br_ip *group,
			 struct bridge_mcast_other_query *other_query,
			 struct bridge_mcast_own_query *own_query,
			 const unsigned char *src)
{
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	unsigned long now;
	unsigned long time;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	mp = br_mdb_ip_get(br, group);
	if (!mp)
		goto out;

	if (port && (port->flags & BR_MULTICAST_FAST_LEAVE)) {
		struct net_bridge_port_group __rcu **pp;

		for (pp = &mp->ports;
		     (p = mlock_dereference(*pp, br)) != NULL;
		     pp = &p->next) {
			if (!br_port_group_equal(p, port, src))
				continue;

			if (p->flags & MDB_PG_FLAGS_PERMANENT)
				break;

			p->flags |= MDB_PG_FLAGS_FAST_LEAVE;
			br_multicast_del_pg(mp, p, pp);
		}
		goto out;
	}

	if (timer_pending(&other_query->timer))
		goto out;

	if (br_opt_get(br, BROPT_MULTICAST_QUERIER)) {
		__br_multicast_send_query(br, port, NULL, NULL, &mp->addr,
					  false, 0, NULL);

		time = jiffies + br->multicast_last_member_count *
				 br->multicast_last_member_interval;

		mod_timer(&own_query->timer, time);

		for (p = mlock_dereference(mp->ports, br);
		     p != NULL;
		     p = mlock_dereference(p->next, br)) {
			if (!br_port_group_equal(p, port, src))
				continue;

			if (!hlist_unhashed(&p->mglist) &&
			    (timer_pending(&p->timer) ?
			     time_after(p->timer.expires, time) :
			     try_to_del_timer_sync(&p->timer) >= 0)) {
				mod_timer(&p->timer, time);
			}
		}

		goto out;
	}

	now = jiffies;
	time = now + br->multicast_last_member_count *
		     br->multicast_last_member_interval;

	if (!port) {
		if (mp->host_joined &&
		    (timer_pending(&mp->timer) ?
		     time_after(mp->timer.expires, time) :
		     try_to_del_timer_sync(&mp->timer) >= 0)) {
			mod_timer(&mp->timer, time);
		}

		goto out;
	}

	for (p = mlock_dereference(mp->ports, br);
	     p != NULL;
	     p = mlock_dereference(p->next, br)) {
		if (p->key.port != port)
			continue;

		if (!hlist_unhashed(&p->mglist) &&
		    (timer_pending(&p->timer) ?
		     time_after(p->timer.expires, time) :
		     try_to_del_timer_sync(&p->timer) >= 0)) {
			mod_timer(&p->timer, time);
		}
	}
out:
	spin_unlock(&br->multicast_lock);
}
static void br_ip4_multicast_leave_group(struct net_bridge *br,
					 struct net_bridge_port *port,
					 __be32 group,
					 __u16 vid,
					 const unsigned char *src)
{
	struct br_ip br_group;
	struct bridge_mcast_own_query *own_query;

	if (ipv4_is_local_multicast(group))
		return;

	own_query = port ? &port->ip4_own_query : &br->ip4_own_query;

	memset(&br_group, 0, sizeof(br_group));
	br_group.dst.ip4 = group;
	br_group.proto = htons(ETH_P_IP);
	br_group.vid = vid;

	br_multicast_leave_group(br, port, &br_group, &br->ip4_other_query,
				 own_query, src);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_leave_group(struct net_bridge *br,
					 struct net_bridge_port *port,
					 const struct in6_addr *group,
					 __u16 vid,
					 const unsigned char *src)
{
	struct br_ip br_group;
	struct bridge_mcast_own_query *own_query;

	if (ipv6_addr_is_ll_all_nodes(group))
		return;

	own_query = port ? &port->ip6_own_query : &br->ip6_own_query;

	memset(&br_group, 0, sizeof(br_group));
	br_group.dst.ip6 = *group;
	br_group.proto = htons(ETH_P_IPV6);
	br_group.vid = vid;

	br_multicast_leave_group(br, port, &br_group, &br->ip6_other_query,
				 own_query, src);
}
#endif
static void br_multicast_err_count(const struct net_bridge *br,
				   const struct net_bridge_port *p,
				   __be16 proto)
{
	struct bridge_mcast_stats __percpu *stats;
	struct bridge_mcast_stats *pstats;

	if (!br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED))
		return;

	if (p)
		stats = p->mcast_stats;
	else
		stats = br->mcast_stats;
	if (WARN_ON(!stats))
		return;

	pstats = this_cpu_ptr(stats);

	u64_stats_update_begin(&pstats->syncp);
	switch (proto) {
	case htons(ETH_P_IP):
		pstats->mstats.igmp_parse_errors++;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		pstats->mstats.mld_parse_errors++;
		break;
#endif
	}
	u64_stats_update_end(&pstats->syncp);
}

static void br_multicast_pim(struct net_bridge *br,
			     struct net_bridge_port *port,
			     const struct sk_buff *skb)
{
	unsigned int offset = skb_transport_offset(skb);
	struct pimhdr *pimhdr, _pimhdr;

	pimhdr = skb_header_pointer(skb, offset, sizeof(_pimhdr), &_pimhdr);
	if (!pimhdr || pim_hdr_version(pimhdr) != PIM_VERSION ||
	    pim_hdr_type(pimhdr) != PIM_TYPE_HELLO)
		return;

	br_multicast_mark_router(br, port);
}
static int br_ip4_multicast_mrd_rcv(struct net_bridge *br,
				    struct net_bridge_port *port,
				    struct sk_buff *skb)
{
	if (ip_hdr(skb)->protocol != IPPROTO_IGMP ||
	    igmp_hdr(skb)->type != IGMP_MRDISC_ADV)
		return -ENOMSG;

	br_multicast_mark_router(br, port);

	return 0;
}

static int br_multicast_ipv4_rcv(struct net_bridge *br,
				 struct net_bridge_port *port,
				 struct sk_buff *skb,
				 u16 vid)
{
	const unsigned char *src;
	struct igmphdr *ih;
	int err;

	err = ip_mc_check_igmp(skb);

	if (err == -ENOMSG) {
		if (!ipv4_is_local_multicast(ip_hdr(skb)->daddr)) {
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		} else if (pim_ipv4_all_pim_routers(ip_hdr(skb)->daddr)) {
			if (ip_hdr(skb)->protocol == IPPROTO_PIM)
				br_multicast_pim(br, port, skb);
		} else if (ipv4_is_all_snoopers(ip_hdr(skb)->daddr)) {
			br_ip4_multicast_mrd_rcv(br, port, skb);
		}

		return 0;
	} else if (err < 0) {
		br_multicast_err_count(br, port, skb->protocol);
		return err;
	}

	ih = igmp_hdr(skb);
	src = eth_hdr(skb)->h_source;
	BR_INPUT_SKB_CB(skb)->igmp = ih->type;

	switch (ih->type) {
	case IGMP_HOST_MEMBERSHIP_REPORT:
	case IGMPV2_HOST_MEMBERSHIP_REPORT:
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip4_multicast_add_group(br, port, ih->group, vid, src,
						 true);
		break;
	case IGMPV3_HOST_MEMBERSHIP_REPORT:
		err = br_ip4_multicast_igmp3_report(br, port, skb, vid);
		break;
	case IGMP_HOST_MEMBERSHIP_QUERY:
		br_ip4_multicast_query(br, port, skb, vid);
		break;
	case IGMP_HOST_LEAVE_MESSAGE:
		br_ip4_multicast_leave_group(br, port, ih->group, vid, src);
		break;
	}

	br_multicast_count(br, port, skb, BR_INPUT_SKB_CB(skb)->igmp,
			   BR_MCAST_DIR_RX);

	return err;
}
#if IS_ENABLED(CONFIG_IPV6)
static int br_ip6_multicast_mrd_rcv(struct net_bridge *br,
				    struct net_bridge_port *port,
				    struct sk_buff *skb)
{
	int ret;

	if (ipv6_hdr(skb)->nexthdr != IPPROTO_ICMPV6)
		return -ENOMSG;

	ret = ipv6_mc_check_icmpv6(skb);
	if (ret < 0)
		return ret;

	if (icmp6_hdr(skb)->icmp6_type != ICMPV6_MRDISC_ADV)
		return -ENOMSG;

	br_multicast_mark_router(br, port);

	return 0;
}

static int br_multicast_ipv6_rcv(struct net_bridge *br,
				 struct net_bridge_port *port,
				 struct sk_buff *skb,
				 u16 vid)
{
	const unsigned char *src;
	struct mld_msg *mld;
	int err;

	err = ipv6_mc_check_mld(skb);

	if (err == -ENOMSG) {
		if (!ipv6_addr_is_ll_all_nodes(&ipv6_hdr(skb)->daddr))
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;

		if (ipv6_addr_is_all_snoopers(&ipv6_hdr(skb)->daddr)) {
			err = br_ip6_multicast_mrd_rcv(br, port, skb);

			if (err < 0 && err != -ENOMSG) {
				br_multicast_err_count(br, port, skb->protocol);
				return err;
			}
		}

		return 0;
	} else if (err < 0) {
		br_multicast_err_count(br, port, skb->protocol);
		return err;
	}

	mld = (struct mld_msg *)skb_transport_header(skb);
	BR_INPUT_SKB_CB(skb)->igmp = mld->mld_type;

	switch (mld->mld_type) {
	case ICMPV6_MGM_REPORT:
		src = eth_hdr(skb)->h_source;
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip6_multicast_add_group(br, port, &mld->mld_mca, vid,
						 src, true);
		break;
	case ICMPV6_MLD2_REPORT:
		err = br_ip6_multicast_mld2_report(br, port, skb, vid);
		break;
	case ICMPV6_MGM_QUERY:
		err = br_ip6_multicast_query(br, port, skb, vid);
		break;
	case ICMPV6_MGM_REDUCTION:
		src = eth_hdr(skb)->h_source;
		br_ip6_multicast_leave_group(br, port, &mld->mld_mca, vid, src);
		break;
	}

	br_multicast_count(br, port, skb, BR_INPUT_SKB_CB(skb)->igmp,
			   BR_MCAST_DIR_RX);

	return err;
}
#endif
int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
		     struct sk_buff *skb, u16 vid)
{
	int ret = 0;

	BR_INPUT_SKB_CB(skb)->igmp = 0;
	BR_INPUT_SKB_CB(skb)->mrouters_only = 0;

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return 0;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ret = br_multicast_ipv4_rcv(br, port, skb, vid);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ret = br_multicast_ipv6_rcv(br, port, skb, vid);
		break;
#endif
	}

	return ret;
}
static void br_multicast_query_expired(struct net_bridge *br,
				       struct bridge_mcast_own_query *query,
				       struct bridge_mcast_querier *querier)
{
	spin_lock(&br->multicast_lock);
	if (query->startup_sent < br->multicast_startup_query_count)
		query->startup_sent++;

	RCU_INIT_POINTER(querier->port, NULL);
	br_multicast_send_query(br, NULL, query);
	spin_unlock(&br->multicast_lock);
}

static void br_ip4_multicast_query_expired(struct timer_list *t)
{
	struct net_bridge *br = from_timer(br, t, ip4_own_query.timer);

	br_multicast_query_expired(br, &br->ip4_own_query, &br->ip4_querier);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_query_expired(struct timer_list *t)
{
	struct net_bridge *br = from_timer(br, t, ip6_own_query.timer);

	br_multicast_query_expired(br, &br->ip6_own_query, &br->ip6_querier);
}
#endif

static void br_multicast_gc_work(struct work_struct *work)
{
	struct net_bridge *br = container_of(work, struct net_bridge,
					     mcast_gc_work);
	HLIST_HEAD(deleted_head);

	spin_lock_bh(&br->multicast_lock);
	hlist_move_list(&br->mcast_gc_list, &deleted_head);
	spin_unlock_bh(&br->multicast_lock);

	br_multicast_gc(&deleted_head);
}
void br_multicast_init(struct net_bridge *br)
{
	br->hash_max = BR_MULTICAST_DEFAULT_HASH_MAX;

	br->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
	br->multicast_last_member_count = 2;
	br->multicast_startup_query_count = 2;

	br->multicast_last_member_interval = HZ;
	br->multicast_query_response_interval = 10 * HZ;
	br->multicast_startup_query_interval = 125 * HZ / 4;
	br->multicast_query_interval = 125 * HZ;
	br->multicast_querier_interval = 255 * HZ;
	br->multicast_membership_interval = 260 * HZ;

	br->ip4_other_query.delay_time = 0;
	br->ip4_querier.port = NULL;
	br->multicast_igmp_version = 2;
#if IS_ENABLED(CONFIG_IPV6)
	br->multicast_mld_version = 1;
	br->ip6_other_query.delay_time = 0;
	br->ip6_querier.port = NULL;
#endif
	br_opt_toggle(br, BROPT_MULTICAST_ENABLED, true);
	br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, true);

	spin_lock_init(&br->multicast_lock);
	timer_setup(&br->multicast_router_timer,
		    br_multicast_local_router_expired, 0);
	timer_setup(&br->ip4_other_query.timer,
		    br_ip4_multicast_querier_expired, 0);
	timer_setup(&br->ip4_own_query.timer,
		    br_ip4_multicast_query_expired, 0);
#if IS_ENABLED(CONFIG_IPV6)
	timer_setup(&br->ip6_other_query.timer,
		    br_ip6_multicast_querier_expired, 0);
	timer_setup(&br->ip6_own_query.timer,
		    br_ip6_multicast_query_expired, 0);
#endif
	INIT_HLIST_HEAD(&br->mdb_list);
	INIT_HLIST_HEAD(&br->mcast_gc_list);
	INIT_WORK(&br->mcast_gc_work, br_multicast_gc_work);
}
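/* The defaults above follow the usual IGMP/MLD querier arithmetic: with a
 * robustness (startup/last-member count) of 2, a query interval of 125 s and
 * a query response interval of 10 s, the membership interval works out to
 * 2 * 125 + 10 = 260 s and the other-querier-present interval to
 * 2 * 125 + 10/2 = 255 s, which is exactly what gets programmed into
 * multicast_membership_interval and multicast_querier_interval.
 */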
static void br_ip4_multicast_join_snoopers(struct net_bridge *br)
{
	struct in_device *in_dev = in_dev_get(br->dev);

	if (!in_dev)
		return;

	__ip_mc_inc_group(in_dev, htonl(INADDR_ALLSNOOPERS_GROUP), GFP_ATOMIC);
	in_dev_put(in_dev);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_join_snoopers(struct net_bridge *br)
{
	struct in6_addr addr;

	ipv6_addr_set(&addr, htonl(0xff020000), 0, 0, htonl(0x6a));
	ipv6_dev_mc_inc(br->dev, &addr);
}
#else
static inline void br_ip6_multicast_join_snoopers(struct net_bridge *br)
{
}
#endif

void br_multicast_join_snoopers(struct net_bridge *br)
{
	br_ip4_multicast_join_snoopers(br);
	br_ip6_multicast_join_snoopers(br);
}

static void br_ip4_multicast_leave_snoopers(struct net_bridge *br)
{
	struct in_device *in_dev = in_dev_get(br->dev);

	if (WARN_ON(!in_dev))
		return;

	__ip_mc_dec_group(in_dev, htonl(INADDR_ALLSNOOPERS_GROUP), GFP_ATOMIC);
	in_dev_put(in_dev);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_leave_snoopers(struct net_bridge *br)
{
	struct in6_addr addr;

	ipv6_addr_set(&addr, htonl(0xff020000), 0, 0, htonl(0x6a));
	ipv6_dev_mc_dec(br->dev, &addr);
}
#else
static inline void br_ip6_multicast_leave_snoopers(struct net_bridge *br)
{
}
#endif

void br_multicast_leave_snoopers(struct net_bridge *br)
{
	br_ip4_multicast_leave_snoopers(br);
	br_ip6_multicast_leave_snoopers(br);
}
static void __br_multicast_open(struct net_bridge *br,
				struct bridge_mcast_own_query *query)
{
	query->startup_sent = 0;

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return;

	mod_timer(&query->timer, jiffies);
}

void br_multicast_open(struct net_bridge *br)
{
	__br_multicast_open(br, &br->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
	__br_multicast_open(br, &br->ip6_own_query);
#endif
}

void br_multicast_stop(struct net_bridge *br)
{
	del_timer_sync(&br->multicast_router_timer);
	del_timer_sync(&br->ip4_other_query.timer);
	del_timer_sync(&br->ip4_own_query.timer);
#if IS_ENABLED(CONFIG_IPV6)
	del_timer_sync(&br->ip6_other_query.timer);
	del_timer_sync(&br->ip6_own_query.timer);
#endif
}
void br_multicast_dev_del(struct net_bridge *br)
{
	struct net_bridge_mdb_entry *mp;
	HLIST_HEAD(deleted_head);
	struct hlist_node *tmp;

	spin_lock_bh(&br->multicast_lock);
	hlist_for_each_entry_safe(mp, tmp, &br->mdb_list, mdb_node)
		br_multicast_del_mdb_entry(mp);
	hlist_move_list(&br->mcast_gc_list, &deleted_head);
	spin_unlock_bh(&br->multicast_lock);

	br_multicast_gc(&deleted_head);
	cancel_work_sync(&br->mcast_gc_work);

	rcu_barrier();
}
int br_multicast_set_router(struct net_bridge *br, unsigned long val)
{
	int err = -EINVAL;

	spin_lock_bh(&br->multicast_lock);

	switch (val) {
	case MDB_RTR_TYPE_DISABLED:
	case MDB_RTR_TYPE_PERM:
		br_mc_router_state_change(br, val == MDB_RTR_TYPE_PERM);
		del_timer(&br->multicast_router_timer);
		br->multicast_router = val;
		err = 0;
		break;
	case MDB_RTR_TYPE_TEMP_QUERY:
		if (br->multicast_router != MDB_RTR_TYPE_TEMP_QUERY)
			br_mc_router_state_change(br, false);
		br->multicast_router = val;
		err = 0;
		break;
	}

	spin_unlock_bh(&br->multicast_lock);

	return err;
}
static void __del_port_router(struct net_bridge_port *p)
{
	if (hlist_unhashed(&p->rlist))
		return;
	hlist_del_init_rcu(&p->rlist);
	br_rtr_notify(p->br->dev, p, RTM_DELMDB);
	br_port_mc_router_state_change(p, false);

	/* don't allow timer refresh */
	if (p->multicast_router == MDB_RTR_TYPE_TEMP)
		p->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
}

int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val)
{
	struct net_bridge *br = p->br;
	unsigned long now = jiffies;
	int err = -EINVAL;

	spin_lock(&br->multicast_lock);
	if (p->multicast_router == val) {
		/* Refresh the temp router port timer */
		if (p->multicast_router == MDB_RTR_TYPE_TEMP)
			mod_timer(&p->multicast_router_timer,
				  now + br->multicast_querier_interval);
		err = 0;
		goto unlock;
	}
	switch (val) {
	case MDB_RTR_TYPE_DISABLED:
		p->multicast_router = MDB_RTR_TYPE_DISABLED;
		__del_port_router(p);
		del_timer(&p->multicast_router_timer);
		break;
	case MDB_RTR_TYPE_TEMP_QUERY:
		p->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
		__del_port_router(p);
		break;
	case MDB_RTR_TYPE_PERM:
		p->multicast_router = MDB_RTR_TYPE_PERM;
		del_timer(&p->multicast_router_timer);
		br_multicast_add_router(br, p);
		break;
	case MDB_RTR_TYPE_TEMP:
		p->multicast_router = MDB_RTR_TYPE_TEMP;
		br_multicast_mark_router(br, p);
		break;
	default:
		goto unlock;
	}
	err = 0;
unlock:
	spin_unlock(&br->multicast_lock);

	return err;
}
static void br_multicast_start_querier(struct net_bridge *br,
				       struct bridge_mcast_own_query *query)
{
	struct net_bridge_port *port;

	__br_multicast_open(br, query);

	rcu_read_lock();
	list_for_each_entry_rcu(port, &br->port_list, list) {
		if (port->state == BR_STATE_DISABLED ||
		    port->state == BR_STATE_BLOCKING)
			continue;

		if (query == &br->ip4_own_query)
			br_multicast_enable(&port->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
		else
			br_multicast_enable(&port->ip6_own_query);
#endif
	}
	rcu_read_unlock();
}
int br_multicast_toggle(struct net_bridge *br, unsigned long val)
{
	struct net_bridge_port *port;
	bool change_snoopers = false;

	spin_lock_bh(&br->multicast_lock);
	if (!!br_opt_get(br, BROPT_MULTICAST_ENABLED) == !!val)
		goto unlock;

	br_mc_disabled_update(br->dev, val);
	br_opt_toggle(br, BROPT_MULTICAST_ENABLED, !!val);
	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) {
		change_snoopers = true;
		goto unlock;
	}

	if (!netif_running(br->dev))
		goto unlock;

	br_multicast_open(br);
	list_for_each_entry(port, &br->port_list, list)
		__br_multicast_enable_port(port);

	change_snoopers = true;

unlock:
	spin_unlock_bh(&br->multicast_lock);

	/* br_multicast_join_snoopers has the potential to cause
	 * an MLD Report/Leave to be delivered to br_multicast_rcv,
	 * which would in turn call br_multicast_add_group, which would
	 * attempt to acquire multicast_lock. This function should be
	 * called after the lock has been released to avoid deadlocks on
	 * multicast_lock.
	 *
	 * br_multicast_leave_snoopers does not have the problem since
	 * br_multicast_rcv first checks BROPT_MULTICAST_ENABLED, and
	 * returns without calling br_multicast_ipv4/6_rcv if it's not
	 * enabled. Moved both functions out just for symmetry.
	 */
	if (change_snoopers) {
		if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
			br_multicast_join_snoopers(br);
		else
			br_multicast_leave_snoopers(br);
	}

	return 0;
}
bool br_multicast_enabled(const struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	return !!br_opt_get(br, BROPT_MULTICAST_ENABLED);
}
EXPORT_SYMBOL_GPL(br_multicast_enabled);

bool br_multicast_router(const struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	bool is_router;

	spin_lock_bh(&br->multicast_lock);
	is_router = br_multicast_is_router(br);
	spin_unlock_bh(&br->multicast_lock);
	return is_router;
}
EXPORT_SYMBOL_GPL(br_multicast_router);
int br_multicast_set_querier(struct net_bridge *br, unsigned long val)
{
	unsigned long max_delay;

	val = !!val;

	spin_lock_bh(&br->multicast_lock);
	if (br_opt_get(br, BROPT_MULTICAST_QUERIER) == val)
		goto unlock;

	br_opt_toggle(br, BROPT_MULTICAST_QUERIER, !!val);
	if (!val)
		goto unlock;

	max_delay = br->multicast_query_response_interval;

	if (!timer_pending(&br->ip4_other_query.timer))
		br->ip4_other_query.delay_time = jiffies + max_delay;

	br_multicast_start_querier(br, &br->ip4_own_query);

#if IS_ENABLED(CONFIG_IPV6)
	if (!timer_pending(&br->ip6_other_query.timer))
		br->ip6_other_query.delay_time = jiffies + max_delay;

	br_multicast_start_querier(br, &br->ip6_own_query);
#endif

unlock:
	spin_unlock_bh(&br->multicast_lock);

	return 0;
}
int br_multicast_set_igmp_version(struct net_bridge *br, unsigned long val)
{
	/* Currently we support only version 2 and 3 */
	switch (val) {
	case 2:
	case 3:
		break;
	default:
		return -EINVAL;
	}

	spin_lock_bh(&br->multicast_lock);
	br->multicast_igmp_version = val;
	spin_unlock_bh(&br->multicast_lock);

	return 0;
}

#if IS_ENABLED(CONFIG_IPV6)
int br_multicast_set_mld_version(struct net_bridge *br, unsigned long val)
{
	/* Currently we support version 1 and 2 */
	switch (val) {
	case 1:
	case 2:
		break;
	default:
		return -EINVAL;
	}

	spin_lock_bh(&br->multicast_lock);
	br->multicast_mld_version = val;
	spin_unlock_bh(&br->multicast_lock);

	return 0;
}
#endif
/**
 * br_multicast_list_adjacent - Returns snooped multicast addresses
 * @dev:	The bridge port adjacent to which to retrieve addresses
 * @br_ip_list:	The list to store found, snooped multicast IP addresses in
 *
 * Creates a list of IP addresses (struct br_ip_list) sensed by the multicast
 * snooping feature on all bridge ports of dev's bridge device, excluding
 * the addresses from dev itself.
 *
 * Returns the number of items added to br_ip_list.
 *
 * Notes:
 * - br_ip_list needs to be initialized by caller
 * - br_ip_list might contain duplicates in the end
 *   (needs to be taken care of by caller)
 * - br_ip_list needs to be freed by caller
 */
int br_multicast_list_adjacent(struct net_device *dev,
			       struct list_head *br_ip_list)
{
	struct net_bridge *br;
	struct net_bridge_port *port;
	struct net_bridge_port_group *group;
	struct br_ip_list *entry;
	int count = 0;

	rcu_read_lock();
	if (!br_ip_list || !netif_is_bridge_port(dev))
		goto unlock;

	port = br_port_get_rcu(dev);
	if (!port || !port->br)
		goto unlock;

	br = port->br;

	list_for_each_entry_rcu(port, &br->port_list, list) {
		if (!port->dev || port->dev == dev)
			continue;

		hlist_for_each_entry_rcu(group, &port->mglist, mglist) {
			entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
			if (!entry)
				goto unlock;

			entry->addr = group->key.addr;
			list_add(&entry->list, br_ip_list);
			count++;
		}
	}

unlock:
	rcu_read_unlock();
	return count;
}
EXPORT_SYMBOL_GPL(br_multicast_list_adjacent);
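/* Minimal caller sketch for the helper above (the function below is
 * hypothetical and not part of the bridge code, hence kept out of the build):
 * it only illustrates the documented contract - the caller owns the list
 * head, tolerates duplicates and must free every entry itself.
 */
#if 0
static void example_dump_adjacent_groups(struct net_device *brport_dev)
{
	struct br_ip_list *entry, *tmp;
	LIST_HEAD(mcast_list);
	int count;

	count = br_multicast_list_adjacent(brport_dev, &mcast_list);
	pr_debug("%s: %d snooped group(s)\n", brport_dev->name, count);

	/* free what br_multicast_list_adjacent() allocated for us */
	list_for_each_entry_safe(entry, tmp, &mcast_list, list) {
		list_del(&entry->list);
		kfree(entry);
	}
}
#endif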
/**
 * br_multicast_has_querier_anywhere - Checks for a querier on a bridge
 * @dev: The bridge port providing the bridge on which to check for a querier
 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
 *
 * Checks whether the given interface has a bridge on top and if so returns
 * true if a valid querier exists anywhere on the bridged link layer.
 * Otherwise returns false.
 */
bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto)
{
	struct net_bridge *br;
	struct net_bridge_port *port;
	struct ethhdr eth;
	bool ret = false;

	rcu_read_lock();
	if (!netif_is_bridge_port(dev))
		goto unlock;

	port = br_port_get_rcu(dev);
	if (!port || !port->br)
		goto unlock;

	br = port->br;

	memset(&eth, 0, sizeof(eth));
	eth.h_proto = htons(proto);

	ret = br_multicast_querier_exists(br, &eth, NULL);

unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(br_multicast_has_querier_anywhere);
/**
 * br_multicast_has_querier_adjacent - Checks for a querier behind a bridge port
 * @dev: The bridge port adjacent to which to check for a querier
 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
 *
 * Checks whether the given interface has a bridge on top and if so returns
 * true if a selected querier is behind one of the other ports of this
 * bridge. Otherwise returns false.
 */
bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto)
{
	struct net_bridge *br;
	struct net_bridge_port *port;
	bool ret = false;

	rcu_read_lock();
	if (!netif_is_bridge_port(dev))
		goto unlock;

	port = br_port_get_rcu(dev);
	if (!port || !port->br)
		goto unlock;

	br = port->br;

	switch (proto) {
	case ETH_P_IP:
		if (!timer_pending(&br->ip4_other_query.timer) ||
		    rcu_dereference(br->ip4_querier.port) == port)
			goto unlock;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case ETH_P_IPV6:
		if (!timer_pending(&br->ip6_other_query.timer) ||
		    rcu_dereference(br->ip6_querier.port) == port)
			goto unlock;
		break;
#endif
	default:
		goto unlock;
	}

	ret = true;

unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(br_multicast_has_querier_adjacent);
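/* Hypothetical caller sketch for the two helpers above (kept out of the
 * build): a consumer might, for example, only trust snooping state when a
 * querier is present. Only the exported API documented above is used; the
 * surrounding function and its policy are made up for illustration.
 */
#if 0
static bool example_can_rely_on_snooping(struct net_device *brport_dev)
{
	/* A querier somewhere on the bridged LAN keeps the MDB populated... */
	if (!br_multicast_has_querier_anywhere(brport_dev, ETH_P_IP))
		return false;

	/* ...and one behind another port means reports traverse this bridge. */
	return br_multicast_has_querier_adjacent(brport_dev, ETH_P_IP);
}
#endif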
static void br_mcast_stats_add(struct bridge_mcast_stats __percpu *stats,
			       const struct sk_buff *skb, u8 type, u8 dir)
{
	struct bridge_mcast_stats *pstats = this_cpu_ptr(stats);
	__be16 proto = skb->protocol;
	unsigned int t_len;

	u64_stats_update_begin(&pstats->syncp);
	switch (proto) {
	case htons(ETH_P_IP):
		t_len = ntohs(ip_hdr(skb)->tot_len) - ip_hdrlen(skb);
		switch (type) {
		case IGMP_HOST_MEMBERSHIP_REPORT:
			pstats->mstats.igmp_v1reports[dir]++;
			break;
		case IGMPV2_HOST_MEMBERSHIP_REPORT:
			pstats->mstats.igmp_v2reports[dir]++;
			break;
		case IGMPV3_HOST_MEMBERSHIP_REPORT:
			pstats->mstats.igmp_v3reports[dir]++;
			break;
		case IGMP_HOST_MEMBERSHIP_QUERY:
			if (t_len != sizeof(struct igmphdr)) {
				pstats->mstats.igmp_v3queries[dir]++;
			} else {
				unsigned int offset = skb_transport_offset(skb);
				struct igmphdr *ih, _ihdr;

				ih = skb_header_pointer(skb, offset,
							sizeof(_ihdr), &_ihdr);
				if (!ih)
					break;
				if (!ih->code)
					pstats->mstats.igmp_v1queries[dir]++;
				else
					pstats->mstats.igmp_v2queries[dir]++;
			}
			break;
		case IGMP_HOST_LEAVE_MESSAGE:
			pstats->mstats.igmp_leaves[dir]++;
			break;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		t_len = ntohs(ipv6_hdr(skb)->payload_len) +
			sizeof(struct ipv6hdr);
		t_len -= skb_network_header_len(skb);
		switch (type) {
		case ICMPV6_MGM_REPORT:
			pstats->mstats.mld_v1reports[dir]++;
			break;
		case ICMPV6_MLD2_REPORT:
			pstats->mstats.mld_v2reports[dir]++;
			break;
		case ICMPV6_MGM_QUERY:
			if (t_len != sizeof(struct mld_msg))
				pstats->mstats.mld_v2queries[dir]++;
			else
				pstats->mstats.mld_v1queries[dir]++;
			break;
		case ICMPV6_MGM_REDUCTION:
			pstats->mstats.mld_leaves[dir]++;
			break;
		}
		break;
#endif /* CONFIG_IPV6 */
	}
	u64_stats_update_end(&pstats->syncp);
}
void br_multicast_count(struct net_bridge *br, const struct net_bridge_port *p,
			const struct sk_buff *skb, u8 type, u8 dir)
{
	struct bridge_mcast_stats __percpu *stats;

	/* if multicast_disabled is true then igmp type can't be set */
	if (!type || !br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED))
		return;

	if (p)
		stats = p->mcast_stats;
	else
		stats = br->mcast_stats;
	if (WARN_ON(!stats))
		return;

	br_mcast_stats_add(stats, skb, type, dir);
}

int br_multicast_init_stats(struct net_bridge *br)
{
	br->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
	if (!br->mcast_stats)
		return -ENOMEM;

	return 0;
}

void br_multicast_uninit_stats(struct net_bridge *br)
{
	free_percpu(br->mcast_stats);
}

/* noinline for https://bugs.llvm.org/show_bug.cgi?id=45802#c9 */
static noinline_for_stack void mcast_stats_add_dir(u64 *dst, u64 *src)
{
	dst[BR_MCAST_DIR_RX] += src[BR_MCAST_DIR_RX];
	dst[BR_MCAST_DIR_TX] += src[BR_MCAST_DIR_TX];
}
void br_multicast_get_stats(const struct net_bridge *br,
			    const struct net_bridge_port *p,
			    struct br_mcast_stats *dest)
{
	struct bridge_mcast_stats __percpu *stats;
	struct br_mcast_stats tdst;
	int i;

	memset(dest, 0, sizeof(*dest));
	if (p)
		stats = p->mcast_stats;
	else
		stats = br->mcast_stats;
	if (WARN_ON(!stats))
		return;

	memset(&tdst, 0, sizeof(tdst));
	for_each_possible_cpu(i) {
		struct bridge_mcast_stats *cpu_stats = per_cpu_ptr(stats, i);
		struct br_mcast_stats temp;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			memcpy(&temp, &cpu_stats->mstats, sizeof(temp));
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

		mcast_stats_add_dir(tdst.igmp_v1queries, temp.igmp_v1queries);
		mcast_stats_add_dir(tdst.igmp_v2queries, temp.igmp_v2queries);
		mcast_stats_add_dir(tdst.igmp_v3queries, temp.igmp_v3queries);
		mcast_stats_add_dir(tdst.igmp_leaves, temp.igmp_leaves);
		mcast_stats_add_dir(tdst.igmp_v1reports, temp.igmp_v1reports);
		mcast_stats_add_dir(tdst.igmp_v2reports, temp.igmp_v2reports);
		mcast_stats_add_dir(tdst.igmp_v3reports, temp.igmp_v3reports);
		tdst.igmp_parse_errors += temp.igmp_parse_errors;

		mcast_stats_add_dir(tdst.mld_v1queries, temp.mld_v1queries);
		mcast_stats_add_dir(tdst.mld_v2queries, temp.mld_v2queries);
		mcast_stats_add_dir(tdst.mld_leaves, temp.mld_leaves);
		mcast_stats_add_dir(tdst.mld_v1reports, temp.mld_v1reports);
		mcast_stats_add_dir(tdst.mld_v2reports, temp.mld_v2reports);
		tdst.mld_parse_errors += temp.mld_parse_errors;
	}
	memcpy(dest, &tdst, sizeof(*dest));
}
int br_mdb_hash_init(struct net_bridge *br)
{
	int err;

	err = rhashtable_init(&br->sg_port_tbl, &br_sg_port_rht_params);
	if (err)
		return err;

	err = rhashtable_init(&br->mdb_hash_tbl, &br_mdb_rht_params);
	if (err) {
		rhashtable_destroy(&br->sg_port_tbl);
		return err;
	}

	return 0;
}

void br_mdb_hash_fini(struct net_bridge *br)
{
	rhashtable_destroy(&br->sg_port_tbl);
	rhashtable_destroy(&br->mdb_hash_tbl);
}