// SPDX-License-Identifier: GPL-2.0-or-later
// Copyright (c) 2020, Nikolay Aleksandrov <nikolay@nvidia.com>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/if_ether.h>
#include <linux/igmp.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/netdevice.h>
#include <linux/netfilter_bridge.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/inetdevice.h>
#include <linux/mroute.h>
#include <net/ip.h>
#include <net/switchdev.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <linux/icmpv6.h>
#include <net/ipv6.h>
#include <net/mld.h>
#include <net/ip6_checksum.h>
#include <net/addrconf.h>
#endif

#include "br_private.h"
#include "br_private_mcast_eht.h"
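/* Explicit Host Tracking (EHT) state: hosts which sent reports for a port
 * group are kept in pg->eht_host_tree, the sources they reported in
 * pg->eht_set_tree, and each source set tracks its reporting hosts in its
 * own entry_tree. Used together with multicast fast-leave.
 */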
static bool br_multicast_del_eht_set_entry(struct net_bridge_port_group *pg,
					   union net_bridge_eht_addr *src_addr,
					   union net_bridge_eht_addr *h_addr);
static void br_multicast_create_eht_set_entry(const struct net_bridge_mcast *brmctx,
					      struct net_bridge_port_group *pg,
					      union net_bridge_eht_addr *src_addr,
					      union net_bridge_eht_addr *h_addr,
					      int filter_mode,
					      bool allow_zero_src);
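/* Look up the tracked host entry for h_addr in pg's host rbtree. */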
static struct net_bridge_group_eht_host *
br_multicast_eht_host_lookup(struct net_bridge_port_group *pg,
			     union net_bridge_eht_addr *h_addr)
{
	struct rb_node *node = pg->eht_host_tree.rb_node;

	while (node) {
		struct net_bridge_group_eht_host *this;
		int result;

		this = rb_entry(node, struct net_bridge_group_eht_host,
				rb_node);
		result = memcmp(h_addr, &this->h_addr, sizeof(*h_addr));
		if (result < 0)
			node = node->rb_left;
		else if (result > 0)
			node = node->rb_right;
		else
			return this;
	}

	return NULL;
}
static int br_multicast_eht_host_filter_mode(struct net_bridge_port_group *pg,
					     union net_bridge_eht_addr *h_addr)
{
	struct net_bridge_group_eht_host *eht_host;

	eht_host = br_multicast_eht_host_lookup(pg, h_addr);
	if (!eht_host)
		return MCAST_INCLUDE;

	return eht_host->filter_mode;
}
static struct net_bridge_group_eht_set_entry *
br_multicast_eht_set_entry_lookup(struct net_bridge_group_eht_set *eht_set,
				  union net_bridge_eht_addr *h_addr)
{
	struct rb_node *node = eht_set->entry_tree.rb_node;

	while (node) {
		struct net_bridge_group_eht_set_entry *this;
		int result;

		this = rb_entry(node, struct net_bridge_group_eht_set_entry,
				rb_node);
		result = memcmp(h_addr, &this->h_addr, sizeof(*h_addr));
		if (result < 0)
			node = node->rb_left;
		else if (result > 0)
			node = node->rb_right;
		else
			return this;
	}

	return NULL;
}
static struct net_bridge_group_eht_set *
br_multicast_eht_set_lookup(struct net_bridge_port_group *pg,
			    union net_bridge_eht_addr *src_addr)
{
	struct rb_node *node = pg->eht_set_tree.rb_node;

	while (node) {
		struct net_bridge_group_eht_set *this;
		int result;

		this = rb_entry(node, struct net_bridge_group_eht_set,
				rb_node);
		result = memcmp(src_addr, &this->src_addr, sizeof(*src_addr));
		if (result < 0)
			node = node->rb_left;
		else if (result > 0)
			node = node->rb_right;
		else
			return this;
	}

	return NULL;
}
static void __eht_destroy_host(struct net_bridge_group_eht_host *eht_host)
{
	WARN_ON(!hlist_empty(&eht_host->set_entries));

	br_multicast_eht_hosts_dec(eht_host->pg);

	rb_erase(&eht_host->rb_node, &eht_host->pg->eht_host_tree);
	RB_CLEAR_NODE(&eht_host->rb_node);
	kfree(eht_host);
}
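/* mcast_gc destroy callbacks, run from the bridge's deferred multicast GC
 * work after the entries have been unlinked.
 */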
static void br_multicast_destroy_eht_set_entry(struct net_bridge_mcast_gc *gc)
{
	struct net_bridge_group_eht_set_entry *set_h;

	set_h = container_of(gc, struct net_bridge_group_eht_set_entry, mcast_gc);
	WARN_ON(!RB_EMPTY_NODE(&set_h->rb_node));

	timer_shutdown_sync(&set_h->timer);
	kfree(set_h);
}
static void br_multicast_destroy_eht_set(struct net_bridge_mcast_gc *gc)
{
	struct net_bridge_group_eht_set *eht_set;

	eht_set = container_of(gc, struct net_bridge_group_eht_set, mcast_gc);
	WARN_ON(!RB_EMPTY_NODE(&eht_set->rb_node));
	WARN_ON(!RB_EMPTY_ROOT(&eht_set->entry_tree));

	timer_shutdown_sync(&eht_set->timer);
	kfree(eht_set);
}
static void __eht_del_set_entry(struct net_bridge_group_eht_set_entry *set_h)
{
	struct net_bridge_group_eht_host *eht_host = set_h->h_parent;
	union net_bridge_eht_addr zero_addr;

	rb_erase(&set_h->rb_node, &set_h->eht_set->entry_tree);
	RB_CLEAR_NODE(&set_h->rb_node);
	hlist_del_init(&set_h->host_list);
	memset(&zero_addr, 0, sizeof(zero_addr));
	if (memcmp(&set_h->h_addr, &zero_addr, sizeof(zero_addr)))
		eht_host->num_entries--;
	hlist_add_head(&set_h->mcast_gc.gc_node, &set_h->br->mcast_gc_list);
	queue_work(system_long_wq, &set_h->br->mcast_gc_work);

	if (hlist_empty(&eht_host->set_entries))
		__eht_destroy_host(eht_host);
}
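/* Delete a source set: drop all of its host entries, unlink it from the
 * port group and queue it for deferred freeing.
 */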
static void br_multicast_del_eht_set(struct net_bridge_group_eht_set *eht_set)
{
	struct net_bridge_group_eht_set_entry *set_h;
	struct rb_node *node;

	while ((node = rb_first(&eht_set->entry_tree))) {
		set_h = rb_entry(node, struct net_bridge_group_eht_set_entry,
				 rb_node);
		__eht_del_set_entry(set_h);
	}

	rb_erase(&eht_set->rb_node, &eht_set->pg->eht_set_tree);
	RB_CLEAR_NODE(&eht_set->rb_node);
	hlist_add_head(&eht_set->mcast_gc.gc_node, &eht_set->br->mcast_gc_list);
	queue_work(system_long_wq, &eht_set->br->mcast_gc_work);
}
void br_multicast_eht_clean_sets(struct net_bridge_port_group *pg)
{
	struct net_bridge_group_eht_set *eht_set;
	struct rb_node *node;

	while ((node = rb_first(&pg->eht_set_tree))) {
		eht_set = rb_entry(node, struct net_bridge_group_eht_set,
				   rb_node);
		br_multicast_del_eht_set(eht_set);
	}
}
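/* Timer handlers: an expired entry/set is deleted under multicast_lock
 * unless it was already unlinked or its timer has been re-armed.
 */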
static void br_multicast_eht_set_entry_expired(struct timer_list *t)
{
	struct net_bridge_group_eht_set_entry *set_h = from_timer(set_h, t, timer);
	struct net_bridge *br = set_h->br;

	spin_lock(&br->multicast_lock);
	if (RB_EMPTY_NODE(&set_h->rb_node) || timer_pending(&set_h->timer))
		goto out;

	br_multicast_del_eht_set_entry(set_h->eht_set->pg,
				       &set_h->eht_set->src_addr,
				       &set_h->h_addr);
out:
	spin_unlock(&br->multicast_lock);
}
static void br_multicast_eht_set_expired(struct timer_list *t)
{
	struct net_bridge_group_eht_set *eht_set = from_timer(eht_set, t,
							      timer);
	struct net_bridge *br = eht_set->br;

	spin_lock(&br->multicast_lock);
	if (RB_EMPTY_NODE(&eht_set->rb_node) || timer_pending(&eht_set->timer))
		goto out;

	br_multicast_del_eht_set(eht_set);
out:
	spin_unlock(&br->multicast_lock);
}
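/* Find or allocate the host entry for h_addr; new hosts are subject to the
 * per-port EHT hosts limit.
 */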
static struct net_bridge_group_eht_host *
__eht_lookup_create_host(struct net_bridge_port_group *pg,
			 union net_bridge_eht_addr *h_addr,
			 unsigned char filter_mode)
{
	struct rb_node **link = &pg->eht_host_tree.rb_node, *parent = NULL;
	struct net_bridge_group_eht_host *eht_host;

	while (*link) {
		struct net_bridge_group_eht_host *this;
		int result;

		this = rb_entry(*link, struct net_bridge_group_eht_host,
				rb_node);
		result = memcmp(h_addr, &this->h_addr, sizeof(*h_addr));
		parent = *link;
		if (result < 0)
			link = &((*link)->rb_left);
		else if (result > 0)
			link = &((*link)->rb_right);
		else
			return this;
	}

	if (br_multicast_eht_hosts_over_limit(pg))
		return NULL;

	eht_host = kzalloc(sizeof(*eht_host), GFP_ATOMIC);
	if (!eht_host)
		return NULL;

	memcpy(&eht_host->h_addr, h_addr, sizeof(*h_addr));
	INIT_HLIST_HEAD(&eht_host->set_entries);
	eht_host->pg = pg;
	eht_host->filter_mode = filter_mode;

	rb_link_node(&eht_host->rb_node, parent, link);
	rb_insert_color(&eht_host->rb_node, &pg->eht_host_tree);

	br_multicast_eht_hosts_inc(pg);

	return eht_host;
}
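/* Find or allocate the set entry linking eht_host to eht_set; non-zero
 * entries are bounded by PG_SRC_ENT_LIMIT per host.
 */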
static struct net_bridge_group_eht_set_entry *
__eht_lookup_create_set_entry(struct net_bridge *br,
			      struct net_bridge_group_eht_set *eht_set,
			      struct net_bridge_group_eht_host *eht_host,
			      bool allow_zero_src)
{
	struct rb_node **link = &eht_set->entry_tree.rb_node, *parent = NULL;
	struct net_bridge_group_eht_set_entry *set_h;

	while (*link) {
		struct net_bridge_group_eht_set_entry *this;
		int result;

		this = rb_entry(*link, struct net_bridge_group_eht_set_entry,
				rb_node);
		result = memcmp(&eht_host->h_addr, &this->h_addr,
				sizeof(union net_bridge_eht_addr));
		parent = *link;
		if (result < 0)
			link = &((*link)->rb_left);
		else if (result > 0)
			link = &((*link)->rb_right);
		else
			return this;
	}

	/* always allow auto-created zero entry */
	if (!allow_zero_src && eht_host->num_entries >= PG_SRC_ENT_LIMIT)
		return NULL;

	set_h = kzalloc(sizeof(*set_h), GFP_ATOMIC);
	if (!set_h)
		return NULL;

	memcpy(&set_h->h_addr, &eht_host->h_addr,
	       sizeof(union net_bridge_eht_addr));
	set_h->mcast_gc.destroy = br_multicast_destroy_eht_set_entry;
	set_h->eht_set = eht_set;
	set_h->h_parent = eht_host;
	set_h->br = br;
	timer_setup(&set_h->timer, br_multicast_eht_set_entry_expired, 0);

	hlist_add_head(&set_h->host_list, &eht_host->set_entries);
	rb_link_node(&set_h->rb_node, parent, link);
	rb_insert_color(&set_h->rb_node, &eht_set->entry_tree);
	/* we must not count the auto-created zero entry otherwise we won't be
	 * able to track the full list of PG_SRC_ENT_LIMIT entries
	 */
	if (!allow_zero_src)
		eht_host->num_entries++;

	return set_h;
}
static struct net_bridge_group_eht_set *
__eht_lookup_create_set(struct net_bridge_port_group *pg,
			union net_bridge_eht_addr *src_addr)
{
	struct rb_node **link = &pg->eht_set_tree.rb_node, *parent = NULL;
	struct net_bridge_group_eht_set *eht_set;

	while (*link) {
		struct net_bridge_group_eht_set *this;
		int result;

		this = rb_entry(*link, struct net_bridge_group_eht_set,
				rb_node);
		result = memcmp(src_addr, &this->src_addr, sizeof(*src_addr));
		parent = *link;
		if (result < 0)
			link = &((*link)->rb_left);
		else if (result > 0)
			link = &((*link)->rb_right);
		else
			return this;
	}

	eht_set = kzalloc(sizeof(*eht_set), GFP_ATOMIC);
	if (!eht_set)
		return NULL;

	memcpy(&eht_set->src_addr, src_addr, sizeof(*src_addr));
	eht_set->mcast_gc.destroy = br_multicast_destroy_eht_set;
	eht_set->pg = pg;
	eht_set->br = pg->key.port->br;
	eht_set->entry_tree = RB_ROOT;
	timer_setup(&eht_set->timer, br_multicast_eht_set_expired, 0);

	rb_link_node(&eht_set->rb_node, parent, link);
	rb_insert_color(&eht_set->rb_node, &pg->eht_set_tree);

	return eht_set;
}
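/* Copy the IPv4/IPv6 source address out of a br_ip into an EHT address. */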
static void br_multicast_ip_src_to_eht_addr(const struct br_ip *src,
					    union net_bridge_eht_addr *dest)
{
	switch (src->proto) {
	case htons(ETH_P_IP):
		dest->ip4 = src->src.ip4;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		memcpy(&dest->ip6, &src->src.ip6, sizeof(struct in6_addr));
		break;
#endif
	}
}
static void br_eht_convert_host_filter_mode(const struct net_bridge_mcast *brmctx,
					    struct net_bridge_port_group *pg,
					    union net_bridge_eht_addr *h_addr,
					    int filter_mode)
{
	struct net_bridge_group_eht_host *eht_host;
	union net_bridge_eht_addr zero_addr;

	eht_host = br_multicast_eht_host_lookup(pg, h_addr);
	if (eht_host)
		eht_host->filter_mode = filter_mode;

	memset(&zero_addr, 0, sizeof(zero_addr));
	switch (filter_mode) {
	case MCAST_INCLUDE:
		br_multicast_del_eht_set_entry(pg, &zero_addr, h_addr);
		break;
	case MCAST_EXCLUDE:
		br_multicast_create_eht_set_entry(brmctx, pg, &zero_addr,
						  h_addr, MCAST_EXCLUDE,
						  true);
		break;
	}
}
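/* Create (or refresh) the set entry for h_addr under src_addr and arm both
 * the entry and set timers with the group membership interval.
 */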
static void br_multicast_create_eht_set_entry(const struct net_bridge_mcast *brmctx,
					      struct net_bridge_port_group *pg,
					      union net_bridge_eht_addr *src_addr,
					      union net_bridge_eht_addr *h_addr,
					      int filter_mode,
					      bool allow_zero_src)
{
	struct net_bridge_group_eht_set_entry *set_h;
	struct net_bridge_group_eht_host *eht_host;
	struct net_bridge *br = pg->key.port->br;
	struct net_bridge_group_eht_set *eht_set;
	union net_bridge_eht_addr zero_addr;

	memset(&zero_addr, 0, sizeof(zero_addr));
	if (!allow_zero_src && !memcmp(src_addr, &zero_addr, sizeof(zero_addr)))
		return;

	eht_set = __eht_lookup_create_set(pg, src_addr);
	if (!eht_set)
		return;

	eht_host = __eht_lookup_create_host(pg, h_addr, filter_mode);
	if (!eht_host)
		goto fail_host;

	set_h = __eht_lookup_create_set_entry(br, eht_set, eht_host,
					      allow_zero_src);
	if (!set_h)
		goto fail_set_entry;

	mod_timer(&set_h->timer, jiffies + br_multicast_gmi(brmctx));
	mod_timer(&eht_set->timer, jiffies + br_multicast_gmi(brmctx));

	return;

fail_set_entry:
	if (hlist_empty(&eht_host->set_entries))
		__eht_destroy_host(eht_host);
fail_host:
	if (RB_EMPTY_ROOT(&eht_set->entry_tree))
		br_multicast_del_eht_set(eht_set);
}
static bool br_multicast_del_eht_set_entry(struct net_bridge_port_group *pg,
					   union net_bridge_eht_addr *src_addr,
					   union net_bridge_eht_addr *h_addr)
{
	struct net_bridge_group_eht_set_entry *set_h;
	struct net_bridge_group_eht_set *eht_set;
	bool set_deleted = false;

	eht_set = br_multicast_eht_set_lookup(pg, src_addr);
	if (!eht_set)
		goto out;

	set_h = br_multicast_eht_set_entry_lookup(eht_set, h_addr);
	if (!set_h)
		goto out;

	__eht_del_set_entry(set_h);

	if (RB_EMPTY_ROOT(&eht_set->entry_tree)) {
		br_multicast_del_eht_set(eht_set);
		set_deleted = true;
	}

out:
	return set_deleted;
}
static void br_multicast_del_eht_host(struct net_bridge_port_group *pg,
				      union net_bridge_eht_addr *h_addr)
{
	struct net_bridge_group_eht_set_entry *set_h;
	struct net_bridge_group_eht_host *eht_host;
	struct hlist_node *tmp;

	eht_host = br_multicast_eht_host_lookup(pg, h_addr);
	if (!eht_host)
		return;

	hlist_for_each_entry_safe(set_h, tmp, &eht_host->set_entries, host_list)
		br_multicast_del_eht_set_entry(set_h->eht_set->pg,
					       &set_h->eht_set->src_addr,
					       &set_h->h_addr);
}
/* create new set entries from reports */
static void __eht_create_set_entries(const struct net_bridge_mcast *brmctx,
				     struct net_bridge_port_group *pg,
				     union net_bridge_eht_addr *h_addr,
				     void *srcs,
				     u32 nsrcs,
				     size_t addr_size,
				     int filter_mode)
{
	union net_bridge_eht_addr eht_src_addr;
	u32 src_idx;

	memset(&eht_src_addr, 0, sizeof(eht_src_addr));
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&eht_src_addr, srcs + (src_idx * addr_size), addr_size);
		br_multicast_create_eht_set_entry(brmctx, pg, &eht_src_addr,
						  h_addr, filter_mode,
						  false);
	}
}
/* delete existing set entries and their (S,G) entries if they were the last */
static bool __eht_del_set_entries(struct net_bridge_port_group *pg,
				  union net_bridge_eht_addr *h_addr,
				  void *srcs,
				  u32 nsrcs,
				  size_t addr_size)
{
	union net_bridge_eht_addr eht_src_addr;
	struct net_bridge_group_src *src_ent;
	bool changed = false;
	struct br_ip src_ip;
	u32 src_idx;

	memset(&eht_src_addr, 0, sizeof(eht_src_addr));
	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&eht_src_addr, srcs + (src_idx * addr_size), addr_size);
		if (!br_multicast_del_eht_set_entry(pg, &eht_src_addr, h_addr))
			continue;
		memcpy(&src_ip, srcs + (src_idx * addr_size), addr_size);
		src_ent = br_multicast_find_group_src(pg, &src_ip);
		if (!src_ent)
			continue;
		br_multicast_del_group_src(src_ent, true);
		changed = true;
	}

	return changed;
}
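/* ALLOW adds the reported sources for hosts in include mode and removes them
 * for hosts in exclude mode; BLOCK below is the inverse.
 */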
static bool br_multicast_eht_allow(const struct net_bridge_mcast *brmctx,
				   struct net_bridge_port_group *pg,
				   union net_bridge_eht_addr *h_addr,
				   void *srcs,
				   u32 nsrcs,
				   size_t addr_size)
{
	bool changed = false;

	switch (br_multicast_eht_host_filter_mode(pg, h_addr)) {
	case MCAST_INCLUDE:
		__eht_create_set_entries(brmctx, pg, h_addr, srcs, nsrcs,
					 addr_size, MCAST_INCLUDE);
		break;
	case MCAST_EXCLUDE:
		changed = __eht_del_set_entries(pg, h_addr, srcs, nsrcs,
						addr_size);
		break;
	}

	return changed;
}
static bool br_multicast_eht_block(const struct net_bridge_mcast *brmctx,
				   struct net_bridge_port_group *pg,
				   union net_bridge_eht_addr *h_addr,
				   void *srcs,
				   u32 nsrcs,
				   size_t addr_size)
{
	bool changed = false;

	switch (br_multicast_eht_host_filter_mode(pg, h_addr)) {
	case MCAST_INCLUDE:
		changed = __eht_del_set_entries(pg, h_addr, srcs, nsrcs,
						addr_size);
		break;
	case MCAST_EXCLUDE:
		__eht_create_set_entries(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
					 MCAST_EXCLUDE);
		break;
	}

	return changed;
}
/* flush_entries is true when changing mode */
static bool __eht_inc_exc(const struct net_bridge_mcast *brmctx,
			  struct net_bridge_port_group *pg,
			  union net_bridge_eht_addr *h_addr,
			  void *srcs,
			  u32 nsrcs,
			  size_t addr_size,
			  unsigned char filter_mode,
			  bool to_report)
{
	bool changed = false, flush_entries = to_report;
	union net_bridge_eht_addr eht_src_addr;

	if (br_multicast_eht_host_filter_mode(pg, h_addr) != filter_mode)
		flush_entries = true;

	memset(&eht_src_addr, 0, sizeof(eht_src_addr));
	/* if we're changing mode del host and its entries */
	if (flush_entries)
		br_multicast_del_eht_host(pg, h_addr);
	__eht_create_set_entries(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				 filter_mode);
	/* we can be missing sets only if we've deleted some entries */
	if (flush_entries) {
		struct net_bridge_group_eht_set *eht_set;
		struct net_bridge_group_src *src_ent;
		struct hlist_node *tmp;

		hlist_for_each_entry_safe(src_ent, tmp, &pg->src_list, node) {
			br_multicast_ip_src_to_eht_addr(&src_ent->addr,
							&eht_src_addr);
			if (!br_multicast_eht_set_lookup(pg, &eht_src_addr)) {
				br_multicast_del_group_src(src_ent, true);
				changed = true;
				continue;
			}
			/* this is an optimization for TO_INCLUDE where we lower
			 * the set's timeout to LMQT to catch timeout hosts:
			 * - host A (timing out): set entries X, Y
			 * - host B: set entry Z (new from current TO_INCLUDE)
			 *   sends BLOCK Z after LMQT but host A's EHT
			 *   entries still exist (unless lowered to LMQT
			 *   so they can timeout with the S,Gs)
			 * => we wait another LMQT, when we can just delete the
			 *    group
			 */
			if (!(src_ent->flags & BR_SGRP_F_SEND) ||
			    filter_mode != MCAST_INCLUDE ||
			    !to_report)
				continue;
			eht_set = br_multicast_eht_set_lookup(pg,
							      &eht_src_addr);
			if (!eht_set)
				continue;
			mod_timer(&eht_set->timer, jiffies + br_multicast_lmqt(brmctx));
		}
	}

	return changed;
}
static bool br_multicast_eht_inc(const struct net_bridge_mcast *brmctx,
				 struct net_bridge_port_group *pg,
				 union net_bridge_eht_addr *h_addr,
				 void *srcs,
				 u32 nsrcs,
				 size_t addr_size,
				 bool to_report)
{
	bool changed;

	changed = __eht_inc_exc(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				MCAST_INCLUDE, to_report);
	br_eht_convert_host_filter_mode(brmctx, pg, h_addr, MCAST_INCLUDE);

	return changed;
}
static bool br_multicast_eht_exc(const struct net_bridge_mcast *brmctx,
				 struct net_bridge_port_group *pg,
				 union net_bridge_eht_addr *h_addr,
				 void *srcs,
				 u32 nsrcs,
				 size_t addr_size,
				 bool to_report)
{
	bool changed;

	changed = __eht_inc_exc(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				MCAST_EXCLUDE, to_report);
	br_eht_convert_host_filter_mode(brmctx, pg, h_addr, MCAST_EXCLUDE);

	return changed;
}
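/* Dispatch IGMPv3 record types to the EHT allow/block/inc/exc handlers;
 * the MLDv2 variant below mirrors it for IPv6.
 */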
static bool __eht_ip4_handle(const struct net_bridge_mcast *brmctx,
			     struct net_bridge_port_group *pg,
			     union net_bridge_eht_addr *h_addr,
			     void *srcs,
			     u32 nsrcs,
			     int grec_type)
{
	bool changed = false, to_report = false;

	switch (grec_type) {
	case IGMPV3_ALLOW_NEW_SOURCES:
		br_multicast_eht_allow(brmctx, pg, h_addr, srcs, nsrcs,
				       sizeof(__be32));
		break;
	case IGMPV3_BLOCK_OLD_SOURCES:
		changed = br_multicast_eht_block(brmctx, pg, h_addr, srcs, nsrcs,
						 sizeof(__be32));
		break;
	case IGMPV3_CHANGE_TO_INCLUDE:
		to_report = true;
		fallthrough;
	case IGMPV3_MODE_IS_INCLUDE:
		changed = br_multicast_eht_inc(brmctx, pg, h_addr, srcs, nsrcs,
					       sizeof(__be32), to_report);
		break;
	case IGMPV3_CHANGE_TO_EXCLUDE:
		to_report = true;
		fallthrough;
	case IGMPV3_MODE_IS_EXCLUDE:
		changed = br_multicast_eht_exc(brmctx, pg, h_addr, srcs, nsrcs,
					       sizeof(__be32), to_report);
		break;
	}

	return changed;
}
#if IS_ENABLED(CONFIG_IPV6)
static bool __eht_ip6_handle(const struct net_bridge_mcast *brmctx,
			     struct net_bridge_port_group *pg,
			     union net_bridge_eht_addr *h_addr,
			     void *srcs,
			     u32 nsrcs,
			     int grec_type)
{
	bool changed = false, to_report = false;

	switch (grec_type) {
	case MLD2_ALLOW_NEW_SOURCES:
		br_multicast_eht_allow(brmctx, pg, h_addr, srcs, nsrcs,
				       sizeof(struct in6_addr));
		break;
	case MLD2_BLOCK_OLD_SOURCES:
		changed = br_multicast_eht_block(brmctx, pg, h_addr, srcs, nsrcs,
						 sizeof(struct in6_addr));
		break;
	case MLD2_CHANGE_TO_INCLUDE:
		to_report = true;
		fallthrough;
	case MLD2_MODE_IS_INCLUDE:
		changed = br_multicast_eht_inc(brmctx, pg, h_addr, srcs, nsrcs,
					       sizeof(struct in6_addr),
					       to_report);
		break;
	case MLD2_CHANGE_TO_EXCLUDE:
		to_report = true;
		fallthrough;
	case MLD2_MODE_IS_EXCLUDE:
		changed = br_multicast_eht_exc(brmctx, pg, h_addr, srcs, nsrcs,
					       sizeof(struct in6_addr),
					       to_report);
		break;
	}

	return changed;
}
#endif
/* true means an entry was deleted */
bool br_multicast_eht_handle(const struct net_bridge_mcast *brmctx,
			     struct net_bridge_port_group *pg,
			     void *h_addr,
			     void *srcs,
			     u32 nsrcs,
			     size_t addr_size,
			     int grec_type)
{
	bool eht_enabled = !!(pg->key.port->flags & BR_MULTICAST_FAST_LEAVE);
	union net_bridge_eht_addr eht_host_addr;
	bool changed = false;

	if (!eht_enabled)
		goto out;

	memset(&eht_host_addr, 0, sizeof(eht_host_addr));
	memcpy(&eht_host_addr, h_addr, addr_size);
	if (addr_size == sizeof(__be32))
		changed = __eht_ip4_handle(brmctx, pg, &eht_host_addr, srcs,
					   nsrcs, grec_type);
#if IS_ENABLED(CONFIG_IPV6)
	else
		changed = __eht_ip6_handle(brmctx, pg, &eht_host_addr, srcs,
					   nsrcs, grec_type);
#endif

out:
	return changed;
}
int br_multicast_eht_set_hosts_limit(struct net_bridge_port *p,
				     u32 eht_hosts_limit)
{
	struct net_bridge *br = p->br;

	if (!eht_hosts_limit)
		return -EINVAL;

	spin_lock_bh(&br->multicast_lock);
	p->multicast_eht_hosts_limit = eht_hosts_limit;
	spin_unlock_bh(&br->multicast_lock);

	return 0;
}