// SPDX-License-Identifier: GPL-2.0
#include <linux/err.h>
#include <linux/igmp.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <net/ip.h>
#include <net/netlink.h>
#include <net/switchdev.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/addrconf.h>
#endif

#include "br_private.h"
static bool
br_ip4_rports_get_timer(struct net_bridge_mcast_port *pmctx,
			unsigned long *timer)
{
	*timer = br_timer_value(&pmctx->ip4_mc_router_timer);
	return !hlist_unhashed(&pmctx->ip4_rlist);
}

static bool
br_ip6_rports_get_timer(struct net_bridge_mcast_port *pmctx,
			unsigned long *timer)
{
#if IS_ENABLED(CONFIG_IPV6)
	*timer = br_timer_value(&pmctx->ip6_mc_router_timer);
	return !hlist_unhashed(&pmctx->ip6_rlist);
#else
	*timer = 0;
	return false;
#endif
}
static size_t __br_rports_one_size(void)
{
	return nla_total_size(sizeof(u32)) + /* MDBA_ROUTER_PORT */
	       nla_total_size(sizeof(u32)) + /* MDBA_ROUTER_PATTR_TIMER */
	       nla_total_size(sizeof(u8)) +  /* MDBA_ROUTER_PATTR_TYPE */
	       nla_total_size(sizeof(u32)) + /* MDBA_ROUTER_PATTR_INET_TIMER */
	       nla_total_size(sizeof(u32)) + /* MDBA_ROUTER_PATTR_INET6_TIMER */
	       nla_total_size(sizeof(u32)); /* MDBA_ROUTER_PATTR_VID */
}
size_t br_rports_size(const struct net_bridge_mcast *brmctx)
{
	struct net_bridge_mcast_port *pmctx;
	size_t size = nla_total_size(0); /* MDBA_ROUTER */

	rcu_read_lock();
	hlist_for_each_entry_rcu(pmctx, &brmctx->ip4_mc_router_list,
				 ip4_rlist)
		size += __br_rports_one_size();

#if IS_ENABLED(CONFIG_IPV6)
	hlist_for_each_entry_rcu(pmctx, &brmctx->ip6_mc_router_list,
				 ip6_rlist)
		size += __br_rports_one_size();
#endif
	rcu_read_unlock();

	return size;
}
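/* Dump the bridge's multicast router ports into a nested MDBA_ROUTER
 * attribute. Runs under RCU from the MDB dump path; a per-vlan port
 * context is used when the bridge multicast context is a vlan one.
 */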
int br_rports_fill_info(struct sk_buff *skb,
			const struct net_bridge_mcast *brmctx)
{
	u16 vid = brmctx->vlan ? brmctx->vlan->vid : 0;
	bool have_ip4_mc_rtr, have_ip6_mc_rtr;
	unsigned long ip4_timer, ip6_timer;
	struct nlattr *nest, *port_nest;
	struct net_bridge_port *p;

	if (!brmctx->multicast_router || !br_rports_have_mc_router(brmctx))
		return 0;

	nest = nla_nest_start_noflag(skb, MDBA_ROUTER);
	if (nest == NULL)
		return -EMSGSIZE;

	list_for_each_entry_rcu(p, &brmctx->br->port_list, list) {
		struct net_bridge_mcast_port *pmctx;

		if (vid) {
			struct net_bridge_vlan *v;

			v = br_vlan_find(nbp_vlan_group(p), vid);
			if (!v)
				continue;
			pmctx = &v->port_mcast_ctx;
		} else {
			pmctx = &p->multicast_ctx;
		}

		have_ip4_mc_rtr = br_ip4_rports_get_timer(pmctx, &ip4_timer);
		have_ip6_mc_rtr = br_ip6_rports_get_timer(pmctx, &ip6_timer);

		if (!have_ip4_mc_rtr && !have_ip6_mc_rtr)
			continue;

		port_nest = nla_nest_start_noflag(skb, MDBA_ROUTER_PORT);
		if (!port_nest)
			goto fail;

		if (nla_put_nohdr(skb, sizeof(u32), &p->dev->ifindex) ||
		    nla_put_u32(skb, MDBA_ROUTER_PATTR_TIMER,
				max(ip4_timer, ip6_timer)) ||
		    nla_put_u8(skb, MDBA_ROUTER_PATTR_TYPE,
			       p->multicast_ctx.multicast_router) ||
		    (have_ip4_mc_rtr &&
		     nla_put_u32(skb, MDBA_ROUTER_PATTR_INET_TIMER,
				 ip4_timer)) ||
		    (have_ip6_mc_rtr &&
		     nla_put_u32(skb, MDBA_ROUTER_PATTR_INET6_TIMER,
				 ip6_timer)) ||
		    (vid && nla_put_u16(skb, MDBA_ROUTER_PATTR_VID, vid))) {
			nla_nest_cancel(skb, port_nest);
			goto fail;
		}

		nla_nest_end(skb, port_nest);
	}

	nla_nest_end(skb, nest);
	return 0;

fail:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}
static void __mdb_entry_fill_flags(struct br_mdb_entry *e, unsigned char flags)
{
	e->state = flags & MDB_PG_FLAGS_PERMANENT;
	e->flags = 0;
	if (flags & MDB_PG_FLAGS_OFFLOAD)
		e->flags |= MDB_FLAGS_OFFLOAD;
	if (flags & MDB_PG_FLAGS_FAST_LEAVE)
		e->flags |= MDB_FLAGS_FAST_LEAVE;
	if (flags & MDB_PG_FLAGS_STAR_EXCL)
		e->flags |= MDB_FLAGS_STAR_EXCL;
	if (flags & MDB_PG_FLAGS_BLOCKED)
		e->flags |= MDB_FLAGS_BLOCKED;
}
static void __mdb_entry_to_br_ip(struct br_mdb_entry *entry, struct br_ip *ip,
				 struct nlattr **mdb_attrs)
{
	memset(ip, 0, sizeof(struct br_ip));
	ip->vid = entry->vid;
	ip->proto = entry->addr.proto;
	switch (ip->proto) {
	case htons(ETH_P_IP):
		ip->dst.ip4 = entry->addr.u.ip4;
		if (mdb_attrs && mdb_attrs[MDBE_ATTR_SOURCE])
			ip->src.ip4 = nla_get_in_addr(mdb_attrs[MDBE_ATTR_SOURCE]);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ip->dst.ip6 = entry->addr.u.ip6;
		if (mdb_attrs && mdb_attrs[MDBE_ATTR_SOURCE])
			ip->src.ip6 = nla_get_in6_addr(mdb_attrs[MDBE_ATTR_SOURCE]);
		break;
#endif
	default:
		ether_addr_copy(ip->dst.mac_addr, entry->addr.u.mac_addr);
	}
}
static int __mdb_fill_srcs(struct sk_buff *skb,
			   struct net_bridge_port_group *p)
{
	struct net_bridge_group_src *ent;
	struct nlattr *nest, *nest_ent;

	if (hlist_empty(&p->src_list))
		return 0;

	nest = nla_nest_start(skb, MDBA_MDB_EATTR_SRC_LIST);
	if (!nest)
		return -EMSGSIZE;

	hlist_for_each_entry_rcu(ent, &p->src_list, node,
				 lockdep_is_held(&p->key.port->br->multicast_lock)) {
		nest_ent = nla_nest_start(skb, MDBA_MDB_SRCLIST_ENTRY);
		if (!nest_ent)
			goto out_cancel_err;
		switch (ent->addr.proto) {
		case htons(ETH_P_IP):
			if (nla_put_in_addr(skb, MDBA_MDB_SRCATTR_ADDRESS,
					    ent->addr.src.ip4)) {
				nla_nest_cancel(skb, nest_ent);
				goto out_cancel_err;
			}
			break;
#if IS_ENABLED(CONFIG_IPV6)
		case htons(ETH_P_IPV6):
			if (nla_put_in6_addr(skb, MDBA_MDB_SRCATTR_ADDRESS,
					     &ent->addr.src.ip6)) {
				nla_nest_cancel(skb, nest_ent);
				goto out_cancel_err;
			}
			break;
#endif
		default:
			nla_nest_cancel(skb, nest_ent);
			continue;
		}
		if (nla_put_u32(skb, MDBA_MDB_SRCATTR_TIMER,
				br_timer_value(&ent->timer))) {
			nla_nest_cancel(skb, nest_ent);
			goto out_cancel_err;
		}
		nla_nest_end(skb, nest_ent);
	}

	nla_nest_end(skb, nest);

	return 0;

out_cancel_err:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}
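/* Fill one MDBA_MDB_ENTRY_INFO attribute, either for the host-joined entry
 * (p == NULL, timer taken from the mdb entry itself) or for a single port
 * group. Source lists are only dumped when the configured IGMPv3/MLDv2
 * version can carry them.
 */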
static int __mdb_fill_info(struct sk_buff *skb,
			   struct net_bridge_mdb_entry *mp,
			   struct net_bridge_port_group *p)
{
	bool dump_srcs_mode = false;
	struct timer_list *mtimer;
	struct nlattr *nest_ent;
	struct br_mdb_entry e;
	u8 flags = 0;
	int ifindex;

	memset(&e, 0, sizeof(e));
	if (p) {
		ifindex = p->key.port->dev->ifindex;
		mtimer = &p->timer;
		flags = p->flags;
	} else {
		ifindex = mp->br->dev->ifindex;
		mtimer = &mp->timer;
	}

	__mdb_entry_fill_flags(&e, flags);
	e.ifindex = ifindex;
	e.vid = mp->addr.vid;
	if (mp->addr.proto == htons(ETH_P_IP)) {
		e.addr.u.ip4 = mp->addr.dst.ip4;
#if IS_ENABLED(CONFIG_IPV6)
	} else if (mp->addr.proto == htons(ETH_P_IPV6)) {
		e.addr.u.ip6 = mp->addr.dst.ip6;
#endif
	} else {
		ether_addr_copy(e.addr.u.mac_addr, mp->addr.dst.mac_addr);
		e.state = MDB_PERMANENT;
	}
	e.addr.proto = mp->addr.proto;
	nest_ent = nla_nest_start_noflag(skb,
					 MDBA_MDB_ENTRY_INFO);
	if (!nest_ent)
		return -EMSGSIZE;

	if (nla_put_nohdr(skb, sizeof(e), &e) ||
	    nla_put_u32(skb,
			MDBA_MDB_EATTR_TIMER,
			br_timer_value(mtimer)))
		goto nest_err;

	switch (mp->addr.proto) {
	case htons(ETH_P_IP):
		dump_srcs_mode = !!(mp->br->multicast_ctx.multicast_igmp_version == 3);
		if (mp->addr.src.ip4) {
			if (nla_put_in_addr(skb, MDBA_MDB_EATTR_SOURCE,
					    mp->addr.src.ip4))
				goto nest_err;
			break;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		dump_srcs_mode = !!(mp->br->multicast_ctx.multicast_mld_version == 2);
		if (!ipv6_addr_any(&mp->addr.src.ip6)) {
			if (nla_put_in6_addr(skb, MDBA_MDB_EATTR_SOURCE,
					     &mp->addr.src.ip6))
				goto nest_err;
			break;
		}
		break;
#endif
	default:
		ether_addr_copy(e.addr.u.mac_addr, mp->addr.dst.mac_addr);
	}
	if (p) {
		if (nla_put_u8(skb, MDBA_MDB_EATTR_RTPROT, p->rt_protocol))
			goto nest_err;
		if (dump_srcs_mode &&
		    (__mdb_fill_srcs(skb, p) ||
		     nla_put_u8(skb, MDBA_MDB_EATTR_GROUP_MODE,
				p->filter_mode)))
			goto nest_err;
	}

	nla_nest_end(skb, nest_ent);

	return 0;

nest_err:
	nla_nest_cancel(skb, nest_ent);
	return -EMSGSIZE;
}
static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
			    struct net_device *dev)
{
	int idx = 0, s_idx = cb->args[1], err = 0, pidx = 0, s_pidx = cb->args[2];
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_mdb_entry *mp;
	struct nlattr *nest, *nest2;

	nest = nla_nest_start_noflag(skb, MDBA_MDB);
	if (nest == NULL)
		return -EMSGSIZE;

	hlist_for_each_entry_rcu(mp, &br->mdb_list, mdb_node) {
		struct net_bridge_port_group *p;
		struct net_bridge_port_group __rcu **pp;

		if (idx < s_idx)
			goto skip;

		nest2 = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY);
		if (!nest2) {
			err = -EMSGSIZE;
			break;
		}

		if (!s_pidx && mp->host_joined) {
			err = __mdb_fill_info(skb, mp, NULL);
			if (err) {
				nla_nest_cancel(skb, nest2);
				break;
			}
		}

		for (pp = &mp->ports; (p = rcu_dereference(*pp)) != NULL;
		     pp = &p->next) {
			if (!p->key.port)
				continue;
			if (pidx < s_pidx)
				goto skip_pg;

			err = __mdb_fill_info(skb, mp, p);
			if (err) {
				nla_nest_end(skb, nest2);
				goto out;
			}
skip_pg:
			pidx++;
		}
		pidx = 0;
		s_pidx = 0;
		nla_nest_end(skb, nest2);
skip:
		idx++;
	}

out:
	cb->args[1] = idx;
	cb->args[2] = pidx;
	nla_nest_end(skb, nest);
	return err;
}
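/* RTM_GETMDB dump handler. br_mdb_fill_info() above stores the entry and
 * port-group positions in cb->args[1] and cb->args[2] so interrupted dumps
 * can resume where they stopped.
 */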
int br_mdb_dump(struct net_device *dev, struct sk_buff *skb,
		struct netlink_callback *cb)
{
	struct net_bridge *br = netdev_priv(dev);
	struct br_port_msg *bpm;
	struct nlmsghdr *nlh;
	int err;

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
			cb->nlh->nlmsg_seq, RTM_GETMDB, sizeof(*bpm),
			NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->ifindex = dev->ifindex;

	rcu_read_lock();

	err = br_mdb_fill_info(skb, cb, dev);
	if (err)
		goto out;
	err = br_rports_fill_info(skb, &br->multicast_ctx);
	if (err)
		goto out;

out:
	rcu_read_unlock();
	nlmsg_end(skb, nlh);
	return err;
}
static int nlmsg_populate_mdb_fill(struct sk_buff *skb,
				   struct net_device *dev,
				   struct net_bridge_mdb_entry *mp,
				   struct net_bridge_port_group *pg,
				   int type)
{
	struct nlmsghdr *nlh;
	struct br_port_msg *bpm;
	struct nlattr *nest, *nest2;

	nlh = nlmsg_put(skb, 0, 0, type, sizeof(*bpm), 0);
	if (!nlh)
		return -EMSGSIZE;

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->family  = AF_BRIDGE;
	bpm->ifindex = dev->ifindex;
	nest = nla_nest_start_noflag(skb, MDBA_MDB);
	if (nest == NULL)
		goto cancel;
	nest2 = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY);
	if (nest2 == NULL)
		goto end;

	if (__mdb_fill_info(skb, mp, pg))
		goto end;

	nla_nest_end(skb, nest2);
	nla_nest_end(skb, nest);
	nlmsg_end(skb, nlh);
	return 0;

end:
	nla_nest_end(skb, nest);
cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
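/* Worst-case netlink size of a single port group entry in a notification.
 * The source list is only accounted for when the configured IGMP/MLD
 * version can report sources, mirroring the checks in __mdb_fill_info().
 */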
static size_t rtnl_mdb_nlmsg_pg_size(const struct net_bridge_port_group *pg)
{
	struct net_bridge_group_src *ent;
	size_t nlmsg_size, addr_size = 0;

	/* MDBA_MDB_ENTRY_INFO */
	nlmsg_size = nla_total_size(sizeof(struct br_mdb_entry)) +
		     /* MDBA_MDB_EATTR_TIMER */
		     nla_total_size(sizeof(u32));

	if (!pg)
		goto out;

	/* MDBA_MDB_EATTR_RTPROT */
	nlmsg_size += nla_total_size(sizeof(u8));

	switch (pg->key.addr.proto) {
	case htons(ETH_P_IP):
		/* MDBA_MDB_EATTR_SOURCE */
		if (pg->key.addr.src.ip4)
			nlmsg_size += nla_total_size(sizeof(__be32));
		if (pg->key.port->br->multicast_ctx.multicast_igmp_version == 2)
			goto out;
		addr_size = sizeof(__be32);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		/* MDBA_MDB_EATTR_SOURCE */
		if (!ipv6_addr_any(&pg->key.addr.src.ip6))
			nlmsg_size += nla_total_size(sizeof(struct in6_addr));
		if (pg->key.port->br->multicast_ctx.multicast_mld_version == 1)
			goto out;
		addr_size = sizeof(struct in6_addr);
		break;
#endif
	}

	/* MDBA_MDB_EATTR_GROUP_MODE */
	nlmsg_size += nla_total_size(sizeof(u8));

	/* MDBA_MDB_EATTR_SRC_LIST nested attr */
	if (!hlist_empty(&pg->src_list))
		nlmsg_size += nla_total_size(0);

	hlist_for_each_entry(ent, &pg->src_list, node) {
		/* MDBA_MDB_SRCLIST_ENTRY nested attr +
		 * MDBA_MDB_SRCATTR_ADDRESS + MDBA_MDB_SRCATTR_TIMER
		 */
		nlmsg_size += nla_total_size(0) +
			      nla_total_size(addr_size) +
			      nla_total_size(sizeof(u32));
	}
out:
	return nlmsg_size;
}

static size_t rtnl_mdb_nlmsg_size(const struct net_bridge_port_group *pg)
{
	return NLMSG_ALIGN(sizeof(struct br_port_msg)) +
	       /* MDBA_MDB */
	       nla_total_size(0) +
	       /* MDBA_MDB_ENTRY */
	       nla_total_size(0) +
	       /* Port group entry */
	       rtnl_mdb_nlmsg_pg_size(pg);
}
void br_mdb_notify(struct net_device *dev,
		   struct net_bridge_mdb_entry *mp,
		   struct net_bridge_port_group *pg,
		   int type)
{
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	br_switchdev_mdb_notify(dev, mp, pg, type);

	skb = nlmsg_new(rtnl_mdb_nlmsg_size(pg), GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = nlmsg_populate_mdb_fill(skb, dev, mp, pg, type);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
	return;
errout:
	rtnl_set_sk_err(net, RTNLGRP_MDB, err);
}
static int nlmsg_populate_rtr_fill(struct sk_buff *skb,
				   struct net_device *dev,
				   int ifindex, u16 vid, u32 pid,
				   u32 seq, int type, unsigned int flags)
{
	struct nlattr *nest, *port_nest;
	struct br_port_msg *bpm;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), 0);
	if (!nlh)
		return -EMSGSIZE;

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->family = AF_BRIDGE;
	bpm->ifindex = dev->ifindex;
	nest = nla_nest_start_noflag(skb, MDBA_ROUTER);
	if (!nest)
		goto cancel;

	port_nest = nla_nest_start_noflag(skb, MDBA_ROUTER_PORT);
	if (!port_nest)
		goto end;
	if (nla_put_nohdr(skb, sizeof(u32), &ifindex)) {
		nla_nest_cancel(skb, port_nest);
		goto end;
	}
	if (vid && nla_put_u16(skb, MDBA_ROUTER_PATTR_VID, vid)) {
		nla_nest_cancel(skb, port_nest);
		goto end;
	}
	nla_nest_end(skb, port_nest);

	nla_nest_end(skb, nest);
	nlmsg_end(skb, nlh);
	return 0;

end:
	nla_nest_end(skb, nest);
cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static inline size_t rtnl_rtr_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct br_port_msg))
		+ nla_total_size(sizeof(__u32))
		+ nla_total_size(sizeof(u16));
}
void br_rtr_notify(struct net_device *dev, struct net_bridge_mcast_port *pmctx,
		   int type)
{
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;
	int ifindex;
	u16 vid;

	ifindex = pmctx ? pmctx->port->dev->ifindex : 0;
	vid = pmctx && br_multicast_port_ctx_is_vlan(pmctx) ? pmctx->vlan->vid :
							      0;
	skb = nlmsg_new(rtnl_rtr_nlmsg_size(), GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = nlmsg_populate_rtr_fill(skb, dev, ifindex, vid, 0, 0, type,
				      NTF_ROUTER);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
	return;

errout:
	rtnl_set_sk_err(net, RTNLGRP_MDB, err);
}
static const struct nla_policy
br_mdbe_src_list_entry_pol[MDBE_SRCATTR_MAX + 1] = {
	[MDBE_SRCATTR_ADDRESS] = NLA_POLICY_RANGE(NLA_BINARY,
						  sizeof(struct in_addr),
						  sizeof(struct in6_addr)),
};

static const struct nla_policy
br_mdbe_src_list_pol[MDBE_SRC_LIST_MAX + 1] = {
	[MDBE_SRC_LIST_ENTRY] = NLA_POLICY_NESTED(br_mdbe_src_list_entry_pol),
};

static const struct nla_policy br_mdbe_attrs_pol[MDBE_ATTR_MAX + 1] = {
	[MDBE_ATTR_SOURCE] = NLA_POLICY_RANGE(NLA_BINARY,
					      sizeof(struct in_addr),
					      sizeof(struct in6_addr)),
	[MDBE_ATTR_GROUP_MODE] = NLA_POLICY_RANGE(NLA_U8, MCAST_EXCLUDE,
						  MCAST_INCLUDE),
	[MDBE_ATTR_SRC_LIST] = NLA_POLICY_NESTED(br_mdbe_src_list_pol),
	[MDBE_ATTR_RTPROT] = NLA_POLICY_MIN(NLA_U8, RTPROT_STATIC),
};
static bool is_valid_mdb_source(struct nlattr *attr, __be16 proto,
				struct netlink_ext_ack *extack)
{
	switch (proto) {
	case htons(ETH_P_IP):
		if (nla_len(attr) != sizeof(struct in_addr)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv4 invalid source address length");
			return false;
		}
		if (ipv4_is_multicast(nla_get_in_addr(attr))) {
			NL_SET_ERR_MSG_MOD(extack, "IPv4 multicast source address is not allowed");
			return false;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6): {
		struct in6_addr src;

		if (nla_len(attr) != sizeof(struct in6_addr)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv6 invalid source address length");
			return false;
		}
		src = nla_get_in6_addr(attr);
		if (ipv6_addr_is_multicast(&src)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv6 multicast source address is not allowed");
			return false;
		}
		break;
	}
#endif
	default:
		NL_SET_ERR_MSG_MOD(extack, "Invalid protocol used with source address");
		return false;
	}

	return true;
}
static struct net_bridge_mcast *
__br_mdb_choose_context(struct net_bridge *br,
			const struct br_mdb_entry *entry,
			struct netlink_ext_ack *extack)
{
	struct net_bridge_mcast *brmctx = NULL;
	struct net_bridge_vlan *v;

	if (!br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED)) {
		brmctx = &br->multicast_ctx;
		goto out;
	}

	if (!entry->vid) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot add an entry without a vlan when vlan snooping is enabled");
		goto out;
	}

	v = br_vlan_find(br_vlan_group(br), entry->vid);
	if (!v) {
		NL_SET_ERR_MSG_MOD(extack, "Vlan is not configured");
		goto out;
	}
	if (br_multicast_ctx_vlan_global_disabled(&v->br_mcast_ctx)) {
		NL_SET_ERR_MSG_MOD(extack, "Vlan's multicast processing is disabled");
		goto out;
	}
	brmctx = &v->br_mcast_ctx;
out:
	return brmctx;
}
static int br_mdb_replace_group_sg(const struct br_mdb_config *cfg,
				   struct net_bridge_mdb_entry *mp,
				   struct net_bridge_port_group *pg,
				   struct net_bridge_mcast *brmctx,
				   unsigned char flags)
{
	unsigned long now = jiffies;

	pg->flags = flags;
	pg->rt_protocol = cfg->rt_protocol;
	if (!(flags & MDB_PG_FLAGS_PERMANENT) && !cfg->src_entry)
		mod_timer(&pg->timer,
			  now + brmctx->multicast_membership_interval);
	else
		del_timer(&pg->timer);

	br_mdb_notify(cfg->br->dev, mp, pg, RTM_NEWMDB);

	return 0;
}
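/* Add or (with NLM_F_REPLACE) update an (S, G) port group entry. The port
 * list is kept sorted by port pointer, so insertion stops at the first
 * entry with a smaller port.
 */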
static int br_mdb_add_group_sg(const struct br_mdb_config *cfg,
			       struct net_bridge_mdb_entry *mp,
			       struct net_bridge_mcast *brmctx,
			       unsigned char flags,
			       struct netlink_ext_ack *extack)
{
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p;
	unsigned long now = jiffies;

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, cfg->br)) != NULL;
	     pp = &p->next) {
		if (p->key.port == cfg->p) {
			if (!(cfg->nlflags & NLM_F_REPLACE)) {
				NL_SET_ERR_MSG_MOD(extack, "(S, G) group is already joined by port");
				return -EEXIST;
			}
			return br_mdb_replace_group_sg(cfg, mp, p, brmctx,
						       flags);
		}
		if ((unsigned long)p->key.port < (unsigned long)cfg->p)
			break;
	}

	p = br_multicast_new_port_group(cfg->p, &cfg->group, *pp, flags, NULL,
					MCAST_INCLUDE, cfg->rt_protocol, extack);
	if (unlikely(!p))
		return -ENOMEM;

	rcu_assign_pointer(*pp, p);
	if (!(flags & MDB_PG_FLAGS_PERMANENT) && !cfg->src_entry)
		mod_timer(&p->timer,
			  now + brmctx->multicast_membership_interval);
	br_mdb_notify(cfg->br->dev, mp, p, RTM_NEWMDB);

	/* All of (*, G) EXCLUDE ports need to be added to the new (S, G) for
	 * proper replication.
	 */
	if (br_multicast_should_handle_mode(brmctx, cfg->group.proto)) {
		struct net_bridge_mdb_entry *star_mp;
		struct br_ip star_group;

		star_group = p->key.addr;
		memset(&star_group.src, 0, sizeof(star_group.src));
		star_mp = br_mdb_ip_get(cfg->br, &star_group);
		if (star_mp)
			br_multicast_sg_add_exclude_ports(star_mp, p);
	}

	return 0;
}
static int br_mdb_add_group_src_fwd(const struct br_mdb_config *cfg,
				    struct br_ip *src_ip,
				    struct net_bridge_mcast *brmctx,
				    struct netlink_ext_ack *extack)
{
	struct net_bridge_mdb_entry *sgmp;
	struct br_mdb_config sg_cfg;
	struct br_ip sg_ip;
	u8 flags = 0;

	sg_ip = cfg->group;
	sg_ip.src = src_ip->src;
	sgmp = br_multicast_new_group(cfg->br, &sg_ip);
	if (IS_ERR(sgmp)) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to add (S, G) MDB entry");
		return PTR_ERR(sgmp);
	}

	if (cfg->entry->state == MDB_PERMANENT)
		flags |= MDB_PG_FLAGS_PERMANENT;
	if (cfg->filter_mode == MCAST_EXCLUDE)
		flags |= MDB_PG_FLAGS_BLOCKED;

	memset(&sg_cfg, 0, sizeof(sg_cfg));
	sg_cfg.br = cfg->br;
	sg_cfg.p = cfg->p;
	sg_cfg.entry = cfg->entry;
	sg_cfg.group = sg_ip;
	sg_cfg.src_entry = true;
	sg_cfg.filter_mode = MCAST_INCLUDE;
	sg_cfg.rt_protocol = cfg->rt_protocol;
	sg_cfg.nlflags = cfg->nlflags;
	return br_mdb_add_group_sg(&sg_cfg, sgmp, brmctx, flags, extack);
}
static int br_mdb_add_group_src(const struct br_mdb_config *cfg,
				struct net_bridge_port_group *pg,
				struct net_bridge_mcast *brmctx,
				struct br_mdb_src_entry *src,
				struct netlink_ext_ack *extack)
{
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	int err;

	ent = br_multicast_find_group_src(pg, &src->addr);
	if (!ent) {
		ent = br_multicast_new_group_src(pg, &src->addr);
		if (!ent) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to add new source entry");
			return -ENOSPC;
		}
	} else if (!(cfg->nlflags & NLM_F_REPLACE)) {
		NL_SET_ERR_MSG_MOD(extack, "Source entry already exists");
		return -EEXIST;
	}

	if (cfg->filter_mode == MCAST_INCLUDE &&
	    cfg->entry->state == MDB_TEMPORARY)
		mod_timer(&ent->timer, now + br_multicast_gmi(brmctx));
	else
		del_timer(&ent->timer);

	/* Install a (S, G) forwarding entry for the source. */
	err = br_mdb_add_group_src_fwd(cfg, &src->addr, brmctx, extack);
	if (err)
		goto err_del_sg;

	ent->flags = BR_SGRP_F_INSTALLED | BR_SGRP_F_USER_ADDED;

	return 0;

err_del_sg:
	__br_multicast_del_group_src(ent);
	return err;
}
static void br_mdb_del_group_src(struct net_bridge_port_group *pg,
				 struct br_mdb_src_entry *src)
{
	struct net_bridge_group_src *ent;

	ent = br_multicast_find_group_src(pg, &src->addr);
	if (WARN_ON_ONCE(!ent))
		return;
	br_multicast_del_group_src(ent, false);
}

static int br_mdb_add_group_srcs(const struct br_mdb_config *cfg,
				 struct net_bridge_port_group *pg,
				 struct net_bridge_mcast *brmctx,
				 struct netlink_ext_ack *extack)
{
	int i, err;

	for (i = 0; i < cfg->num_src_entries; i++) {
		err = br_mdb_add_group_src(cfg, pg, brmctx,
					   &cfg->src_entries[i], extack);
		if (err)
			goto err_del_group_srcs;
	}

	return 0;

err_del_group_srcs:
	for (i--; i >= 0; i--)
		br_mdb_del_group_src(pg, &cfg->src_entries[i]);
	return err;
}
static int br_mdb_replace_group_srcs(const struct br_mdb_config *cfg,
				     struct net_bridge_port_group *pg,
				     struct net_bridge_mcast *brmctx,
				     struct netlink_ext_ack *extack)
{
	struct net_bridge_group_src *ent;
	struct hlist_node *tmp;
	int err;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags |= BR_SGRP_F_DELETE;

	err = br_mdb_add_group_srcs(cfg, pg, brmctx, extack);
	if (err)
		goto err_clear_delete;

	hlist_for_each_entry_safe(ent, tmp, &pg->src_list, node) {
		if (ent->flags & BR_SGRP_F_DELETE)
			br_multicast_del_group_src(ent, false);
	}

	return 0;

err_clear_delete:
	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags &= ~BR_SGRP_F_DELETE;
	return err;
}
static int br_mdb_replace_group_star_g(const struct br_mdb_config *cfg,
				       struct net_bridge_mdb_entry *mp,
				       struct net_bridge_port_group *pg,
				       struct net_bridge_mcast *brmctx,
				       unsigned char flags,
				       struct netlink_ext_ack *extack)
{
	unsigned long now = jiffies;
	int err;

	err = br_mdb_replace_group_srcs(cfg, pg, brmctx, extack);
	if (err)
		return err;

	pg->flags = flags;
	pg->filter_mode = cfg->filter_mode;
	pg->rt_protocol = cfg->rt_protocol;
	if (!(flags & MDB_PG_FLAGS_PERMANENT) &&
	    cfg->filter_mode == MCAST_EXCLUDE)
		mod_timer(&pg->timer,
			  now + brmctx->multicast_membership_interval);
	else
		del_timer(&pg->timer);

	br_mdb_notify(cfg->br->dev, mp, pg, RTM_NEWMDB);

	if (br_multicast_should_handle_mode(brmctx, cfg->group.proto))
		br_multicast_star_g_handle_mode(pg, cfg->filter_mode);

	return 0;
}
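/* Add or (with NLM_F_REPLACE) update a (*, G) port group entry, including
 * its user-supplied source list.
 */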
static int br_mdb_add_group_star_g(const struct br_mdb_config *cfg,
				   struct net_bridge_mdb_entry *mp,
				   struct net_bridge_mcast *brmctx,
				   unsigned char flags,
				   struct netlink_ext_ack *extack)
{
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p;
	unsigned long now = jiffies;
	int err;

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, cfg->br)) != NULL;
	     pp = &p->next) {
		if (p->key.port == cfg->p) {
			if (!(cfg->nlflags & NLM_F_REPLACE)) {
				NL_SET_ERR_MSG_MOD(extack, "(*, G) group is already joined by port");
				return -EEXIST;
			}
			return br_mdb_replace_group_star_g(cfg, mp, p, brmctx,
							   flags, extack);
		}
		if ((unsigned long)p->key.port < (unsigned long)cfg->p)
			break;
	}

	p = br_multicast_new_port_group(cfg->p, &cfg->group, *pp, flags, NULL,
					cfg->filter_mode, cfg->rt_protocol,
					extack);
	if (unlikely(!p))
		return -ENOMEM;

	err = br_mdb_add_group_srcs(cfg, p, brmctx, extack);
	if (err)
		goto err_del_port_group;

	rcu_assign_pointer(*pp, p);
	if (!(flags & MDB_PG_FLAGS_PERMANENT) &&
	    cfg->filter_mode == MCAST_EXCLUDE)
		mod_timer(&p->timer,
			  now + brmctx->multicast_membership_interval);
	br_mdb_notify(cfg->br->dev, mp, p, RTM_NEWMDB);
	/* If we are adding a new EXCLUDE port group (*, G), it needs to be
	 * also added to all (S, G) entries for proper replication.
	 */
	if (br_multicast_should_handle_mode(brmctx, cfg->group.proto) &&
	    cfg->filter_mode == MCAST_EXCLUDE)
		br_multicast_star_g_handle_mode(p, MCAST_EXCLUDE);

	return 0;

err_del_port_group:
	br_multicast_del_port_group(p);
	return err;
}
static int br_mdb_add_group(const struct br_mdb_config *cfg,
			    struct netlink_ext_ack *extack)
{
	struct br_mdb_entry *entry = cfg->entry;
	struct net_bridge_port *port = cfg->p;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge *br = cfg->br;
	struct net_bridge_mcast *brmctx;
	struct br_ip group = cfg->group;
	unsigned char flags = 0;

	brmctx = __br_mdb_choose_context(br, entry, extack);
	if (!brmctx)
		return -EINVAL;

	mp = br_multicast_new_group(br, &group);
	if (IS_ERR(mp))
		return PTR_ERR(mp);

	/* host join */
	if (!port) {
		if (mp->host_joined) {
			NL_SET_ERR_MSG_MOD(extack, "Group is already joined by host");
			return -EEXIST;
		}

		br_multicast_host_join(brmctx, mp, false);
		br_mdb_notify(br->dev, mp, NULL, RTM_NEWMDB);

		return 0;
	}

	if (entry->state == MDB_PERMANENT)
		flags |= MDB_PG_FLAGS_PERMANENT;

	if (br_multicast_is_star_g(&group))
		return br_mdb_add_group_star_g(cfg, mp, brmctx, flags, extack);
	else
		return br_mdb_add_group_sg(cfg, mp, brmctx, flags, extack);
}

static int __br_mdb_add(const struct br_mdb_config *cfg,
			struct netlink_ext_ack *extack)
{
	int ret;

	spin_lock_bh(&cfg->br->multicast_lock);
	ret = br_mdb_add_group(cfg, extack);
	spin_unlock_bh(&cfg->br->multicast_lock);

	return ret;
}
static int br_mdb_config_src_entry_init(struct nlattr *src_entry,
					struct br_mdb_src_entry *src,
					__be16 proto,
					struct netlink_ext_ack *extack)
{
	struct nlattr *tb[MDBE_SRCATTR_MAX + 1];
	int err;

	err = nla_parse_nested(tb, MDBE_SRCATTR_MAX, src_entry,
			       br_mdbe_src_list_entry_pol, extack);
	if (err)
		return err;

	if (NL_REQ_ATTR_CHECK(extack, src_entry, tb, MDBE_SRCATTR_ADDRESS))
		return -EINVAL;

	if (!is_valid_mdb_source(tb[MDBE_SRCATTR_ADDRESS], proto, extack))
		return -EINVAL;

	src->addr.proto = proto;
	nla_memcpy(&src->addr.src, tb[MDBE_SRCATTR_ADDRESS],
		   nla_len(tb[MDBE_SRCATTR_ADDRESS]));

	return 0;
}

static int br_mdb_config_src_list_init(struct nlattr *src_list,
				       struct br_mdb_config *cfg,
				       struct netlink_ext_ack *extack)
{
	struct nlattr *src_entry;
	int rem, err;
	int i = 0;

	nla_for_each_nested(src_entry, src_list, rem)
		cfg->num_src_entries++;

	if (cfg->num_src_entries >= PG_SRC_ENT_LIMIT) {
		NL_SET_ERR_MSG_FMT_MOD(extack, "Exceeded maximum number of source entries (%u)",
				       PG_SRC_ENT_LIMIT - 1);
		return -EINVAL;
	}

	cfg->src_entries = kcalloc(cfg->num_src_entries,
				   sizeof(struct br_mdb_src_entry), GFP_KERNEL);
	if (!cfg->src_entries)
		return -ENOMEM;

	nla_for_each_nested(src_entry, src_list, rem) {
		err = br_mdb_config_src_entry_init(src_entry,
						   &cfg->src_entries[i],
						   cfg->entry->addr.proto,
						   extack);
		if (err)
			goto err_src_entry_init;
		i++;
	}

	return 0;

err_src_entry_init:
	kfree(cfg->src_entries);
	return err;
}

static void br_mdb_config_src_list_fini(struct br_mdb_config *cfg)
{
	kfree(cfg->src_entries);
}
static int br_mdb_config_attrs_init(struct nlattr *set_attrs,
				    struct br_mdb_config *cfg,
				    struct netlink_ext_ack *extack)
{
	struct nlattr *mdb_attrs[MDBE_ATTR_MAX + 1];
	int err;

	err = nla_parse_nested(mdb_attrs, MDBE_ATTR_MAX, set_attrs,
			       br_mdbe_attrs_pol, extack);
	if (err)
		return err;

	if (mdb_attrs[MDBE_ATTR_SOURCE] &&
	    !is_valid_mdb_source(mdb_attrs[MDBE_ATTR_SOURCE],
				 cfg->entry->addr.proto, extack))
		return -EINVAL;

	__mdb_entry_to_br_ip(cfg->entry, &cfg->group, mdb_attrs);

	if (mdb_attrs[MDBE_ATTR_GROUP_MODE]) {
		if (!cfg->p) {
			NL_SET_ERR_MSG_MOD(extack, "Filter mode cannot be set for host groups");
			return -EINVAL;
		}
		if (!br_multicast_is_star_g(&cfg->group)) {
			NL_SET_ERR_MSG_MOD(extack, "Filter mode can only be set for (*, G) entries");
			return -EINVAL;
		}
		cfg->filter_mode = nla_get_u8(mdb_attrs[MDBE_ATTR_GROUP_MODE]);
	} else {
		cfg->filter_mode = MCAST_EXCLUDE;
	}

	if (mdb_attrs[MDBE_ATTR_SRC_LIST]) {
		if (!cfg->p) {
			NL_SET_ERR_MSG_MOD(extack, "Source list cannot be set for host groups");
			return -EINVAL;
		}
		if (!br_multicast_is_star_g(&cfg->group)) {
			NL_SET_ERR_MSG_MOD(extack, "Source list can only be set for (*, G) entries");
			return -EINVAL;
		}
		if (!mdb_attrs[MDBE_ATTR_GROUP_MODE]) {
			NL_SET_ERR_MSG_MOD(extack, "Source list cannot be set without filter mode");
			return -EINVAL;
		}
		err = br_mdb_config_src_list_init(mdb_attrs[MDBE_ATTR_SRC_LIST],
						  cfg, extack);
		if (err)
			return err;
	}

	if (!cfg->num_src_entries && cfg->filter_mode == MCAST_INCLUDE) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot add (*, G) INCLUDE with an empty source list");
		return -EINVAL;
	}

	if (mdb_attrs[MDBE_ATTR_RTPROT]) {
		if (!cfg->p) {
			NL_SET_ERR_MSG_MOD(extack, "Protocol cannot be set for host groups");
			return -EINVAL;
		}
		cfg->rt_protocol = nla_get_u8(mdb_attrs[MDBE_ATTR_RTPROT]);
	}

	return 0;
}
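/* Parse and validate an RTM_NEWMDB/RTM_DELMDB request into a br_mdb_config.
 * Callers must release the source list with br_mdb_config_fini() when done.
 */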
static int br_mdb_config_init(struct br_mdb_config *cfg, struct net_device *dev,
			      struct nlattr *tb[], u16 nlmsg_flags,
			      struct netlink_ext_ack *extack)
{
	struct net *net = dev_net(dev);

	memset(cfg, 0, sizeof(*cfg));
	cfg->filter_mode = MCAST_EXCLUDE;
	cfg->rt_protocol = RTPROT_STATIC;
	cfg->nlflags = nlmsg_flags;

	cfg->br = netdev_priv(dev);

	if (!netif_running(cfg->br->dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Bridge device is not running");
		return -EINVAL;
	}

	if (!br_opt_get(cfg->br, BROPT_MULTICAST_ENABLED)) {
		NL_SET_ERR_MSG_MOD(extack, "Bridge's multicast processing is disabled");
		return -EINVAL;
	}

	cfg->entry = nla_data(tb[MDBA_SET_ENTRY]);

	if (cfg->entry->ifindex != cfg->br->dev->ifindex) {
		struct net_device *pdev;

		pdev = __dev_get_by_index(net, cfg->entry->ifindex);
		if (!pdev) {
			NL_SET_ERR_MSG_MOD(extack, "Port net device doesn't exist");
			return -ENODEV;
		}

		cfg->p = br_port_get_rtnl(pdev);
		if (!cfg->p) {
			NL_SET_ERR_MSG_MOD(extack, "Net device is not a bridge port");
			return -EINVAL;
		}

		if (cfg->p->br != cfg->br) {
			NL_SET_ERR_MSG_MOD(extack, "Port belongs to a different bridge device");
			return -EINVAL;
		}
	}

	if (cfg->entry->addr.proto == htons(ETH_P_IP) &&
	    ipv4_is_zeronet(cfg->entry->addr.u.ip4)) {
		NL_SET_ERR_MSG_MOD(extack, "IPv4 entry group address 0.0.0.0 is not allowed");
		return -EINVAL;
	}

	if (tb[MDBA_SET_ENTRY_ATTRS])
		return br_mdb_config_attrs_init(tb[MDBA_SET_ENTRY_ATTRS], cfg,
						extack);
	else
		__mdb_entry_to_br_ip(cfg->entry, &cfg->group, NULL);

	return 0;
}

static void br_mdb_config_fini(struct br_mdb_config *cfg)
{
	br_mdb_config_src_list_fini(cfg);
}
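/* RTM_NEWMDB handler. For illustration (assumed iproute2 invocation, not
 * part of this file), a request like the following ends up here:
 *
 *	bridge mdb add dev br0 port swp1 grp 239.1.1.1 permanent
 */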
int br_mdb_add(struct net_device *dev, struct nlattr *tb[], u16 nlmsg_flags,
	       struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;
	struct br_mdb_config cfg;
	int err;

	err = br_mdb_config_init(&cfg, dev, tb, nlmsg_flags, extack);
	if (err)
		return err;

	err = -EINVAL;
	/* host join errors which can happen before creating the group */
	if (!cfg.p && !br_group_is_l2(&cfg.group)) {
		/* don't allow any flags for host-joined IP groups */
		if (cfg.entry->state) {
			NL_SET_ERR_MSG_MOD(extack, "Flags are not allowed for host groups");
			goto out;
		}
		if (!br_multicast_is_star_g(&cfg.group)) {
			NL_SET_ERR_MSG_MOD(extack, "Groups with sources cannot be manually host joined");
			goto out;
		}
	}

	if (br_group_is_l2(&cfg.group) && cfg.entry->state != MDB_PERMANENT) {
		NL_SET_ERR_MSG_MOD(extack, "Only permanent L2 entries allowed");
		goto out;
	}

	if (cfg.p) {
		if (cfg.p->state == BR_STATE_DISABLED && cfg.entry->state != MDB_PERMANENT) {
			NL_SET_ERR_MSG_MOD(extack, "Port is in disabled state and entry is not permanent");
			goto out;
		}
		vg = nbp_vlan_group(cfg.p);
	} else {
		vg = br_vlan_group(cfg.br);
	}

	/* If vlan filtering is enabled and VLAN is not specified
	 * install mdb entry on all vlans configured on the port.
	 */
	if (br_vlan_enabled(cfg.br->dev) && vg && cfg.entry->vid == 0) {
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			cfg.entry->vid = v->vid;
			cfg.group.vid = v->vid;
			err = __br_mdb_add(&cfg, extack);
			if (err)
				break;
		}
	} else {
		err = __br_mdb_add(&cfg, extack);
	}

out:
	br_mdb_config_fini(&cfg);
	return err;
}
static int __br_mdb_del(const struct br_mdb_config *cfg)
{
	struct br_mdb_entry *entry = cfg->entry;
	struct net_bridge *br = cfg->br;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip ip = cfg->group;
	int err = -EINVAL;

	spin_lock_bh(&br->multicast_lock);
	mp = br_mdb_ip_get(br, &ip);
	if (!mp)
		goto unlock;

	/* host leave */
	if (entry->ifindex == mp->br->dev->ifindex && mp->host_joined) {
		br_multicast_host_leave(mp, false);
		err = 0;
		br_mdb_notify(br->dev, mp, NULL, RTM_DELMDB);
		if (!mp->ports && netif_running(br->dev))
			mod_timer(&mp->timer, jiffies);
		goto unlock;
	}

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (!p->key.port || p->key.port->dev->ifindex != entry->ifindex)
			continue;

		br_multicast_del_pg(mp, p, pp);
		err = 0;
		break;
	}

unlock:
	spin_unlock_bh(&br->multicast_lock);
	return err;
}
int br_mdb_del(struct net_device *dev, struct nlattr *tb[],
	       struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;
	struct br_mdb_config cfg;
	int err;

	err = br_mdb_config_init(&cfg, dev, tb, 0, extack);
	if (err)
		return err;

	if (cfg.p)
		vg = nbp_vlan_group(cfg.p);
	else
		vg = br_vlan_group(cfg.br);

	/* If vlan filtering is enabled and VLAN is not specified
	 * delete mdb entry on all vlans configured on the port.
	 */
	if (br_vlan_enabled(cfg.br->dev) && vg && cfg.entry->vid == 0) {
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			cfg.entry->vid = v->vid;
			cfg.group.vid = v->vid;
			err = __br_mdb_del(&cfg);
		}
	} else {
		err = __br_mdb_del(&cfg);
	}

	br_mdb_config_fini(&cfg);
	return err;
}
struct br_mdb_flush_desc {
	u32 port_ifindex;
	u16 vid;
	u8 rt_protocol;
	u8 state;
	u8 state_mask;
};

static const struct nla_policy br_mdbe_attrs_del_bulk_pol[MDBE_ATTR_MAX + 1] = {
	[MDBE_ATTR_RTPROT] = NLA_POLICY_MIN(NLA_U8, RTPROT_STATIC),
	[MDBE_ATTR_STATE_MASK] = NLA_POLICY_MASK(NLA_U8, MDB_PERMANENT),
};
static int br_mdb_flush_desc_init(struct br_mdb_flush_desc *desc,
				  struct nlattr *tb[],
				  struct netlink_ext_ack *extack)
{
	struct br_mdb_entry *entry = nla_data(tb[MDBA_SET_ENTRY]);
	struct nlattr *mdbe_attrs[MDBE_ATTR_MAX + 1];
	int err;

	desc->port_ifindex = entry->ifindex;
	desc->vid = entry->vid;
	desc->state = entry->state;

	if (!tb[MDBA_SET_ENTRY_ATTRS])
		return 0;

	err = nla_parse_nested(mdbe_attrs, MDBE_ATTR_MAX,
			       tb[MDBA_SET_ENTRY_ATTRS],
			       br_mdbe_attrs_del_bulk_pol, extack);
	if (err)
		return err;

	if (mdbe_attrs[MDBE_ATTR_STATE_MASK])
		desc->state_mask = nla_get_u8(mdbe_attrs[MDBE_ATTR_STATE_MASK]);

	if (mdbe_attrs[MDBE_ATTR_RTPROT])
		desc->rt_protocol = nla_get_u8(mdbe_attrs[MDBE_ATTR_RTPROT]);

	return 0;
}
static void br_mdb_flush_host(struct net_bridge *br,
			      struct net_bridge_mdb_entry *mp,
			      const struct br_mdb_flush_desc *desc)
{
	u8 state;

	if (desc->port_ifindex && desc->port_ifindex != br->dev->ifindex)
		return;

	if (desc->rt_protocol)
		return;

	state = br_group_is_l2(&mp->addr) ? MDB_PERMANENT : 0;
	if (desc->state_mask && (state & desc->state_mask) != desc->state)
		return;

	br_multicast_host_leave(mp, true);
	if (!mp->ports && netif_running(br->dev))
		mod_timer(&mp->timer, jiffies);
}
static void br_mdb_flush_pgs(struct net_bridge *br,
			     struct net_bridge_mdb_entry *mp,
			     const struct br_mdb_flush_desc *desc)
{
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p;

	for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL;) {
		u8 state;

		if (desc->port_ifindex &&
		    desc->port_ifindex != p->key.port->dev->ifindex) {
			pp = &p->next;
			continue;
		}

		if (desc->rt_protocol && desc->rt_protocol != p->rt_protocol) {
			pp = &p->next;
			continue;
		}

		state = p->flags & MDB_PG_FLAGS_PERMANENT ? MDB_PERMANENT : 0;
		if (desc->state_mask &&
		    (state & desc->state_mask) != desc->state) {
			pp = &p->next;
			continue;
		}

		br_multicast_del_pg(mp, p, pp);
	}
}
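/* Walk the whole MDB and remove every host entry and port group matching
 * the bulk-delete descriptor.
 */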
static void br_mdb_flush(struct net_bridge *br,
			 const struct br_mdb_flush_desc *desc)
{
	struct net_bridge_mdb_entry *mp;

	spin_lock_bh(&br->multicast_lock);

	/* Safe variant is not needed because entries are removed from the list
	 * upon group timer expiration or bridge deletion.
	 */
	hlist_for_each_entry(mp, &br->mdb_list, mdb_node) {
		if (desc->vid && desc->vid != mp->addr.vid)
			continue;

		br_mdb_flush_host(br, mp, desc);
		br_mdb_flush_pgs(br, mp, desc);
	}

	spin_unlock_bh(&br->multicast_lock);
}

int br_mdb_del_bulk(struct net_device *dev, struct nlattr *tb[],
		    struct netlink_ext_ack *extack)
{
	struct net_bridge *br = netdev_priv(dev);
	struct br_mdb_flush_desc desc = {};
	int err;

	err = br_mdb_flush_desc_init(&desc, tb, extack);
	if (err)
		return err;

	br_mdb_flush(br, &desc);

	return 0;
}
static const struct nla_policy br_mdbe_attrs_get_pol[MDBE_ATTR_MAX + 1] = {
	[MDBE_ATTR_SOURCE] = NLA_POLICY_RANGE(NLA_BINARY,
					      sizeof(struct in_addr),
					      sizeof(struct in6_addr)),
};

static int br_mdb_get_parse(struct net_device *dev, struct nlattr *tb[],
			    struct br_ip *group, struct netlink_ext_ack *extack)
{
	struct br_mdb_entry *entry = nla_data(tb[MDBA_GET_ENTRY]);
	struct nlattr *mdbe_attrs[MDBE_ATTR_MAX + 1];
	int err;

	if (!tb[MDBA_GET_ENTRY_ATTRS]) {
		__mdb_entry_to_br_ip(entry, group, NULL);
		return 0;
	}

	err = nla_parse_nested(mdbe_attrs, MDBE_ATTR_MAX,
			       tb[MDBA_GET_ENTRY_ATTRS], br_mdbe_attrs_get_pol,
			       extack);
	if (err)
		return err;

	if (mdbe_attrs[MDBE_ATTR_SOURCE] &&
	    !is_valid_mdb_source(mdbe_attrs[MDBE_ATTR_SOURCE],
				 entry->addr.proto, extack))
		return -EINVAL;

	__mdb_entry_to_br_ip(entry, group, mdbe_attrs);

	return 0;
}
static struct sk_buff *
br_mdb_get_reply_alloc(const struct net_bridge_mdb_entry *mp)
{
	struct net_bridge_port_group *pg;
	size_t nlmsg_size;

	nlmsg_size = NLMSG_ALIGN(sizeof(struct br_port_msg)) +
		     /* MDBA_MDB */
		     nla_total_size(0) +
		     /* MDBA_MDB_ENTRY */
		     nla_total_size(0);

	if (mp->host_joined)
		nlmsg_size += rtnl_mdb_nlmsg_pg_size(NULL);

	for (pg = mlock_dereference(mp->ports, mp->br); pg;
	     pg = mlock_dereference(pg->next, mp->br))
		nlmsg_size += rtnl_mdb_nlmsg_pg_size(pg);

	return nlmsg_new(nlmsg_size, GFP_ATOMIC);
}
static int br_mdb_get_reply_fill(struct sk_buff *skb,
				 struct net_bridge_mdb_entry *mp, u32 portid,
				 u32 seq)
{
	struct nlattr *mdb_nest, *mdb_entry_nest;
	struct net_bridge_port_group *pg;
	struct br_port_msg *bpm;
	struct nlmsghdr *nlh;
	int err;

	nlh = nlmsg_put(skb, portid, seq, RTM_NEWMDB, sizeof(*bpm), 0);
	if (!nlh)
		return -EMSGSIZE;

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->family = AF_BRIDGE;
	bpm->ifindex = mp->br->dev->ifindex;
	mdb_nest = nla_nest_start_noflag(skb, MDBA_MDB);
	if (!mdb_nest) {
		err = -EMSGSIZE;
		goto cancel;
	}
	mdb_entry_nest = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY);
	if (!mdb_entry_nest) {
		err = -EMSGSIZE;
		goto cancel;
	}

	if (mp->host_joined) {
		err = __mdb_fill_info(skb, mp, NULL);
		if (err)
			goto cancel;
	}

	for (pg = mlock_dereference(mp->ports, mp->br); pg;
	     pg = mlock_dereference(pg->next, mp->br)) {
		err = __mdb_fill_info(skb, mp, pg);
		if (err)
			goto cancel;
	}

	nla_nest_end(skb, mdb_entry_nest);
	nla_nest_end(skb, mdb_nest);
	nlmsg_end(skb, nlh);

	return 0;

cancel:
	nlmsg_cancel(skb, nlh);
	return err;
}
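/* RTM_GETMDB (single entry) handler: look up one MDB entry and unicast a
 * reply to the requester.
 */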
int br_mdb_get(struct net_device *dev, struct nlattr *tb[], u32 portid, u32 seq,
	       struct netlink_ext_ack *extack)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_mdb_entry *mp;
	struct sk_buff *skb;
	struct br_ip group;
	int err;

	err = br_mdb_get_parse(dev, tb, &group, extack);
	if (err)
		return err;

	/* Hold the multicast lock to ensure that the MDB entry does not change
	 * between the time the reply size is determined and when the reply is
	 * filled in.
	 */
	spin_lock_bh(&br->multicast_lock);

	mp = br_mdb_ip_get(br, &group);
	if (!mp || (!mp->ports && !mp->host_joined)) {
		NL_SET_ERR_MSG_MOD(extack, "MDB entry not found");
		err = -ENOENT;
		goto unlock;
	}

	skb = br_mdb_get_reply_alloc(mp);
	if (!skb) {
		err = -ENOMEM;
		goto unlock;
	}

	err = br_mdb_get_reply_fill(skb, mp, portid, seq);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to fill MDB get reply");
		goto free;
	}

	spin_unlock_bh(&br->multicast_lock);

	return rtnl_unicast(skb, dev_net(dev), portid);

free:
	kfree_skb(skb);
unlock:
	spin_unlock_bh(&br->multicast_lock);
	return err;
}