#include <linux/err.h>
#include <linux/igmp.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <net/ip.h>
#include <net/netlink.h>
#include <net/switchdev.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/addrconf.h>
#endif

#include "br_private.h"
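
/* Walk the bridge's multicast router port list under RCU and emit it
 * as an MDBA_ROUTER nested attribute of MDBA_ROUTER_PORT ifindexes.
 */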
static int br_rports_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
			       struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_port *p;
	struct nlattr *nest;

	if (!br->multicast_router || hlist_empty(&br->router_list))
		return 0;

	nest = nla_nest_start(skb, MDBA_ROUTER);
	if (nest == NULL)
		return -EMSGSIZE;

	hlist_for_each_entry_rcu(p, &br->router_list, rlist) {
		if (p && nla_put_u32(skb, MDBA_ROUTER_PORT, p->dev->ifindex))
			goto fail;
	}

	nla_nest_end(skb, nest);
	return 0;

fail:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}
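
/* Fill MDBA_MDB with one MDBA_MDB_ENTRY per (group, port) pair found in
 * the bridge's MDB hash table, resuming from cb->args[1] across calls.
 */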
static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
			    struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_mdb_htable *mdb;
	struct nlattr *nest, *nest2;
	int i, err = 0;
	int idx = 0, s_idx = cb->args[1];

	if (br->multicast_disabled)
		return 0;

	mdb = rcu_dereference(br->mdb);
	if (!mdb)
		return 0;

	nest = nla_nest_start(skb, MDBA_MDB);
	if (nest == NULL)
		return -EMSGSIZE;

	for (i = 0; i < mdb->max; i++) {
		struct net_bridge_mdb_entry *mp;
		struct net_bridge_port_group *p;
		struct net_bridge_port_group __rcu **pp;
		struct net_bridge_port *port;

		hlist_for_each_entry_rcu(mp, &mdb->mhash[i], hlist[mdb->ver]) {
			if (idx < s_idx)
				goto skip;

			nest2 = nla_nest_start(skb, MDBA_MDB_ENTRY);
			if (nest2 == NULL) {
				err = -EMSGSIZE;
				goto out;
			}

			for (pp = &mp->ports;
			     (p = rcu_dereference(*pp)) != NULL;
			     pp = &p->next) {
				port = p->port;
				if (port) {
					struct br_mdb_entry e;
					memset(&e, 0, sizeof(e));
					e.ifindex = port->dev->ifindex;
					e.state = p->state;
					e.vid = p->addr.vid;
					if (p->addr.proto == htons(ETH_P_IP))
						e.addr.u.ip4 = p->addr.u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
					if (p->addr.proto == htons(ETH_P_IPV6))
						e.addr.u.ip6 = p->addr.u.ip6;
#endif
					e.addr.proto = p->addr.proto;
					if (nla_put(skb, MDBA_MDB_ENTRY_INFO, sizeof(e), &e)) {
						nla_nest_cancel(skb, nest2);
						err = -EMSGSIZE;
						goto out;
					}
				}
			}
			nla_nest_end(skb, nest2);
skip:
			idx++;
		}
	}

out:
	cb->args[1] = idx;
	nla_nest_end(skb, nest);
	return err;
}
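
/* RTM_GETMDB dump handler: iterate all bridge devices in the netns and
 * emit their MDB entries and router ports as NLM_F_MULTI messages.
 */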
static int br_mdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net_device *dev;
	struct net *net = sock_net(skb->sk);
	struct nlmsghdr *nlh = NULL;
	int idx = 0, s_idx;

	s_idx = cb->args[0];

	rcu_read_lock();

	/* In theory this could be wrapped to 0... */
	cb->seq = net->dev_base_seq + br_mdb_rehash_seq;

	for_each_netdev_rcu(net, dev) {
		if (dev->priv_flags & IFF_EBRIDGE) {
			struct br_port_msg *bpm;

			if (idx < s_idx)
				goto skip;

			nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
					cb->nlh->nlmsg_seq, RTM_GETMDB,
					sizeof(*bpm), NLM_F_MULTI);
			if (nlh == NULL)
				break;

			bpm = nlmsg_data(nlh);
			memset(bpm, 0, sizeof(*bpm));
			bpm->ifindex = dev->ifindex;
			if (br_mdb_fill_info(skb, cb, dev) < 0)
				goto out;
			if (br_rports_fill_info(skb, cb, dev) < 0)
				goto out;

			cb->args[1] = 0;
			nlmsg_end(skb, nlh);
		skip:
			idx++;
		}
	}

out:
	if (nlh)
		nlmsg_end(skb, nlh);
	rcu_read_unlock();
	cb->args[0] = idx;
	return skb->len;
}
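
/* Build a single-entry RTM_NEWMDB/RTM_DELMDB notification message. */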
static int nlmsg_populate_mdb_fill(struct sk_buff *skb,
				   struct net_device *dev,
				   struct br_mdb_entry *entry, u32 pid,
				   u32 seq, int type, unsigned int flags)
{
	struct nlmsghdr *nlh;
	struct br_port_msg *bpm;
	struct nlattr *nest, *nest2;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), 0);
	if (!nlh)
		return -EMSGSIZE;

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->family  = AF_BRIDGE;
	bpm->ifindex = dev->ifindex;
	nest = nla_nest_start(skb, MDBA_MDB);
	if (nest == NULL)
		goto cancel;
	nest2 = nla_nest_start(skb, MDBA_MDB_ENTRY);
	if (nest2 == NULL)
		goto end;

	if (nla_put(skb, MDBA_MDB_ENTRY_INFO, sizeof(*entry), entry))
		goto end;

	nla_nest_end(skb, nest2);
	nla_nest_end(skb, nest);
	nlmsg_end(skb, nlh);
	return 0;

end:
	nla_nest_end(skb, nest);
cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
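
/* Worst-case notification size: port message header plus one MDB entry
 * attribute.
 */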
static inline size_t rtnl_mdb_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct br_port_msg))
		+ nla_total_size(sizeof(struct br_mdb_entry));
}
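
/* Offload the entry to the switchdev port (add or del, deferred) and
 * broadcast an rtnetlink notification to RTNLGRP_MDB listeners.
 */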
static void __br_mdb_notify(struct net_device *dev, struct br_mdb_entry *entry,
			    int type)
{
	struct switchdev_obj_port_mdb mdb = {
		.obj = {
			.id = SWITCHDEV_OBJ_ID_PORT_MDB,
			.flags = SWITCHDEV_F_DEFER,
		},
		.vid = entry->vid,
	};
	struct net_device *port_dev;
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	port_dev = __dev_get_by_index(net, entry->ifindex);
	if (entry->addr.proto == htons(ETH_P_IP))
		ip_eth_mc_map(entry->addr.u.ip4, mdb.addr);
#if IS_ENABLED(CONFIG_IPV6)
	else
		ipv6_eth_mc_map(&entry->addr.u.ip6, mdb.addr);
#endif

	mdb.obj.orig_dev = port_dev;
	if (port_dev && type == RTM_NEWMDB)
		switchdev_port_obj_add(port_dev, &mdb.obj);
	else if (port_dev && type == RTM_DELMDB)
		switchdev_port_obj_del(port_dev, &mdb.obj);

	skb = nlmsg_new(rtnl_mdb_nlmsg_size(), GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = nlmsg_populate_mdb_fill(skb, dev, entry, 0, 0, type, NTF_SELF);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
	return;
errout:
	rtnl_set_sk_err(net, RTNLGRP_MDB, err);
}
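
/* Translate a bridge-internal br_ip group into a br_mdb_entry and send
 * the notification for it.
 */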
void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
		   struct br_ip *group, int type, u8 state)
{
	struct br_mdb_entry entry;

	memset(&entry, 0, sizeof(entry));
	entry.ifindex = port->dev->ifindex;
	entry.addr.proto = group->proto;
	entry.addr.u.ip4 = group->u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
	entry.addr.u.ip6 = group->u.ip6;
#endif
	entry.state = state;
	entry.vid = group->vid;
	__br_mdb_notify(dev, &entry, type);
}
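
/* Build a router-port (MDBA_ROUTER) notification for one port ifindex. */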
static int nlmsg_populate_rtr_fill(struct sk_buff *skb,
				   struct net_device *dev,
				   int ifindex, u32 pid,
				   u32 seq, int type, unsigned int flags)
{
	struct br_port_msg *bpm;
	struct nlmsghdr *nlh;
	struct nlattr *nest;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->family = AF_BRIDGE;
	bpm->ifindex = dev->ifindex;
	nest = nla_nest_start(skb, MDBA_ROUTER);
	if (!nest)
		goto cancel;

	if (nla_put_u32(skb, MDBA_ROUTER_PORT, ifindex))
		goto end;

	nla_nest_end(skb, nest);
	nlmsg_end(skb, nlh);
	return 0;

end:
	nla_nest_end(skb, nest);
cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
static inline size_t rtnl_rtr_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct br_port_msg))
		+ nla_total_size(sizeof(__u32));
}
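
/* Notify userspace that a port was added to or removed from the router
 * port list; a NULL port is reported as ifindex 0.
 */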
void br_rtr_notify(struct net_device *dev, struct net_bridge_port *port,
		   int type)
{
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;
	int ifindex;

	ifindex = port ? port->dev->ifindex : 0;
	skb = nlmsg_new(rtnl_rtr_nlmsg_size(), GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = nlmsg_populate_rtr_fill(skb, dev, ifindex, 0, 0, type, NTF_SELF);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
	return;

errout:
	rtnl_set_sk_err(net, RTNLGRP_MDB, err);
}
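
/* Sanity-check a userspace-supplied entry: nonzero ifindex, a multicast
 * (but not link-local) address, a known state and a valid VLAN id.
 */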
static bool is_valid_mdb_entry(struct br_mdb_entry *entry)
{
	if (entry->ifindex == 0)
		return false;

	if (entry->addr.proto == htons(ETH_P_IP)) {
		if (!ipv4_is_multicast(entry->addr.u.ip4))
			return false;
		if (ipv4_is_local_multicast(entry->addr.u.ip4))
			return false;
#if IS_ENABLED(CONFIG_IPV6)
	} else if (entry->addr.proto == htons(ETH_P_IPV6)) {
		if (ipv6_addr_is_ll_all_nodes(&entry->addr.u.ip6))
			return false;
#endif
	} else
		return false;
	if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY)
		return false;
	if (entry->vid >= VLAN_VID_MASK)
		return false;

	return true;
}
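
/* Parse and validate an RTM_NEWMDB/RTM_DELMDB request; on success *pdev
 * is the bridge device and *pentry the validated MDBA_SET_ENTRY payload.
 */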
static int br_mdb_parse(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct net_device **pdev, struct br_mdb_entry **pentry)
{
	struct net *net = sock_net(skb->sk);
	struct br_mdb_entry *entry;
	struct br_port_msg *bpm;
	struct nlattr *tb[MDBA_SET_ENTRY_MAX+1];
	struct net_device *dev;
	int err;

	err = nlmsg_parse(nlh, sizeof(*bpm), tb, MDBA_SET_ENTRY_MAX, NULL);
	if (err < 0)
		return err;

	bpm = nlmsg_data(nlh);
	if (bpm->ifindex == 0) {
		pr_info("PF_BRIDGE: br_mdb_parse() with invalid ifindex\n");
		return -EINVAL;
	}

	dev = __dev_get_by_index(net, bpm->ifindex);
	if (dev == NULL) {
		pr_info("PF_BRIDGE: br_mdb_parse() with unknown ifindex\n");
		return -ENODEV;
	}

	if (!(dev->priv_flags & IFF_EBRIDGE)) {
		pr_info("PF_BRIDGE: br_mdb_parse() with non-bridge\n");
		return -EOPNOTSUPP;
	}

	*pdev = dev;

	if (!tb[MDBA_SET_ENTRY] ||
	    nla_len(tb[MDBA_SET_ENTRY]) != sizeof(struct br_mdb_entry)) {
		pr_info("PF_BRIDGE: br_mdb_parse() with invalid attr\n");
		return -EINVAL;
	}

	entry = nla_data(tb[MDBA_SET_ENTRY]);
	if (!is_valid_mdb_entry(entry)) {
		pr_info("PF_BRIDGE: br_mdb_parse() with invalid entry\n");
		return -EINVAL;
	}

	*pentry = entry;
	return 0;
}
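
/* Insert a (group, port) entry into the MDB, creating the group if
 * needed. Port groups are kept ordered by port pointer; temporary
 * entries get a membership timer. Called with multicast_lock held.
 */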
static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
			    struct br_ip *group, unsigned char state)
{
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_mdb_htable *mdb;
	unsigned long now = jiffies;
	int err;

	mdb = mlock_dereference(br->mdb, br);
	mp = br_mdb_ip_get(mdb, group);
	if (!mp) {
		mp = br_multicast_new_group(br, port, group);
		err = PTR_ERR(mp);
		if (IS_ERR(mp))
			return err;
	}

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p->port == port)
			return -EEXIST;
		if ((unsigned long)p->port < (unsigned long)port)
			break;
	}

	p = br_multicast_new_port_group(port, group, *pp, state);
	if (unlikely(!p))
		return -ENOMEM;
	rcu_assign_pointer(*pp, p);
	if (state == MDB_TEMPORARY)
		mod_timer(&p->timer, now + br->multicast_membership_interval);

	return 0;
}
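
/* Resolve the target port, build the br_ip key and add the group under
 * the bridge multicast lock.
 */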
static int __br_mdb_add(struct net *net, struct net_bridge *br,
			struct br_mdb_entry *entry)
{
	struct br_ip ip;
	struct net_device *dev;
	struct net_bridge_port *p;
	int ret;

	if (!netif_running(br->dev) || br->multicast_disabled)
		return -EINVAL;

	dev = __dev_get_by_index(net, entry->ifindex);
	if (!dev)
		return -ENODEV;

	p = br_port_get_rtnl(dev);
	if (!p || p->br != br || p->state == BR_STATE_DISABLED)
		return -EINVAL;

	memset(&ip, 0, sizeof(ip));
	ip.vid = entry->vid;
	ip.proto = entry->addr.proto;
	if (ip.proto == htons(ETH_P_IP))
		ip.u.ip4 = entry->addr.u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
	else
		ip.u.ip6 = entry->addr.u.ip6;
#endif

	spin_lock_bh(&br->multicast_lock);
	ret = br_mdb_add_group(br, p, &ip, entry->state);
	spin_unlock_bh(&br->multicast_lock);
	return ret;
}
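
/* RTM_NEWMDB handler. With VLAN filtering on and no VID given, the
 * entry is installed on every VLAN configured on the port.
 */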
static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	struct net_bridge_vlan_group *vg;
	struct net_device *dev, *pdev;
	struct br_mdb_entry *entry;
	struct net_bridge_port *p;
	struct net_bridge_vlan *v;
	struct net_bridge *br;
	int err;

	err = br_mdb_parse(skb, nlh, &dev, &entry);
	if (err < 0)
		return err;

	br = netdev_priv(dev);

	/* If vlan filtering is enabled and VLAN is not specified
	 * install mdb entry on all vlans configured on the port.
	 */
	pdev = __dev_get_by_index(net, entry->ifindex);
	if (!pdev)
		return -ENODEV;

	p = br_port_get_rtnl(pdev);
	if (!p || p->br != br || p->state == BR_STATE_DISABLED)
		return -EINVAL;

	vg = nbp_vlan_group(p);
	if (br_vlan_enabled(br) && vg && entry->vid == 0) {
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			entry->vid = v->vid;
			err = __br_mdb_add(net, br, entry);
			if (err)
				break;
			__br_mdb_notify(dev, entry, RTM_NEWMDB);
		}
	} else {
		err = __br_mdb_add(net, br, entry);
		if (!err)
			__br_mdb_notify(dev, entry, RTM_NEWMDB);
	}

	return err;
}
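
/* Remove the (group, port) entry matching entry->ifindex; arm the group
 * timer when no port or host membership remains.
 */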
static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip ip;
	int err = -EINVAL;

	if (!netif_running(br->dev) || br->multicast_disabled)
		return -EINVAL;

	memset(&ip, 0, sizeof(ip));
	ip.vid = entry->vid;
	ip.proto = entry->addr.proto;
	if (ip.proto == htons(ETH_P_IP))
		ip.u.ip4 = entry->addr.u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
	else
		ip.u.ip6 = entry->addr.u.ip6;
#endif

	spin_lock_bh(&br->multicast_lock);
	mdb = mlock_dereference(br->mdb, br);

	mp = br_mdb_ip_get(mdb, &ip);
	if (!mp)
		goto unlock;

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (!p->port || p->port->dev->ifindex != entry->ifindex)
			continue;

		if (p->port->state == BR_STATE_DISABLED)
			goto unlock;

		entry->state = p->state;
		rcu_assign_pointer(*pp, p->next);
		hlist_del_init(&p->mglist);
		del_timer(&p->timer);
		call_rcu_bh(&p->rcu, br_multicast_free_pg);
		err = 0;

		if (!mp->ports && !mp->mglist &&
		    netif_running(br->dev))
			mod_timer(&mp->timer, jiffies);
		break;
	}

unlock:
	spin_unlock_bh(&br->multicast_lock);
	return err;
}
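
/* RTM_DELMDB handler, mirroring br_mdb_add(): with VLAN filtering on
 * and no VID given, the entry is deleted from every configured VLAN.
 */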
static int br_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	struct net_bridge_vlan_group *vg;
	struct net_device *dev, *pdev;
	struct br_mdb_entry *entry;
	struct net_bridge_port *p;
	struct net_bridge_vlan *v;
	struct net_bridge *br;
	int err;

	err = br_mdb_parse(skb, nlh, &dev, &entry);
	if (err < 0)
		return err;

	br = netdev_priv(dev);

	/* If vlan filtering is enabled and VLAN is not specified
	 * delete mdb entry on all vlans configured on the port.
	 */
	pdev = __dev_get_by_index(net, entry->ifindex);
	if (!pdev)
		return -ENODEV;

	p = br_port_get_rtnl(pdev);
	if (!p || p->br != br || p->state == BR_STATE_DISABLED)
		return -EINVAL;

	vg = nbp_vlan_group(p);
	if (br_vlan_enabled(br) && vg && entry->vid == 0) {
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			entry->vid = v->vid;
			err = __br_mdb_del(br, entry);
			if (!err)
				__br_mdb_notify(dev, entry, RTM_DELMDB);
		}
	} else {
		err = __br_mdb_del(br, entry);
		if (!err)
			__br_mdb_notify(dev, entry, RTM_DELMDB);
	}

	return err;
}
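
/* Register/unregister the PF_BRIDGE MDB rtnetlink handlers. */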
void br_mdb_init(void)
{
	rtnl_register(PF_BRIDGE, RTM_GETMDB, NULL, br_mdb_dump, NULL);
	rtnl_register(PF_BRIDGE, RTM_NEWMDB, br_mdb_add, NULL, NULL);
	rtnl_register(PF_BRIDGE, RTM_DELMDB, br_mdb_del, NULL, NULL);
}
void br_mdb_uninit(void)
{
	rtnl_unregister(PF_BRIDGE, RTM_GETMDB);
	rtnl_unregister(PF_BRIDGE, RTM_NEWMDB);
	rtnl_unregister(PF_BRIDGE, RTM_DELMDB);
}