// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Multicast support for IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on linux/ipv4/igmp.c and linux/ipv4/ip_sockglue.c
 */

/* Changes:
 *
 *	yoshfuji	: fix format of router-alert option
 *	YOSHIFUJI Hideaki @USAGI:
 *		Fixed source address for MLD message based on
 *		<draft-ietf-magma-mld-source-05.txt>.
 *	YOSHIFUJI Hideaki @USAGI:
 *		- Ignore Queries for invalid addresses.
 *		- MLD for link-local addresses.
 *	David L Stevens <dlstevens@us.ibm.com>:
 *		- MLDv2 support
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/jiffies.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/route.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/pkt_sched.h>
#include <net/mld.h>
#include <linux/workqueue.h>

#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>

#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/protocol.h>
#include <net/if_inet6.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/inet_common.h>

#include <net/ip6_checksum.h>
/* Ensure that we have struct in6_addr aligned on 32bit word. */
static int __mld2_query_bugs[] __attribute__((__unused__)) = {
        BUILD_BUG_ON_ZERO(offsetof(struct mld2_query, mld2q_srcs) % 4),
        BUILD_BUG_ON_ZERO(offsetof(struct mld2_report, mld2r_grec) % 4),
        BUILD_BUG_ON_ZERO(offsetof(struct mld2_grec, grec_mca) % 4)
};

static struct workqueue_struct *mld_wq;
static struct in6_addr mld2_all_mcr = MLD2_ALL_MCR_INIT;
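/* mld2_all_mcr is ff02::16, the "all MLDv2-capable routers" address
 * (RFC 3810, 5.2.14); every MLDv2 report built below is sent there.
 */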
static void igmp6_join_group(struct ifmcaddr6 *ma);
static void igmp6_leave_group(struct ifmcaddr6 *ma);
static void mld_mca_work(struct work_struct *work);

static void mld_ifc_event(struct inet6_dev *idev);
static bool mld_in_v1_mode(const struct inet6_dev *idev);
static int sf_setstate(struct ifmcaddr6 *pmc);
static void sf_markstate(struct ifmcaddr6 *pmc);
static void ip6_mc_clear_src(struct ifmcaddr6 *pmc);
static int ip6_mc_del_src(struct inet6_dev *idev, const struct in6_addr *pmca,
                          int sfmode, int sfcount, const struct in6_addr *psfsrc,
                          int delta);
static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
                          int sfmode, int sfcount, const struct in6_addr *psfsrc,
                          int delta);
static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml,
                            struct inet6_dev *idev);
static int __ipv6_dev_mc_inc(struct net_device *dev,
                             const struct in6_addr *addr, unsigned int mode);

#define MLD_QRV_DEFAULT		2
/* RFC3810, 9.2. Query Interval */
#define MLD_QI_DEFAULT		(125 * HZ)
/* RFC3810, 9.3. Query Response Interval */
#define MLD_QRI_DEFAULT		(10 * HZ)

/* RFC3810, 8.1 Query Version Distinctions */
#define MLD_V1_QUERY_LEN	24
#define MLD_V2_QUERY_LEN_MIN	28

#define IPV6_MLD_MAX_MSF	64
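/* These lengths follow from the message layouts: an MLDv1 query is an
 * 8-byte ICMPv6 header plus a 16-byte multicast address (24 bytes); an
 * MLDv2 query adds at least the resv/S/QRV byte, QQIC byte and 2-byte
 * source count for a 28-byte minimum.
 */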
int sysctl_mld_max_msf __read_mostly = IPV6_MLD_MAX_MSF;
int sysctl_mld_qrv __read_mostly = MLD_QRV_DEFAULT;
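/* Both knobs are exposed as sysctls (net.ipv6.mld_max_msf and
 * net.ipv6.mld_qrv): the former caps the number of source filters a
 * socket may install per group, the latter is the MLD robustness
 * variable controlling how often state-change reports are retransmitted.
 */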
/*
 *	socket join on multicast group
 */
#define mc_dereference(e, idev) \
	rcu_dereference_protected(e, lockdep_is_held(&(idev)->mc_lock))

#define sock_dereference(e, sk) \
	rcu_dereference_protected(e, lockdep_sock_is_held(sk))

#define for_each_pmc_socklock(np, sk, pmc)			\
	for (pmc = sock_dereference((np)->ipv6_mc_list, sk);	\
	     pmc;						\
	     pmc = sock_dereference(pmc->next, sk))

#define for_each_pmc_rcu(np, pmc)				\
	for (pmc = rcu_dereference((np)->ipv6_mc_list);		\
	     pmc;						\
	     pmc = rcu_dereference(pmc->next))

#define for_each_psf_mclock(mc, psf)				\
	for (psf = mc_dereference((mc)->mca_sources, mc->idev);	\
	     psf;						\
	     psf = mc_dereference(psf->sf_next, mc->idev))

#define for_each_psf_rcu(mc, psf)				\
	for (psf = rcu_dereference((mc)->mca_sources);		\
	     psf;						\
	     psf = rcu_dereference(psf->sf_next))

#define for_each_psf_tomb(mc, psf)				\
	for (psf = mc_dereference((mc)->mca_tomb, mc->idev);	\
	     psf;						\
	     psf = mc_dereference(psf->sf_next, mc->idev))

#define for_each_mc_mclock(idev, mc)				\
	for (mc = mc_dereference((idev)->mc_list, idev);	\
	     mc;						\
	     mc = mc_dereference(mc->next, idev))

#define for_each_mc_rcu(idev, mc)				\
	for (mc = rcu_dereference((idev)->mc_list);		\
	     mc;						\
	     mc = rcu_dereference(mc->next))

#define for_each_mc_tomb(idev, mc)				\
	for (mc = mc_dereference((idev)->mc_tomb, idev);	\
	     mc;						\
	     mc = mc_dereference(mc->next, idev))
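/* Three dereference helpers for three locking contexts: mc_dereference()
 * requires the per-device idev->mc_lock mutex, sock_dereference() the
 * socket lock, and the _rcu variants only rcu_read_lock(). Each
 * for_each_* iterator above pairs with exactly one of them.
 */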
static int unsolicited_report_interval(struct inet6_dev *idev)
{
        int iv;

        if (mld_in_v1_mode(idev))
                iv = READ_ONCE(idev->cnf.mldv1_unsolicited_report_interval);
        else
                iv = READ_ONCE(idev->cnf.mldv2_unsolicited_report_interval);

        return iv > 0 ? iv : 1;
}

static int __ipv6_sock_mc_join(struct sock *sk, int ifindex,
                               const struct in6_addr *addr, unsigned int mode)
{
        struct net_device *dev = NULL;
        struct ipv6_mc_socklist *mc_lst;
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct net *net = sock_net(sk);
        int err;

        ASSERT_RTNL();

        if (!ipv6_addr_is_multicast(addr))
                return -EINVAL;

        for_each_pmc_socklock(np, sk, mc_lst) {
                if ((ifindex == 0 || mc_lst->ifindex == ifindex) &&
                    ipv6_addr_equal(&mc_lst->addr, addr))
                        return -EADDRINUSE;
        }

        mc_lst = sock_kmalloc(sk, sizeof(struct ipv6_mc_socklist), GFP_KERNEL);
        if (!mc_lst)
                return -ENOMEM;

        mc_lst->next = NULL;
        mc_lst->addr = *addr;

        if (ifindex == 0) {
                struct rt6_info *rt;

                rt = rt6_lookup(net, addr, NULL, 0, NULL, 0);
                if (rt) {
                        dev = rt->dst.dev;
                        ip6_rt_put(rt);
                }
        } else {
                dev = __dev_get_by_index(net, ifindex);
        }

        if (!dev) {
                sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
                return -ENODEV;
        }

        mc_lst->ifindex = dev->ifindex;
        mc_lst->sfmode = mode;
        RCU_INIT_POINTER(mc_lst->sflist, NULL);

        /*
         *	now add/increase the group membership on the device
         */
        err = __ipv6_dev_mc_inc(dev, addr, mode);

        if (err) {
                sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
                return err;
        }

        mc_lst->next = np->ipv6_mc_list;
        rcu_assign_pointer(np->ipv6_mc_list, mc_lst);

        return 0;
}

int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
{
        return __ipv6_sock_mc_join(sk, ifindex, addr, MCAST_EXCLUDE);
}
EXPORT_SYMBOL(ipv6_sock_mc_join);

int ipv6_sock_mc_join_ssm(struct sock *sk, int ifindex,
                          const struct in6_addr *addr, unsigned int mode)
{
        return __ipv6_sock_mc_join(sk, ifindex, addr, mode);
}
/*
 *	socket leave on multicast group
 */
int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
{
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct ipv6_mc_socklist *mc_lst;
        struct ipv6_mc_socklist __rcu **lnk;
        struct net *net = sock_net(sk);

        ASSERT_RTNL();

        if (!ipv6_addr_is_multicast(addr))
                return -EINVAL;

        for (lnk = &np->ipv6_mc_list;
             (mc_lst = sock_dereference(*lnk, sk)) != NULL;
              lnk = &mc_lst->next) {
                if ((ifindex == 0 || mc_lst->ifindex == ifindex) &&
                    ipv6_addr_equal(&mc_lst->addr, addr)) {
                        struct net_device *dev;

                        *lnk = mc_lst->next;

                        dev = __dev_get_by_index(net, mc_lst->ifindex);
                        if (dev) {
                                struct inet6_dev *idev = __in6_dev_get(dev);

                                ip6_mc_leave_src(sk, mc_lst, idev);
                                if (idev)
                                        __ipv6_dev_mc_dec(idev, &mc_lst->addr);
                        } else {
                                ip6_mc_leave_src(sk, mc_lst, NULL);
                        }

                        atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc);
                        kfree_rcu(mc_lst, rcu);
                        return 0;
                }
        }

        return -EADDRNOTAVAIL;
}
EXPORT_SYMBOL(ipv6_sock_mc_drop);

static struct inet6_dev *ip6_mc_find_dev_rtnl(struct net *net,
                                              const struct in6_addr *group,
                                              int ifindex)
{
        struct net_device *dev = NULL;
        struct inet6_dev *idev = NULL;

        if (ifindex == 0) {
                struct rt6_info *rt = rt6_lookup(net, group, NULL, 0, NULL, 0);

                if (rt) {
                        dev = rt->dst.dev;
                        ip6_rt_put(rt);
                }
        } else {
                dev = __dev_get_by_index(net, ifindex);
        }
        if (!dev)
                return NULL;

        idev = __in6_dev_get(dev);
        if (!idev)
                return NULL;
        if (idev->dead)
                return NULL;
        return idev;
}

void __ipv6_sock_mc_close(struct sock *sk)
{
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct ipv6_mc_socklist *mc_lst;
        struct net *net = sock_net(sk);

        ASSERT_RTNL();

        while ((mc_lst = sock_dereference(np->ipv6_mc_list, sk)) != NULL) {
                struct net_device *dev;

                np->ipv6_mc_list = mc_lst->next;

                dev = __dev_get_by_index(net, mc_lst->ifindex);
                if (dev) {
                        struct inet6_dev *idev = __in6_dev_get(dev);

                        ip6_mc_leave_src(sk, mc_lst, idev);
                        if (idev)
                                __ipv6_dev_mc_dec(idev, &mc_lst->addr);
                } else {
                        ip6_mc_leave_src(sk, mc_lst, NULL);
                }

                atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc);
                kfree_rcu(mc_lst, rcu);
        }
}

void ipv6_sock_mc_close(struct sock *sk)
{
        struct ipv6_pinfo *np = inet6_sk(sk);

        if (!rcu_access_pointer(np->ipv6_mc_list))
                return;

        rtnl_lock();
        lock_sock(sk);
        __ipv6_sock_mc_close(sk);
        release_sock(sk);
        rtnl_unlock();
}
int ip6_mc_source(int add, int omode, struct sock *sk,
                  struct group_source_req *pgsr)
{
        struct in6_addr *source, *group;
        struct ipv6_mc_socklist *pmc;
        struct inet6_dev *idev;
        struct ipv6_pinfo *inet6 = inet6_sk(sk);
        struct ip6_sf_socklist *psl;
        struct net *net = sock_net(sk);
        int i, j, rv;
        int leavegroup = 0;
        int err;

        source = &((struct sockaddr_in6 *)&pgsr->gsr_source)->sin6_addr;
        group = &((struct sockaddr_in6 *)&pgsr->gsr_group)->sin6_addr;

        if (!ipv6_addr_is_multicast(group))
                return -EINVAL;

        idev = ip6_mc_find_dev_rtnl(net, group, pgsr->gsr_interface);
        if (!idev)
                return -ENODEV;

        err = -EADDRNOTAVAIL;

        mutex_lock(&idev->mc_lock);
        for_each_pmc_socklock(inet6, sk, pmc) {
                if (pgsr->gsr_interface && pmc->ifindex != pgsr->gsr_interface)
                        continue;
                if (ipv6_addr_equal(&pmc->addr, group))
                        break;
        }
        if (!pmc) {             /* must have a prior join */
                err = -EINVAL;
                goto done;
        }
        /* if a source filter was set, must be the same mode as before */
        if (rcu_access_pointer(pmc->sflist)) {
                if (pmc->sfmode != omode) {
                        err = -EINVAL;
                        goto done;
                }
        } else if (pmc->sfmode != omode) {
                /* allow mode switches for empty-set filters */
                ip6_mc_add_src(idev, group, omode, 0, NULL, 0);
                ip6_mc_del_src(idev, group, pmc->sfmode, 0, NULL, 0);
                pmc->sfmode = omode;
        }

        psl = sock_dereference(pmc->sflist, sk);
        if (!add) {
                if (!psl)
                        goto done;      /* err = -EADDRNOTAVAIL */
                rv = !0;
                for (i = 0; i < psl->sl_count; i++) {
                        rv = !ipv6_addr_equal(&psl->sl_addr[i], source);
                        if (rv == 0)
                                break;
                }
                if (rv)         /* source not found */
                        goto done;      /* err = -EADDRNOTAVAIL */

                /* special case - (INCLUDE, empty) == LEAVE_GROUP */
                if (psl->sl_count == 1 && omode == MCAST_INCLUDE) {
                        leavegroup = 1;
                        goto done;
                }

                /* update the interface filter */
                ip6_mc_del_src(idev, group, omode, 1, source, 1);

                for (j = i+1; j < psl->sl_count; j++)
                        psl->sl_addr[j-1] = psl->sl_addr[j];
                psl->sl_count--;
                err = 0;
                goto done;
        }
        /* else, add a new source to the filter */

        if (psl && psl->sl_count >= sysctl_mld_max_msf) {
                err = -ENOBUFS;
                goto done;
        }
        if (!psl || psl->sl_count == psl->sl_max) {
                struct ip6_sf_socklist *newpsl;
                int count = IP6_SFBLOCK;

                if (psl)
                        count += psl->sl_max;
                newpsl = sock_kmalloc(sk, struct_size(newpsl, sl_addr, count),
                                      GFP_KERNEL);
                if (!newpsl) {
                        err = -ENOBUFS;
                        goto done;
                }
                newpsl->sl_max = count;
                newpsl->sl_count = count - IP6_SFBLOCK;
                if (psl) {
                        for (i = 0; i < psl->sl_count; i++)
                                newpsl->sl_addr[i] = psl->sl_addr[i];
                        atomic_sub(struct_size(psl, sl_addr, psl->sl_max),
                                   &sk->sk_omem_alloc);
                }
                rcu_assign_pointer(pmc->sflist, newpsl);
                kfree_rcu(psl, rcu);
                psl = newpsl;
        }
        rv = 1; /* > 0 for insert logic below if sl_count is 0 */
        for (i = 0; i < psl->sl_count; i++) {
                rv = !ipv6_addr_equal(&psl->sl_addr[i], source);
                if (rv == 0) /* There is an error in the address. */
                        goto done;
        }
        for (j = psl->sl_count-1; j >= i; j--)
                psl->sl_addr[j+1] = psl->sl_addr[j];
        psl->sl_addr[i] = *source;
        psl->sl_count++;
        err = 0;
        /* update the interface list */
        ip6_mc_add_src(idev, group, omode, 1, source, 1);
done:
        mutex_unlock(&idev->mc_lock);
        if (leavegroup)
                err = ipv6_sock_mc_drop(sk, pgsr->gsr_interface, group);
        return err;
}
int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf,
                    struct sockaddr_storage *list)
{
        const struct in6_addr *group;
        struct ipv6_mc_socklist *pmc;
        struct inet6_dev *idev;
        struct ipv6_pinfo *inet6 = inet6_sk(sk);
        struct ip6_sf_socklist *newpsl, *psl;
        struct net *net = sock_net(sk);
        int leavegroup = 0;
        int i, err;

        group = &((struct sockaddr_in6 *)&gsf->gf_group)->sin6_addr;

        if (!ipv6_addr_is_multicast(group))
                return -EINVAL;
        if (gsf->gf_fmode != MCAST_INCLUDE &&
            gsf->gf_fmode != MCAST_EXCLUDE)
                return -EINVAL;

        idev = ip6_mc_find_dev_rtnl(net, group, gsf->gf_interface);
        if (!idev)
                return -ENODEV;

        err = 0;

        if (gsf->gf_fmode == MCAST_INCLUDE && gsf->gf_numsrc == 0) {
                leavegroup = 1;
                goto done;
        }

        for_each_pmc_socklock(inet6, sk, pmc) {
                if (pmc->ifindex != gsf->gf_interface)
                        continue;
                if (ipv6_addr_equal(&pmc->addr, group))
                        break;
        }
        if (!pmc) {             /* must have a prior join */
                err = -EINVAL;
                goto done;
        }
        if (gsf->gf_numsrc) {
                newpsl = sock_kmalloc(sk, struct_size(newpsl, sl_addr,
                                                      gsf->gf_numsrc),
                                      GFP_KERNEL);
                if (!newpsl) {
                        err = -ENOBUFS;
                        goto done;
                }
                newpsl->sl_max = newpsl->sl_count = gsf->gf_numsrc;
                for (i = 0; i < newpsl->sl_count; ++i, ++list) {
                        struct sockaddr_in6 *psin6;

                        psin6 = (struct sockaddr_in6 *)list;
                        newpsl->sl_addr[i] = psin6->sin6_addr;
                }
                mutex_lock(&idev->mc_lock);
                err = ip6_mc_add_src(idev, group, gsf->gf_fmode,
                                     newpsl->sl_count, newpsl->sl_addr, 0);
                if (err) {
                        mutex_unlock(&idev->mc_lock);
                        sock_kfree_s(sk, newpsl, struct_size(newpsl, sl_addr,
                                                             newpsl->sl_max));
                        goto done;
                }
                mutex_unlock(&idev->mc_lock);
        } else {
                newpsl = NULL;
                mutex_lock(&idev->mc_lock);
                ip6_mc_add_src(idev, group, gsf->gf_fmode, 0, NULL, 0);
                mutex_unlock(&idev->mc_lock);
        }

        mutex_lock(&idev->mc_lock);
        psl = sock_dereference(pmc->sflist, sk);
        if (psl) {
                ip6_mc_del_src(idev, group, pmc->sfmode,
                               psl->sl_count, psl->sl_addr, 0);
                atomic_sub(struct_size(psl, sl_addr, psl->sl_max),
                           &sk->sk_omem_alloc);
        } else {
                ip6_mc_del_src(idev, group, pmc->sfmode, 0, NULL, 0);
        }
        rcu_assign_pointer(pmc->sflist, newpsl);
        mutex_unlock(&idev->mc_lock);
        kfree_rcu(psl, rcu);
        pmc->sfmode = gsf->gf_fmode;
        err = 0;
done:
        if (leavegroup)
                err = ipv6_sock_mc_drop(sk, gsf->gf_interface, group);
        return err;
}
int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf,
                  sockptr_t optval, size_t ss_offset)
{
        struct ipv6_pinfo *inet6 = inet6_sk(sk);
        const struct in6_addr *group;
        struct ipv6_mc_socklist *pmc;
        struct ip6_sf_socklist *psl;
        int i, count, copycount;

        group = &((struct sockaddr_in6 *)&gsf->gf_group)->sin6_addr;

        if (!ipv6_addr_is_multicast(group))
                return -EINVAL;

        /* changes to the ipv6_mc_list require the socket lock and
         * rtnl lock. We have the socket lock, so reading the list is safe.
         */

        for_each_pmc_socklock(inet6, sk, pmc) {
                if (pmc->ifindex != gsf->gf_interface)
                        continue;
                if (ipv6_addr_equal(group, &pmc->addr))
                        break;
        }
        if (!pmc)               /* must have a prior join */
                return -EADDRNOTAVAIL;

        gsf->gf_fmode = pmc->sfmode;
        psl = sock_dereference(pmc->sflist, sk);
        count = psl ? psl->sl_count : 0;

        copycount = min(count, gsf->gf_numsrc);
        gsf->gf_numsrc = count;
        for (i = 0; i < copycount; i++) {
                struct sockaddr_in6 *psin6;
                struct sockaddr_storage ss;

                psin6 = (struct sockaddr_in6 *)&ss;
                memset(&ss, 0, sizeof(ss));
                psin6->sin6_family = AF_INET6;
                psin6->sin6_addr = psl->sl_addr[i];
                if (copy_to_sockptr_offset(optval, ss_offset, &ss, sizeof(ss)))
                        return -EFAULT;
                ss_offset += sizeof(ss);
        }
        return 0;
}
bool inet6_mc_check(const struct sock *sk, const struct in6_addr *mc_addr,
                    const struct in6_addr *src_addr)
{
        const struct ipv6_pinfo *np = inet6_sk(sk);
        const struct ipv6_mc_socklist *mc;
        const struct ip6_sf_socklist *psl;
        bool rv = true;

        rcu_read_lock();
        for_each_pmc_rcu(np, mc) {
                if (ipv6_addr_equal(&mc->addr, mc_addr))
                        break;
        }
        if (!mc) {
                rcu_read_unlock();
                return inet6_test_bit(MC6_ALL, sk);
        }
        psl = rcu_dereference(mc->sflist);
        if (!psl) {
                rv = mc->sfmode == MCAST_EXCLUDE;
        } else {
                int i;

                for (i = 0; i < psl->sl_count; i++) {
                        if (ipv6_addr_equal(&psl->sl_addr[i], src_addr))
                                break;
                }
                if (mc->sfmode == MCAST_INCLUDE && i >= psl->sl_count)
                        rv = false;
                if (mc->sfmode == MCAST_EXCLUDE && i < psl->sl_count)
                        rv = false;
        }
        rcu_read_unlock();

        return rv;
}
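/* Filter semantics per RFC 3810: INCLUDE accepts a packet only when its
 * source is on the list, EXCLUDE rejects it only when its source is on
 * the list, and a plain ASM join (EXCLUDE with an empty list) accepts
 * every source.
 */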
/* called with mc_lock */
static void igmp6_group_added(struct ifmcaddr6 *mc)
{
        struct net_device *dev = mc->idev->dev;
        char buf[MAX_ADDR_LEN];

        if (IPV6_ADDR_MC_SCOPE(&mc->mca_addr) <
            IPV6_ADDR_SCOPE_LINKLOCAL)
                return;

        if (!(mc->mca_flags&MAF_LOADED)) {
                mc->mca_flags |= MAF_LOADED;
                if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0)
                        dev_mc_add(dev, buf);
        }

        if (!(dev->flags & IFF_UP) || (mc->mca_flags & MAF_NOREPORT))
                return;

        if (mld_in_v1_mode(mc->idev)) {
                igmp6_join_group(mc);
                return;
        }
        /* else v2 */

        /* Based on RFC3810 6.1, for newly added INCLUDE SSM, we
         * should not send filter-mode change record as the mode
         * should be from IN() to IN(A).
         */
        if (mc->mca_sfmode == MCAST_EXCLUDE)
                mc->mca_crcount = mc->idev->mc_qrv;

        mld_ifc_event(mc->idev);
}

/* called with mc_lock */
static void igmp6_group_dropped(struct ifmcaddr6 *mc)
{
        struct net_device *dev = mc->idev->dev;
        char buf[MAX_ADDR_LEN];

        if (IPV6_ADDR_MC_SCOPE(&mc->mca_addr) <
            IPV6_ADDR_SCOPE_LINKLOCAL)
                return;

        if (mc->mca_flags&MAF_LOADED) {
                mc->mca_flags &= ~MAF_LOADED;
                if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0)
                        dev_mc_del(dev, buf);
        }

        if (mc->mca_flags & MAF_NOREPORT)
                return;

        if (!mc->idev->dead)
                igmp6_leave_group(mc);

        if (cancel_delayed_work(&mc->mca_work))
                refcount_dec(&mc->mca_refcnt);
}
/*
 * deleted ifmcaddr6 manipulation
 * called with mc_lock
 */
static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
{
        struct ifmcaddr6 *pmc;

        /* this is an "ifmcaddr6" for convenience; only the fields below
         * are actually used. In particular, the refcnt and users are not
         * used for management of the delete list. Using the same structure
         * for deleted items allows change reports to use common code with
         * non-deleted or query-response MCA's.
         */
        pmc = kzalloc(sizeof(*pmc), GFP_KERNEL);
        if (!pmc)
                return;

        pmc->idev = im->idev;
        in6_dev_hold(idev);
        pmc->mca_addr = im->mca_addr;
        pmc->mca_crcount = idev->mc_qrv;
        pmc->mca_sfmode = im->mca_sfmode;
        if (pmc->mca_sfmode == MCAST_INCLUDE) {
                struct ip6_sf_list *psf;

                rcu_assign_pointer(pmc->mca_tomb,
                                   mc_dereference(im->mca_tomb, idev));
                rcu_assign_pointer(pmc->mca_sources,
                                   mc_dereference(im->mca_sources, idev));
                RCU_INIT_POINTER(im->mca_tomb, NULL);
                RCU_INIT_POINTER(im->mca_sources, NULL);

                for_each_psf_mclock(pmc, psf)
                        psf->sf_crcount = pmc->mca_crcount;
        }

        rcu_assign_pointer(pmc->next, idev->mc_tomb);
        rcu_assign_pointer(idev->mc_tomb, pmc);
}

/* called with mc_lock */
static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
{
        struct ip6_sf_list *psf, *sources, *tomb;
        struct in6_addr *pmca = &im->mca_addr;
        struct ifmcaddr6 *pmc, *pmc_prev;

        pmc_prev = NULL;
        for_each_mc_tomb(idev, pmc) {
                if (ipv6_addr_equal(&pmc->mca_addr, pmca))
                        break;
                pmc_prev = pmc;
        }
        if (pmc) {
                if (pmc_prev)
                        rcu_assign_pointer(pmc_prev->next, pmc->next);
                else
                        rcu_assign_pointer(idev->mc_tomb, pmc->next);
        }

        if (pmc) {
                im->idev = pmc->idev;
                if (im->mca_sfmode == MCAST_INCLUDE) {
                        tomb = rcu_replace_pointer(im->mca_tomb,
                                                   mc_dereference(pmc->mca_tomb, pmc->idev),
                                                   lockdep_is_held(&im->idev->mc_lock));
                        rcu_assign_pointer(pmc->mca_tomb, tomb);

                        sources = rcu_replace_pointer(im->mca_sources,
                                                      mc_dereference(pmc->mca_sources, pmc->idev),
                                                      lockdep_is_held(&im->idev->mc_lock));
                        rcu_assign_pointer(pmc->mca_sources, sources);
                        for_each_psf_mclock(im, psf)
                                psf->sf_crcount = idev->mc_qrv;
                } else {
                        im->mca_crcount = idev->mc_qrv;
                }
                in6_dev_put(pmc->idev);
                ip6_mc_clear_src(pmc);
                kfree_rcu(pmc, rcu);
        }
}

/* called with mc_lock */
static void mld_clear_delrec(struct inet6_dev *idev)
{
        struct ifmcaddr6 *pmc, *nextpmc;

        pmc = mc_dereference(idev->mc_tomb, idev);
        RCU_INIT_POINTER(idev->mc_tomb, NULL);

        for (; pmc; pmc = nextpmc) {
                nextpmc = mc_dereference(pmc->next, idev);
                ip6_mc_clear_src(pmc);
                in6_dev_put(pmc->idev);
                kfree_rcu(pmc, rcu);
        }

        /* clear dead sources, too */
        for_each_mc_mclock(idev, pmc) {
                struct ip6_sf_list *psf, *psf_next;

                psf = mc_dereference(pmc->mca_tomb, idev);
                RCU_INIT_POINTER(pmc->mca_tomb, NULL);
                for (; psf; psf = psf_next) {
                        psf_next = mc_dereference(psf->sf_next, idev);
                        kfree_rcu(psf, rcu);
                }
        }
}
static void mld_clear_query(struct inet6_dev *idev)
{
        struct sk_buff *skb;

        spin_lock_bh(&idev->mc_query_lock);
        while ((skb = __skb_dequeue(&idev->mc_query_queue)))
                kfree_skb(skb);
        spin_unlock_bh(&idev->mc_query_lock);
}

static void mld_clear_report(struct inet6_dev *idev)
{
        struct sk_buff *skb;

        spin_lock_bh(&idev->mc_report_lock);
        while ((skb = __skb_dequeue(&idev->mc_report_queue)))
                kfree_skb(skb);
        spin_unlock_bh(&idev->mc_report_lock);
}

static void mca_get(struct ifmcaddr6 *mc)
{
        refcount_inc(&mc->mca_refcnt);
}

static void ma_put(struct ifmcaddr6 *mc)
{
        if (refcount_dec_and_test(&mc->mca_refcnt)) {
                in6_dev_put(mc->idev);
                kfree_rcu(mc, rcu);
        }
}
/* called with mc_lock */
static struct ifmcaddr6 *mca_alloc(struct inet6_dev *idev,
                                   const struct in6_addr *addr,
                                   unsigned int mode)
{
        struct ifmcaddr6 *mc;

        mc = kzalloc(sizeof(*mc), GFP_KERNEL);
        if (!mc)
                return NULL;

        INIT_DELAYED_WORK(&mc->mca_work, mld_mca_work);

        mc->mca_addr = *addr;
        mc->idev = idev; /* reference taken by caller */
        mc->mca_users = 1;
        /* mca_stamp should be updated upon changes */
        mc->mca_cstamp = mc->mca_tstamp = jiffies;
        refcount_set(&mc->mca_refcnt, 1);

        mc->mca_sfmode = mode;
        mc->mca_sfcount[mode] = 1;

        if (ipv6_addr_is_ll_all_nodes(&mc->mca_addr) ||
            IPV6_ADDR_MC_SCOPE(&mc->mca_addr) < IPV6_ADDR_SCOPE_LINKLOCAL)
                mc->mca_flags |= MAF_NOREPORT;

        return mc;
}

/*
 *	device multicast group inc (add if not found)
 */
static int __ipv6_dev_mc_inc(struct net_device *dev,
                             const struct in6_addr *addr, unsigned int mode)
{
        struct ifmcaddr6 *mc;
        struct inet6_dev *idev;

        ASSERT_RTNL();

        /* we need to take a reference on idev */
        idev = in6_dev_get(dev);

        if (!idev)
                return -EINVAL;

        if (idev->dead) {
                in6_dev_put(idev);
                return -ENODEV;
        }

        mutex_lock(&idev->mc_lock);
        for_each_mc_mclock(idev, mc) {
                if (ipv6_addr_equal(&mc->mca_addr, addr)) {
                        mc->mca_users++;
                        ip6_mc_add_src(idev, &mc->mca_addr, mode, 0, NULL, 0);
                        mutex_unlock(&idev->mc_lock);
                        in6_dev_put(idev);
                        return 0;
                }
        }

        mc = mca_alloc(idev, addr, mode);
        if (!mc) {
                mutex_unlock(&idev->mc_lock);
                in6_dev_put(idev);
                return -ENOMEM;
        }

        rcu_assign_pointer(mc->next, idev->mc_list);
        rcu_assign_pointer(idev->mc_list, mc);

        mca_get(mc);

        mld_del_delrec(idev, mc);
        igmp6_group_added(mc);
        mutex_unlock(&idev->mc_lock);
        ma_put(mc);
        return 0;
}

int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr)
{
        return __ipv6_dev_mc_inc(dev, addr, MCAST_EXCLUDE);
}
EXPORT_SYMBOL(ipv6_dev_mc_inc);
/*
 *	device multicast group del
 */
int __ipv6_dev_mc_dec(struct inet6_dev *idev, const struct in6_addr *addr)
{
        struct ifmcaddr6 *ma, __rcu **map;

        ASSERT_RTNL();

        mutex_lock(&idev->mc_lock);
        for (map = &idev->mc_list;
             (ma = mc_dereference(*map, idev));
             map = &ma->next) {
                if (ipv6_addr_equal(&ma->mca_addr, addr)) {
                        if (--ma->mca_users == 0) {
                                *map = ma->next;

                                igmp6_group_dropped(ma);
                                ip6_mc_clear_src(ma);
                                mutex_unlock(&idev->mc_lock);

                                ma_put(ma);
                                return 0;
                        }
                        mutex_unlock(&idev->mc_lock);
                        return 0;
                }
        }

        mutex_unlock(&idev->mc_lock);
        return -ENOENT;
}

int ipv6_dev_mc_dec(struct net_device *dev, const struct in6_addr *addr)
{
        struct inet6_dev *idev;
        int err;

        ASSERT_RTNL();

        idev = __in6_dev_get(dev);
        if (!idev)
                err = -ENODEV;
        else
                err = __ipv6_dev_mc_dec(idev, addr);

        return err;
}
EXPORT_SYMBOL(ipv6_dev_mc_dec);

/*
 *	check if the interface/address pair is valid
 */
bool ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group,
                         const struct in6_addr *src_addr)
{
        struct inet6_dev *idev;
        struct ifmcaddr6 *mc;
        bool rv = false;

        rcu_read_lock();
        idev = __in6_dev_get(dev);
        if (idev) {
                for_each_mc_rcu(idev, mc) {
                        if (ipv6_addr_equal(&mc->mca_addr, group))
                                break;
                }
                if (mc) {
                        if (src_addr && !ipv6_addr_any(src_addr)) {
                                struct ip6_sf_list *psf;

                                for_each_psf_rcu(mc, psf) {
                                        if (ipv6_addr_equal(&psf->sf_addr, src_addr))
                                                break;
                                }
                                if (psf)
                                        rv = psf->sf_count[MCAST_INCLUDE] ||
                                                psf->sf_count[MCAST_EXCLUDE] !=
                                                mc->mca_sfcount[MCAST_EXCLUDE];
                                else
                                        rv = mc->mca_sfcount[MCAST_EXCLUDE] != 0;
                        } else
                                rv = true; /* don't filter unspecified source */
                }
        }
        rcu_read_unlock();
        return rv;
}
1052 static void mld_gq_start_work(struct inet6_dev
*idev
)
1054 unsigned long tv
= get_random_u32_below(idev
->mc_maxdelay
);
1056 idev
->mc_gq_running
= 1;
1057 if (!mod_delayed_work(mld_wq
, &idev
->mc_gq_work
, tv
+ 2))
1061 /* called with mc_lock */
1062 static void mld_gq_stop_work(struct inet6_dev
*idev
)
1064 idev
->mc_gq_running
= 0;
1065 if (cancel_delayed_work(&idev
->mc_gq_work
))
1066 __in6_dev_put(idev
);
1069 /* called with mc_lock */
1070 static void mld_ifc_start_work(struct inet6_dev
*idev
, unsigned long delay
)
1072 unsigned long tv
= get_random_u32_below(delay
);
1074 if (!mod_delayed_work(mld_wq
, &idev
->mc_ifc_work
, tv
+ 2))
1078 /* called with mc_lock */
1079 static void mld_ifc_stop_work(struct inet6_dev
*idev
)
1081 idev
->mc_ifc_count
= 0;
1082 if (cancel_delayed_work(&idev
->mc_ifc_work
))
1083 __in6_dev_put(idev
);
1086 /* called with mc_lock */
1087 static void mld_dad_start_work(struct inet6_dev
*idev
, unsigned long delay
)
1089 unsigned long tv
= get_random_u32_below(delay
);
1091 if (!mod_delayed_work(mld_wq
, &idev
->mc_dad_work
, tv
+ 2))
1095 static void mld_dad_stop_work(struct inet6_dev
*idev
)
1097 if (cancel_delayed_work(&idev
->mc_dad_work
))
1098 __in6_dev_put(idev
);
1101 static void mld_query_stop_work(struct inet6_dev
*idev
)
1103 spin_lock_bh(&idev
->mc_query_lock
);
1104 if (cancel_delayed_work(&idev
->mc_query_work
))
1105 __in6_dev_put(idev
);
1106 spin_unlock_bh(&idev
->mc_query_lock
);
1109 static void mld_report_stop_work(struct inet6_dev
*idev
)
1111 if (cancel_delayed_work_sync(&idev
->mc_report_work
))
1112 __in6_dev_put(idev
);
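/* Note the asymmetry above: the other stop helpers use
 * cancel_delayed_work(), while mld_report_stop_work() uses
 * cancel_delayed_work_sync() and therefore waits for a running
 * mc_report_work instance to finish before returning.
 */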
/*
 * IGMP handling (alias multicast ICMPv6 messages)
 * called with mc_lock
 */
static void igmp6_group_queried(struct ifmcaddr6 *ma, unsigned long resptime)
{
        unsigned long delay = resptime;

        /* Do not start work for these addresses */
        if (ipv6_addr_is_ll_all_nodes(&ma->mca_addr) ||
            IPV6_ADDR_MC_SCOPE(&ma->mca_addr) < IPV6_ADDR_SCOPE_LINKLOCAL)
                return;

        if (cancel_delayed_work(&ma->mca_work)) {
                refcount_dec(&ma->mca_refcnt);
                delay = ma->mca_work.timer.expires - jiffies;
        }

        if (delay >= resptime)
                delay = get_random_u32_below(resptime);

        if (!mod_delayed_work(mld_wq, &ma->mca_work, delay))
                refcount_inc(&ma->mca_refcnt);
        ma->mca_flags |= MAF_TIMER_RUNNING;
}
/* mark EXCLUDE-mode sources
 * called with mc_lock
 */
static bool mld_xmarksources(struct ifmcaddr6 *pmc, int nsrcs,
                             const struct in6_addr *srcs)
{
        struct ip6_sf_list *psf;
        int i, scount;

        scount = 0;
        for_each_psf_mclock(pmc, psf) {
                if (scount == nsrcs)
                        break;
                for (i = 0; i < nsrcs; i++) {
                        /* skip inactive filters */
                        if (psf->sf_count[MCAST_INCLUDE] ||
                            pmc->mca_sfcount[MCAST_EXCLUDE] !=
                            psf->sf_count[MCAST_EXCLUDE])
                                break;
                        if (ipv6_addr_equal(&srcs[i], &psf->sf_addr)) {
                                scount++;
                                break;
                        }
                }
        }
        pmc->mca_flags &= ~MAF_GSQUERY;
        if (scount == nsrcs)    /* all sources excluded */
                return false;
        return true;
}

/* called with mc_lock */
static bool mld_marksources(struct ifmcaddr6 *pmc, int nsrcs,
                            const struct in6_addr *srcs)
{
        struct ip6_sf_list *psf;
        int i, scount;

        if (pmc->mca_sfmode == MCAST_EXCLUDE)
                return mld_xmarksources(pmc, nsrcs, srcs);

        /* mark INCLUDE-mode sources */

        scount = 0;
        for_each_psf_mclock(pmc, psf) {
                if (scount == nsrcs)
                        break;
                for (i = 0; i < nsrcs; i++) {
                        if (ipv6_addr_equal(&srcs[i], &psf->sf_addr)) {
                                psf->sf_gsresp = 1;
                                scount++;
                                break;
                        }
                }
        }
        if (!scount) {
                pmc->mca_flags &= ~MAF_GSQUERY;
                return false;
        }
        pmc->mca_flags |= MAF_GSQUERY;
        return true;
}
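/* Both mark helpers return true when a report is still owed: the caller
 * in __mld_query_work() only (re)arms the per-group work item when
 * mld_marksources() reports that some queried source is still active.
 */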
static int mld_force_mld_version(const struct inet6_dev *idev)
{
        const struct net *net = dev_net(idev->dev);
        int all_force;

        all_force = READ_ONCE(net->ipv6.devconf_all->force_mld_version);
        /* Normally, both are 0 here. If enforcement to a particular is
         * being used, individual device enforcement will have a lower
         * precedence over 'all' device (.../conf/all/force_mld_version).
         */
        return all_force ?: READ_ONCE(idev->cnf.force_mld_version);
}

static bool mld_in_v2_mode_only(const struct inet6_dev *idev)
{
        return mld_force_mld_version(idev) == 2;
}

static bool mld_in_v1_mode_only(const struct inet6_dev *idev)
{
        return mld_force_mld_version(idev) == 1;
}

static bool mld_in_v1_mode(const struct inet6_dev *idev)
{
        if (mld_in_v2_mode_only(idev))
                return false;
        if (mld_in_v1_mode_only(idev))
                return true;
        if (idev->mc_v1_seen && time_before(jiffies, idev->mc_v1_seen))
                return true;

        return false;
}

static void mld_set_v1_mode(struct inet6_dev *idev)
{
        /* RFC3810, relevant sections:
         *  - 9.1. Robustness Variable
         *  - 9.2. Query Interval
         *  - 9.3. Query Response Interval
         *  - 9.12. Older Version Querier Present Timeout
         */
        unsigned long switchback;

        switchback = (idev->mc_qrv * idev->mc_qi) + idev->mc_qri;

        idev->mc_v1_seen = jiffies + switchback;
}
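/* With the defaults above (QRV = 2, QI = 125s, QRI = 10s) switchback is
 * 2 * 125 + 10 = 260 seconds, the Older Version Querier Present Timeout
 * of RFC 3810, 9.12.
 */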
static void mld_update_qrv(struct inet6_dev *idev,
                           const struct mld2_query *mlh2)
{
        /* RFC3810, relevant sections:
         *  - 5.1.8. QRV (Querier's Robustness Variable)
         *  - 9.1. Robustness Variable
         */

        /* The value of the Robustness Variable MUST NOT be zero,
         * and SHOULD NOT be one. Catch this here if we ever run
         * into such a case in future.
         */
        const int min_qrv = min(MLD_QRV_DEFAULT, sysctl_mld_qrv);
        WARN_ON(idev->mc_qrv == 0);

        if (mlh2->mld2q_qrv > 0)
                idev->mc_qrv = mlh2->mld2q_qrv;

        if (unlikely(idev->mc_qrv < min_qrv)) {
                net_warn_ratelimited("IPv6: MLD: clamping QRV from %u to %u!\n",
                                     idev->mc_qrv, min_qrv);
                idev->mc_qrv = min_qrv;
        }
}

static void mld_update_qi(struct inet6_dev *idev,
                          const struct mld2_query *mlh2)
{
        /* RFC3810, relevant sections:
         *  - 5.1.9. QQIC (Querier's Query Interval Code)
         *  - 9.2. Query Interval
         *  - 9.12. Older Version Querier Present Timeout
         *    (the [Query Interval] in the last Query received)
         */
        unsigned long mc_qqi;

        if (mlh2->mld2q_qqic < 128) {
                mc_qqi = mlh2->mld2q_qqic;
        } else {
                unsigned long mc_man, mc_exp;

                mc_exp = MLDV2_QQIC_EXP(mlh2->mld2q_qqic);
                mc_man = MLDV2_QQIC_MAN(mlh2->mld2q_qqic);

                mc_qqi = (mc_man | 0x10) << (mc_exp + 3);
        }

        idev->mc_qi = mc_qqi * HZ;
}
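/* Worked example for the exponential form: QQIC = 0x8F has the high bit
 * set, so exp = 0 and mant = 0xF, giving (0xF | 0x10) << (0 + 3) =
 * 31 << 3 = 248 seconds. Values below 128 are taken literally.
 */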
static void mld_update_qri(struct inet6_dev *idev,
                           const struct mld2_query *mlh2)
{
        /* RFC3810, relevant sections:
         *  - 5.1.3. Maximum Response Code
         *  - 9.3. Query Response Interval
         */
        idev->mc_qri = msecs_to_jiffies(mldv2_mrc(mlh2));
}

static int mld_process_v1(struct inet6_dev *idev, struct mld_msg *mld,
                          unsigned long *max_delay, bool v1_query)
{
        unsigned long mldv1_md;

        /* Ignore v1 queries */
        if (mld_in_v2_mode_only(idev))
                return -EINVAL;

        mldv1_md = ntohs(mld->mld_maxdelay);

        /* When in MLDv1 fallback and a MLDv2 router start-up being
         * unaware of current MLDv1 operation, the MRC == MRD mapping
         * only works when the exponential algorithm is not being
         * used (as MLDv1 is unaware of such things).
         *
         * According to the RFC author, the MLDv2 implementations
         * he's aware of all use a MRC < 32768 on start up queries.
         *
         * Thus, should we *ever* encounter something else larger
         * than that, just assume the maximum possible within our
         * reach.
         */
        if (!v1_query)
                mldv1_md = min(mldv1_md, MLDV1_MRD_MAX_COMPAT);

        *max_delay = max(msecs_to_jiffies(mldv1_md), 1UL);

        /* MLDv1 router present: we need to go into v1 mode *only*
         * when an MLDv1 query is received as per section 9.12. of
         * RFC3810! And we know from RFC2710 section 3.7 that MLDv1
         * queries MUST be of exactly 24 octets.
         */
        if (v1_query)
                mld_set_v1_mode(idev);

        /* cancel MLDv2 report work */
        mld_gq_stop_work(idev);
        /* cancel the interface change work */
        mld_ifc_stop_work(idev);
        /* clear deleted report items */
        mld_clear_delrec(idev);

        return 0;
}

static void mld_process_v2(struct inet6_dev *idev, struct mld2_query *mld,
                           unsigned long *max_delay)
{
        *max_delay = max(msecs_to_jiffies(mldv2_mrc(mld)), 1UL);

        mld_update_qrv(idev, mld);
        mld_update_qi(idev, mld);
        mld_update_qri(idev, mld);

        idev->mc_maxdelay = *max_delay;
}
/* called with rcu_read_lock() */
void igmp6_event_query(struct sk_buff *skb)
{
        struct inet6_dev *idev = __in6_dev_get(skb->dev);

        if (!idev || idev->dead)
                goto out;

        spin_lock_bh(&idev->mc_query_lock);
        if (skb_queue_len(&idev->mc_query_queue) < MLD_MAX_SKBS) {
                __skb_queue_tail(&idev->mc_query_queue, skb);
                if (!mod_delayed_work(mld_wq, &idev->mc_query_work, 0))
                        in6_dev_hold(idev);
                skb = NULL;
        }
        spin_unlock_bh(&idev->mc_query_lock);
out:
        kfree_skb(skb);
}
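/* Queries (and reports, below) are only queued here; the real MLD
 * processing runs later from mld_wq in mld_query_work()/mld_report_work().
 * That keeps this softirq path short and lets the handlers take the
 * idev->mc_lock mutex, which must not be acquired in softirq context.
 */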
static void __mld_query_work(struct sk_buff *skb)
{
        struct mld2_query *mlh2 = NULL;
        const struct in6_addr *group;
        unsigned long max_delay;
        struct inet6_dev *idev;
        struct ifmcaddr6 *ma;
        struct mld_msg *mld;
        int group_type;
        int mark = 0;
        int len, err;

        if (!pskb_may_pull(skb, sizeof(struct in6_addr)))
                goto kfree_skb;

        /* compute payload length excluding extension headers */
        len = ntohs(ipv6_hdr(skb)->payload_len) + sizeof(struct ipv6hdr);
        len -= skb_network_header_len(skb);

        /* RFC3810 6.2
         * Upon reception of an MLD message that contains a Query, the node
         * checks if the source address of the message is a valid link-local
         * address, if the Hop Limit is set to 1, and if the Router Alert
         * option is present in the Hop-By-Hop Options header of the IPv6
         * packet. If any of these checks fails, the packet is dropped.
         */
        if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL) ||
            ipv6_hdr(skb)->hop_limit != 1 ||
            !(IP6CB(skb)->flags & IP6SKB_ROUTERALERT) ||
            IP6CB(skb)->ra != htons(IPV6_OPT_ROUTERALERT_MLD))
                goto kfree_skb;

        idev = in6_dev_get(skb->dev);
        if (!idev)
                goto kfree_skb;

        mld = (struct mld_msg *)icmp6_hdr(skb);
        group = &mld->mld_mca;
        group_type = ipv6_addr_type(group);

        if (group_type != IPV6_ADDR_ANY &&
            !(group_type&IPV6_ADDR_MULTICAST))
                goto out;

        if (len < MLD_V1_QUERY_LEN) {
                goto out;
        } else if (len == MLD_V1_QUERY_LEN || mld_in_v1_mode(idev)) {
                err = mld_process_v1(idev, mld, &max_delay,
                                     len == MLD_V1_QUERY_LEN);
                if (err < 0)
                        goto out;
        } else if (len >= MLD_V2_QUERY_LEN_MIN) {
                int srcs_offset = sizeof(struct mld2_query) -
                                  sizeof(struct icmp6hdr);

                if (!pskb_may_pull(skb, srcs_offset))
                        goto out;

                mlh2 = (struct mld2_query *)skb_transport_header(skb);

                mld_process_v2(idev, mlh2, &max_delay);

                if (group_type == IPV6_ADDR_ANY) { /* general query */
                        if (mlh2->mld2q_nsrcs)
                                goto out; /* no sources allowed */

                        mld_gq_start_work(idev);
                        goto out;
                }
                /* mark sources to include, if group & source-specific */
                if (mlh2->mld2q_nsrcs != 0) {
                        if (!pskb_may_pull(skb, srcs_offset +
                            ntohs(mlh2->mld2q_nsrcs) * sizeof(struct in6_addr)))
                                goto out;

                        mlh2 = (struct mld2_query *)skb_transport_header(skb);
                        mark = 1;
                }
        } else {
                goto out;
        }

        if (group_type == IPV6_ADDR_ANY) {
                for_each_mc_mclock(idev, ma) {
                        igmp6_group_queried(ma, max_delay);
                }
        } else {
                for_each_mc_mclock(idev, ma) {
                        if (!ipv6_addr_equal(group, &ma->mca_addr))
                                continue;
                        if (ma->mca_flags & MAF_TIMER_RUNNING) {
                                /* gsquery <- gsquery && mark */
                                if (!mark)
                                        ma->mca_flags &= ~MAF_GSQUERY;
                        } else {
                                /* gsquery <- mark */
                                if (mark)
                                        ma->mca_flags |= MAF_GSQUERY;
                                else
                                        ma->mca_flags &= ~MAF_GSQUERY;
                        }
                        if (!(ma->mca_flags & MAF_GSQUERY) ||
                            mld_marksources(ma, ntohs(mlh2->mld2q_nsrcs), mlh2->mld2q_srcs))
                                igmp6_group_queried(ma, max_delay);
                        break;
                }
        }

out:
        in6_dev_put(idev);
kfree_skb:
        consume_skb(skb);
}

static void mld_query_work(struct work_struct *work)
{
        struct inet6_dev *idev = container_of(to_delayed_work(work),
                                              struct inet6_dev,
                                              mc_query_work);
        struct sk_buff_head q;
        struct sk_buff *skb;
        bool rework = false;
        int cnt = 0;

        skb_queue_head_init(&q);

        spin_lock_bh(&idev->mc_query_lock);
        while ((skb = __skb_dequeue(&idev->mc_query_queue))) {
                __skb_queue_tail(&q, skb);

                if (++cnt >= MLD_MAX_QUEUE) {
                        rework = true;
                        break;
                }
        }
        spin_unlock_bh(&idev->mc_query_lock);

        mutex_lock(&idev->mc_lock);
        while ((skb = __skb_dequeue(&q)))
                __mld_query_work(skb);
        mutex_unlock(&idev->mc_lock);

        if (rework && queue_delayed_work(mld_wq, &idev->mc_query_work, 0))
                return;

        in6_dev_put(idev);
}
/* called with rcu_read_lock() */
void igmp6_event_report(struct sk_buff *skb)
{
        struct inet6_dev *idev = __in6_dev_get(skb->dev);

        if (!idev || idev->dead)
                goto out;

        spin_lock_bh(&idev->mc_report_lock);
        if (skb_queue_len(&idev->mc_report_queue) < MLD_MAX_SKBS) {
                __skb_queue_tail(&idev->mc_report_queue, skb);
                if (!mod_delayed_work(mld_wq, &idev->mc_report_work, 0))
                        in6_dev_hold(idev);
                skb = NULL;
        }
        spin_unlock_bh(&idev->mc_report_lock);
out:
        kfree_skb(skb);
}

static void __mld_report_work(struct sk_buff *skb)
{
        struct inet6_dev *idev;
        struct ifmcaddr6 *ma;
        struct mld_msg *mld;
        int addr_type;

        /* Our own report looped back. Ignore it. */
        if (skb->pkt_type == PACKET_LOOPBACK)
                goto kfree_skb;

        /* send our report if the MC router may not have heard this report */
        if (skb->pkt_type != PACKET_MULTICAST &&
            skb->pkt_type != PACKET_BROADCAST)
                goto kfree_skb;

        if (!pskb_may_pull(skb, sizeof(*mld) - sizeof(struct icmp6hdr)))
                goto kfree_skb;

        mld = (struct mld_msg *)icmp6_hdr(skb);

        /* Drop reports with not link local source */
        addr_type = ipv6_addr_type(&ipv6_hdr(skb)->saddr);
        if (addr_type != IPV6_ADDR_ANY &&
            !(addr_type&IPV6_ADDR_LINKLOCAL))
                goto kfree_skb;

        idev = in6_dev_get(skb->dev);
        if (!idev)
                goto kfree_skb;

        /*
         *	Cancel the work for this group
         */

        for_each_mc_mclock(idev, ma) {
                if (ipv6_addr_equal(&ma->mca_addr, &mld->mld_mca)) {
                        if (cancel_delayed_work(&ma->mca_work))
                                refcount_dec(&ma->mca_refcnt);
                        ma->mca_flags &= ~(MAF_LAST_REPORTER |
                                           MAF_TIMER_RUNNING);
                        break;
                }
        }

        in6_dev_put(idev);
kfree_skb:
        consume_skb(skb);
}

static void mld_report_work(struct work_struct *work)
{
        struct inet6_dev *idev = container_of(to_delayed_work(work),
                                              struct inet6_dev,
                                              mc_report_work);
        struct sk_buff_head q;
        struct sk_buff *skb;
        bool rework = false;
        int cnt = 0;

        skb_queue_head_init(&q);
        spin_lock_bh(&idev->mc_report_lock);
        while ((skb = __skb_dequeue(&idev->mc_report_queue))) {
                __skb_queue_tail(&q, skb);

                if (++cnt >= MLD_MAX_QUEUE) {
                        rework = true;
                        break;
                }
        }
        spin_unlock_bh(&idev->mc_report_lock);

        mutex_lock(&idev->mc_lock);
        while ((skb = __skb_dequeue(&q)))
                __mld_report_work(skb);
        mutex_unlock(&idev->mc_lock);

        if (rework && queue_delayed_work(mld_wq, &idev->mc_report_work, 0))
                return;

        in6_dev_put(idev);
}
static bool is_in(struct ifmcaddr6 *pmc, struct ip6_sf_list *psf, int type,
                  int gdeleted, int sdeleted)
{
        switch (type) {
        case MLD2_MODE_IS_INCLUDE:
        case MLD2_MODE_IS_EXCLUDE:
                if (gdeleted || sdeleted)
                        return false;
                if (!((pmc->mca_flags & MAF_GSQUERY) && !psf->sf_gsresp)) {
                        if (pmc->mca_sfmode == MCAST_INCLUDE)
                                return true;
                        /* don't include if this source is excluded
                         * in all filters
                         */
                        if (psf->sf_count[MCAST_INCLUDE])
                                return type == MLD2_MODE_IS_INCLUDE;
                        return pmc->mca_sfcount[MCAST_EXCLUDE] ==
                                psf->sf_count[MCAST_EXCLUDE];
                }
                return false;
        case MLD2_CHANGE_TO_INCLUDE:
                if (gdeleted || sdeleted)
                        return false;
                return psf->sf_count[MCAST_INCLUDE] != 0;
        case MLD2_CHANGE_TO_EXCLUDE:
                if (gdeleted || sdeleted)
                        return false;
                if (pmc->mca_sfcount[MCAST_EXCLUDE] == 0 ||
                    psf->sf_count[MCAST_INCLUDE])
                        return false;
                return pmc->mca_sfcount[MCAST_EXCLUDE] ==
                        psf->sf_count[MCAST_EXCLUDE];
        case MLD2_ALLOW_NEW_SOURCES:
                if (gdeleted || !psf->sf_crcount)
                        return false;
                return (pmc->mca_sfmode == MCAST_INCLUDE) ^ sdeleted;
        case MLD2_BLOCK_OLD_SOURCES:
                if (pmc->mca_sfmode == MCAST_INCLUDE)
                        return gdeleted || (psf->sf_crcount && sdeleted);
                return psf->sf_crcount && !gdeleted && !sdeleted;
        }
        return false;
}

static int
mld_scount(struct ifmcaddr6 *pmc, int type, int gdeleted, int sdeleted)
{
        struct ip6_sf_list *psf;
        int scount = 0;

        for_each_psf_mclock(pmc, psf) {
                if (!is_in(pmc, psf, type, gdeleted, sdeleted))
                        continue;
                scount++;
        }
        return scount;
}
static void ip6_mc_hdr(const struct sock *sk, struct sk_buff *skb,
                       struct net_device *dev, const struct in6_addr *saddr,
                       const struct in6_addr *daddr, int proto, int len)
{
        struct ipv6hdr *hdr;

        skb->protocol = htons(ETH_P_IPV6);
        skb->dev = dev;

        skb_reset_network_header(skb);
        skb_put(skb, sizeof(struct ipv6hdr));
        hdr = ipv6_hdr(skb);

        ip6_flow_hdr(hdr, 0, 0);

        hdr->payload_len = htons(len);
        hdr->nexthdr = proto;
        hdr->hop_limit = READ_ONCE(inet6_sk(sk)->hop_limit);

        hdr->saddr = *saddr;
        hdr->daddr = *daddr;
}

static struct sk_buff *mld_newpack(struct inet6_dev *idev, unsigned int mtu)
{
        u8 ra[8] = { IPPROTO_ICMPV6, 0, IPV6_TLV_ROUTERALERT,
                     2, 0, 0, IPV6_TLV_PADN, 0 };
        struct net_device *dev = idev->dev;
        int hlen = LL_RESERVED_SPACE(dev);
        int tlen = dev->needed_tailroom;
        struct net *net = dev_net(dev);
        const struct in6_addr *saddr;
        struct in6_addr addr_buf;
        struct mld2_report *pmr;
        struct sk_buff *skb;
        unsigned int size;
        struct sock *sk;
        int err;

        sk = net->ipv6.igmp_sk;
        /* we assume size > sizeof(ra) here
         * Also try to not allocate high-order pages for big MTU
         */
        size = min_t(int, mtu, PAGE_SIZE / 2) + hlen + tlen;
        skb = sock_alloc_send_skb(sk, size, 1, &err);
        if (!skb)
                return NULL;

        skb->priority = TC_PRIO_CONTROL;
        skb_reserve(skb, hlen);
        skb_tailroom_reserve(skb, mtu, tlen);

        if (ipv6_get_lladdr(dev, &addr_buf, IFA_F_TENTATIVE)) {
                /* <draft-ietf-magma-mld-source-05.txt>:
                 * use unspecified address as the source address
                 * when a valid link-local address is not available.
                 */
                saddr = &in6addr_any;
        } else
                saddr = &addr_buf;

        ip6_mc_hdr(sk, skb, dev, saddr, &mld2_all_mcr, NEXTHDR_HOP, 0);

        skb_put_data(skb, ra, sizeof(ra));

        skb_set_transport_header(skb, skb_tail_pointer(skb) - skb->data);
        skb_put(skb, sizeof(*pmr));
        pmr = (struct mld2_report *)skb_transport_header(skb);
        pmr->mld2r_type = ICMPV6_MLD2_REPORT;
        pmr->mld2r_resv1 = 0;
        pmr->mld2r_cksum = 0;
        pmr->mld2r_resv2 = 0;
        pmr->mld2r_ngrec = 0;
        return skb;
}
static void mld_sendpack(struct sk_buff *skb)
{
        struct ipv6hdr *pip6 = ipv6_hdr(skb);
        struct mld2_report *pmr =
                              (struct mld2_report *)skb_transport_header(skb);
        int payload_len, mldlen;
        struct inet6_dev *idev;
        struct net *net = dev_net(skb->dev);
        int err;
        struct flowi6 fl6;
        struct dst_entry *dst;

        rcu_read_lock();
        idev = __in6_dev_get(skb->dev);
        IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTREQUESTS);

        payload_len = (skb_tail_pointer(skb) - skb_network_header(skb)) -
                sizeof(*pip6);
        mldlen = skb_tail_pointer(skb) - skb_transport_header(skb);
        pip6->payload_len = htons(payload_len);

        pmr->mld2r_cksum = csum_ipv6_magic(&pip6->saddr, &pip6->daddr, mldlen,
                                           IPPROTO_ICMPV6,
                                           csum_partial(skb_transport_header(skb),
                                                        mldlen, 0));

        icmpv6_flow_init(net->ipv6.igmp_sk, &fl6, ICMPV6_MLD2_REPORT,
                         &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
                         skb->dev->ifindex);
        dst = icmp6_dst_alloc(skb->dev, &fl6);

        err = 0;
        if (IS_ERR(dst)) {
                err = PTR_ERR(dst);
                dst = NULL;
        }
        skb_dst_set(skb, dst);
        if (err)
                goto err_out;

        err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
                      net, net->ipv6.igmp_sk, skb, NULL, skb->dev,
                      dst_output);
out:
        if (!err) {
                ICMP6MSGOUT_INC_STATS(net, idev, ICMPV6_MLD2_REPORT);
                ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
        } else {
                IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
        }

        rcu_read_unlock();
        return;

err_out:
        kfree_skb(skb);
        goto out;
}

static int grec_size(struct ifmcaddr6 *pmc, int type, int gdel, int sdel)
{
        return sizeof(struct mld2_grec) + 16 * mld_scount(pmc,type,gdel,sdel);
}
static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc,
                                  int type, struct mld2_grec **ppgr, unsigned int mtu)
{
        struct mld2_report *pmr;
        struct mld2_grec *pgr;

        if (!skb) {
                skb = mld_newpack(pmc->idev, mtu);
                if (!skb)
                        return NULL;
        }
        pgr = skb_put(skb, sizeof(struct mld2_grec));
        pgr->grec_type = type;
        pgr->grec_auxwords = 0;
        pgr->grec_nsrcs = 0;
        pgr->grec_mca = pmc->mca_addr;  /* structure copy */
        pmr = (struct mld2_report *)skb_transport_header(skb);
        pmr->mld2r_ngrec = htons(ntohs(pmr->mld2r_ngrec)+1);
        *ppgr = pgr;
        return skb;
}

#define AVAILABLE(skb)	((skb) ? skb_availroom(skb) : 0)

/* called with mc_lock */
static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
                                int type, int gdeleted, int sdeleted,
                                int crsend)
{
        struct ip6_sf_list *psf, *psf_prev, *psf_next;
        int scount, stotal, first, isquery, truncate;
        struct ip6_sf_list __rcu **psf_list;
        struct inet6_dev *idev = pmc->idev;
        struct net_device *dev = idev->dev;
        struct mld2_grec *pgr = NULL;
        struct mld2_report *pmr;
        unsigned int mtu;

        if (pmc->mca_flags & MAF_NOREPORT)
                return skb;

        mtu = READ_ONCE(dev->mtu);
        if (mtu < IPV6_MIN_MTU)
                return skb;

        isquery = type == MLD2_MODE_IS_INCLUDE ||
                  type == MLD2_MODE_IS_EXCLUDE;
        truncate = type == MLD2_MODE_IS_EXCLUDE ||
                   type == MLD2_CHANGE_TO_EXCLUDE;

        stotal = scount = 0;

        psf_list = sdeleted ? &pmc->mca_tomb : &pmc->mca_sources;

        if (!rcu_access_pointer(*psf_list))
                goto empty_source;

        pmr = skb ? (struct mld2_report *)skb_transport_header(skb) : NULL;

        /* EX and TO_EX get a fresh packet, if needed */
        if (truncate) {
                if (pmr && pmr->mld2r_ngrec &&
                    AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) {
                        if (skb)
                                mld_sendpack(skb);
                        skb = mld_newpack(idev, mtu);
                }
        }
        first = 1;
        psf_prev = NULL;
        for (psf = mc_dereference(*psf_list, idev);
             psf;
             psf = psf_next) {
                struct in6_addr *psrc;

                psf_next = mc_dereference(psf->sf_next, idev);

                if (!is_in(pmc, psf, type, gdeleted, sdeleted) && !crsend) {
                        psf_prev = psf;
                        continue;
                }

                /* Based on RFC3810 6.1. Should not send source-list change
                 * records when there is a filter mode change.
                 */
                if (((gdeleted && pmc->mca_sfmode == MCAST_EXCLUDE) ||
                     (!gdeleted && pmc->mca_crcount)) &&
                    (type == MLD2_ALLOW_NEW_SOURCES ||
                     type == MLD2_BLOCK_OLD_SOURCES) && psf->sf_crcount)
                        goto decrease_sf_crcount;

                /* clear marks on query responses */
                if (isquery)
                        psf->sf_gsresp = 0;

                if (AVAILABLE(skb) < sizeof(*psrc) +
                    first*sizeof(struct mld2_grec)) {
                        if (truncate && !first)
                                break;   /* truncate these */
                        if (pgr)
                                pgr->grec_nsrcs = htons(scount);
                        if (skb)
                                mld_sendpack(skb);
                        skb = mld_newpack(idev, mtu);
                        first = 1;
                        scount = 0;
                }
                if (first) {
                        skb = add_grhead(skb, pmc, type, &pgr, mtu);
                        first = 0;
                }
                if (!skb)
                        return NULL;
                psrc = skb_put(skb, sizeof(*psrc));
                *psrc = psf->sf_addr;
                scount++; stotal++;
                if ((type == MLD2_ALLOW_NEW_SOURCES ||
                     type == MLD2_BLOCK_OLD_SOURCES) && psf->sf_crcount) {
decrease_sf_crcount:
                        psf->sf_crcount--;
                        if ((sdeleted || gdeleted) && psf->sf_crcount == 0) {
                                if (psf_prev)
                                        rcu_assign_pointer(psf_prev->sf_next,
                                                           mc_dereference(psf->sf_next, idev));
                                else
                                        rcu_assign_pointer(*psf_list,
                                                           mc_dereference(psf->sf_next, idev));
                                kfree_rcu(psf, rcu);
                                continue;
                        }
                }
                psf_prev = psf;
        }

empty_source:
        if (!stotal) {
                if (type == MLD2_ALLOW_NEW_SOURCES ||
                    type == MLD2_BLOCK_OLD_SOURCES)
                        return skb;
                if (pmc->mca_crcount || isquery || crsend) {
                        /* make sure we have room for group header */
                        if (skb && AVAILABLE(skb) < sizeof(struct mld2_grec)) {
                                mld_sendpack(skb);
                                skb = NULL; /* add_grhead will get a new one */
                        }
                        skb = add_grhead(skb, pmc, type, &pgr, mtu);
                }
        }
        if (pgr)
                pgr->grec_nsrcs = htons(scount);

        if (isquery)
                pmc->mca_flags &= ~MAF_GSQUERY; /* clear query state */
        return skb;
}
/* called with mc_lock */
static void mld_send_report(struct inet6_dev *idev, struct ifmcaddr6 *pmc)
{
        struct sk_buff *skb = NULL;
        int type;

        if (!pmc) {
                for_each_mc_mclock(idev, pmc) {
                        if (pmc->mca_flags & MAF_NOREPORT)
                                continue;
                        if (pmc->mca_sfcount[MCAST_EXCLUDE])
                                type = MLD2_MODE_IS_EXCLUDE;
                        else
                                type = MLD2_MODE_IS_INCLUDE;
                        skb = add_grec(skb, pmc, type, 0, 0, 0);
                }
        } else {
                if (pmc->mca_sfcount[MCAST_EXCLUDE])
                        type = MLD2_MODE_IS_EXCLUDE;
                else
                        type = MLD2_MODE_IS_INCLUDE;
                skb = add_grec(skb, pmc, type, 0, 0, 0);
        }
        if (skb)
                mld_sendpack(skb);
}

/*
 * remove zero-count source records from a source filter list
 * called with mc_lock
 */
static void mld_clear_zeros(struct ip6_sf_list __rcu **ppsf, struct inet6_dev *idev)
{
        struct ip6_sf_list *psf_prev, *psf_next, *psf;

        psf_prev = NULL;
        for (psf = mc_dereference(*ppsf, idev);
             psf;
             psf = psf_next) {
                psf_next = mc_dereference(psf->sf_next, idev);
                if (psf->sf_crcount == 0) {
                        if (psf_prev)
                                rcu_assign_pointer(psf_prev->sf_next,
                                                   mc_dereference(psf->sf_next, idev));
                        else
                                rcu_assign_pointer(*ppsf,
                                                   mc_dereference(psf->sf_next, idev));
                        kfree_rcu(psf, rcu);
                } else {
                        psf_prev = psf;
                }
        }
}
/* called with mc_lock */
static void mld_send_cr(struct inet6_dev *idev)
{
        struct ifmcaddr6 *pmc, *pmc_prev, *pmc_next;
        struct sk_buff *skb = NULL;
        int type, dtype;

        /* deleted MCA's */
        pmc_prev = NULL;
        for (pmc = mc_dereference(idev->mc_tomb, idev);
             pmc;
             pmc = pmc_next) {
                pmc_next = mc_dereference(pmc->next, idev);
                if (pmc->mca_sfmode == MCAST_INCLUDE) {
                        type = MLD2_BLOCK_OLD_SOURCES;
                        dtype = MLD2_BLOCK_OLD_SOURCES;
                        skb = add_grec(skb, pmc, type, 1, 0, 0);
                        skb = add_grec(skb, pmc, dtype, 1, 1, 0);
                }
                if (pmc->mca_crcount) {
                        if (pmc->mca_sfmode == MCAST_EXCLUDE) {
                                type = MLD2_CHANGE_TO_INCLUDE;
                                skb = add_grec(skb, pmc, type, 1, 0, 0);
                        }
                        pmc->mca_crcount--;
                        if (pmc->mca_crcount == 0) {
                                mld_clear_zeros(&pmc->mca_tomb, idev);
                                mld_clear_zeros(&pmc->mca_sources, idev);
                        }
                }
                if (pmc->mca_crcount == 0 &&
                    !rcu_access_pointer(pmc->mca_tomb) &&
                    !rcu_access_pointer(pmc->mca_sources)) {
                        if (pmc_prev)
                                rcu_assign_pointer(pmc_prev->next, pmc_next);
                        else
                                rcu_assign_pointer(idev->mc_tomb, pmc_next);
                        in6_dev_put(pmc->idev);
                        kfree_rcu(pmc, rcu);
                } else
                        pmc_prev = pmc;
        }

        /* change recs */
        for_each_mc_mclock(idev, pmc) {
                if (pmc->mca_sfcount[MCAST_EXCLUDE]) {
                        type = MLD2_BLOCK_OLD_SOURCES;
                        dtype = MLD2_ALLOW_NEW_SOURCES;
                } else {
                        type = MLD2_ALLOW_NEW_SOURCES;
                        dtype = MLD2_BLOCK_OLD_SOURCES;
                }
                skb = add_grec(skb, pmc, type, 0, 0, 0);
                skb = add_grec(skb, pmc, dtype, 0, 1, 0);       /* deleted sources */

                /* filter mode changes */
                if (pmc->mca_crcount) {
                        if (pmc->mca_sfmode == MCAST_EXCLUDE)
                                type = MLD2_CHANGE_TO_EXCLUDE;
                        else
                                type = MLD2_CHANGE_TO_INCLUDE;
                        skb = add_grec(skb, pmc, type, 0, 0, 0);
                        pmc->mca_crcount--;
                }
        }
        if (!skb)
                return;
        (void) mld_sendpack(skb);
}
static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
{
        struct net *net = dev_net(dev);
        struct sock *sk = net->ipv6.igmp_sk;
        struct inet6_dev *idev;
        struct sk_buff *skb;
        struct mld_msg *hdr;
        const struct in6_addr *snd_addr, *saddr;
        struct in6_addr addr_buf;
        int hlen = LL_RESERVED_SPACE(dev);
        int tlen = dev->needed_tailroom;
        int err, len, payload_len, full_len;
        u8 ra[8] = { IPPROTO_ICMPV6, 0,
                     IPV6_TLV_ROUTERALERT, 2, 0, 0,
                     IPV6_TLV_PADN, 0 };
        struct flowi6 fl6;
        struct dst_entry *dst;

        if (type == ICMPV6_MGM_REDUCTION)
                snd_addr = &in6addr_linklocal_allrouters;
        else
                snd_addr = addr;

        len = sizeof(struct icmp6hdr) + sizeof(struct in6_addr);
        payload_len = len + sizeof(ra);
        full_len = sizeof(struct ipv6hdr) + payload_len;

        rcu_read_lock();
        IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_OUTREQUESTS);
        rcu_read_unlock();

        skb = sock_alloc_send_skb(sk, hlen + tlen + full_len, 1, &err);
        if (!skb) {
                rcu_read_lock();
                IP6_INC_STATS(net, __in6_dev_get(dev),
                              IPSTATS_MIB_OUTDISCARDS);
                rcu_read_unlock();
                return;
        }
        skb->priority = TC_PRIO_CONTROL;
        skb_reserve(skb, hlen);

        if (ipv6_get_lladdr(dev, &addr_buf, IFA_F_TENTATIVE)) {
                /* <draft-ietf-magma-mld-source-05.txt>:
                 * use unspecified address as the source address
                 * when a valid link-local address is not available.
                 */
                saddr = &in6addr_any;
        } else
                saddr = &addr_buf;

        ip6_mc_hdr(sk, skb, dev, saddr, snd_addr, NEXTHDR_HOP, payload_len);

        skb_put_data(skb, ra, sizeof(ra));

        hdr = skb_put_zero(skb, sizeof(struct mld_msg));
        hdr->mld_type = type;
        hdr->mld_mca = *addr;

        hdr->mld_cksum = csum_ipv6_magic(saddr, snd_addr, len,
                                         IPPROTO_ICMPV6,
                                         csum_partial(hdr, len, 0));

        rcu_read_lock();
        idev = __in6_dev_get(skb->dev);

        icmpv6_flow_init(sk, &fl6, type,
                         &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
                         skb->dev->ifindex);
        dst = icmp6_dst_alloc(skb->dev, &fl6);
        if (IS_ERR(dst)) {
                err = PTR_ERR(dst);
                goto err_out;
        }

        skb_dst_set(skb, dst);
        err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
                      net, sk, skb, NULL, skb->dev,
                      dst_output);
out:
        if (!err) {
                ICMP6MSGOUT_INC_STATS(net, idev, type);
                ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
        } else
                IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);

        rcu_read_unlock();
        return;

err_out:
        kfree_skb(skb);
        goto out;
}
/* called with mc_lock */
static void mld_send_initial_cr(struct inet6_dev *idev)
{
        struct sk_buff *skb;
        struct ifmcaddr6 *pmc;
        int type;

        if (mld_in_v1_mode(idev))
                return;

        skb = NULL;
        for_each_mc_mclock(idev, pmc) {
                if (pmc->mca_sfcount[MCAST_EXCLUDE])
                        type = MLD2_CHANGE_TO_EXCLUDE;
                else
                        type = MLD2_ALLOW_NEW_SOURCES;
                skb = add_grec(skb, pmc, type, 0, 0, 1);
        }
        if (skb)
                mld_sendpack(skb);
}

void ipv6_mc_dad_complete(struct inet6_dev *idev)
{
        mutex_lock(&idev->mc_lock);
        idev->mc_dad_count = idev->mc_qrv;
        if (idev->mc_dad_count) {
                mld_send_initial_cr(idev);
                idev->mc_dad_count--;
                if (idev->mc_dad_count)
                        mld_dad_start_work(idev,
                                           unsolicited_report_interval(idev));
        }
        mutex_unlock(&idev->mc_lock);
}

static void mld_dad_work(struct work_struct *work)
{
        struct inet6_dev *idev = container_of(to_delayed_work(work),
                                              struct inet6_dev,
                                              mc_dad_work);
        mutex_lock(&idev->mc_lock);
        mld_send_initial_cr(idev);
        if (idev->mc_dad_count) {
                idev->mc_dad_count--;
                if (idev->mc_dad_count)
                        mld_dad_start_work(idev,
                                           unsolicited_report_interval(idev));
        }
        mutex_unlock(&idev->mc_lock);
        in6_dev_put(idev);
}
/* called with mc_lock */
static int ip6_mc_del1_src(struct ifmcaddr6 *pmc, int sfmode,
                           const struct in6_addr *psfsrc)
{
        struct ip6_sf_list *psf, *psf_prev;
        int rv = 0;

        psf_prev = NULL;
        for_each_psf_mclock(pmc, psf) {
                if (ipv6_addr_equal(&psf->sf_addr, psfsrc))
                        break;
                psf_prev = psf;
        }
        if (!psf || psf->sf_count[sfmode] == 0) {
                /* source filter not found, or count wrong => bug */
                return -ESRCH;
        }
        psf->sf_count[sfmode]--;
        if (!psf->sf_count[MCAST_INCLUDE] && !psf->sf_count[MCAST_EXCLUDE]) {
                struct inet6_dev *idev = pmc->idev;

                /* no more filters for this source */
                if (psf_prev)
                        rcu_assign_pointer(psf_prev->sf_next,
                                           mc_dereference(psf->sf_next, idev));
                else
                        rcu_assign_pointer(pmc->mca_sources,
                                           mc_dereference(psf->sf_next, idev));

                if (psf->sf_oldin && !(pmc->mca_flags & MAF_NOREPORT) &&
                    !mld_in_v1_mode(idev)) {
                        psf->sf_crcount = idev->mc_qrv;
                        rcu_assign_pointer(psf->sf_next,
                                           mc_dereference(pmc->mca_tomb, idev));
                        rcu_assign_pointer(pmc->mca_tomb, psf);
                        rv = 1;
                } else {
                        kfree_rcu(psf, rcu);
                }
        }
        return rv;
}

/* called with mc_lock */
static int ip6_mc_del_src(struct inet6_dev *idev, const struct in6_addr *pmca,
                          int sfmode, int sfcount, const struct in6_addr *psfsrc,
                          int delta)
{
        struct ifmcaddr6 *pmc;
        int changerec = 0;
        int i, err;

        if (!idev)
                return -ENODEV;

        for_each_mc_mclock(idev, pmc) {
                if (ipv6_addr_equal(pmca, &pmc->mca_addr))
                        break;
        }
        if (!pmc)
                return -ESRCH;

        sf_markstate(pmc);
        if (!delta) {
                if (!pmc->mca_sfcount[sfmode])
                        return -EINVAL;

                pmc->mca_sfcount[sfmode]--;
        }
        err = 0;
        for (i = 0; i < sfcount; i++) {
                int rv = ip6_mc_del1_src(pmc, sfmode, &psfsrc[i]);

                changerec |= rv > 0;
                if (!err && rv < 0)
                        err = rv;
        }
        if (pmc->mca_sfmode == MCAST_EXCLUDE &&
            pmc->mca_sfcount[MCAST_EXCLUDE] == 0 &&
            pmc->mca_sfcount[MCAST_INCLUDE]) {
                struct ip6_sf_list *psf;

                /* filter mode change */
                pmc->mca_sfmode = MCAST_INCLUDE;
                pmc->mca_crcount = idev->mc_qrv;
                idev->mc_ifc_count = pmc->mca_crcount;
                for_each_psf_mclock(pmc, psf)
                        psf->sf_crcount = 0;
                mld_ifc_event(pmc->idev);
        } else if (sf_setstate(pmc) || changerec) {
                mld_ifc_event(pmc->idev);
        }

        return err;
}
/*
 * Add multicast single-source filter to the interface list
 * called with mc_lock
 */
static int ip6_mc_add1_src(struct ifmcaddr6 *pmc, int sfmode,
                           const struct in6_addr *psfsrc)
{
        struct ip6_sf_list *psf, *psf_prev;

        psf_prev = NULL;
        for_each_psf_mclock(pmc, psf) {
                if (ipv6_addr_equal(&psf->sf_addr, psfsrc))
                        break;
                psf_prev = psf;
        }
        if (!psf) {
                psf = kzalloc(sizeof(*psf), GFP_KERNEL);
                if (!psf)
                        return -ENOBUFS;

                psf->sf_addr = *psfsrc;
                if (psf_prev) {
                        rcu_assign_pointer(psf_prev->sf_next, psf);
                } else {
                        rcu_assign_pointer(pmc->mca_sources, psf);
                }
        }
        psf->sf_count[sfmode]++;
        return 0;
}

/* called with mc_lock */
static void sf_markstate(struct ifmcaddr6 *pmc)
{
        struct ip6_sf_list *psf;
        int mca_xcount = pmc->mca_sfcount[MCAST_EXCLUDE];

        for_each_psf_mclock(pmc, psf) {
                if (pmc->mca_sfcount[MCAST_EXCLUDE]) {
                        psf->sf_oldin = mca_xcount ==
                                psf->sf_count[MCAST_EXCLUDE] &&
                                !psf->sf_count[MCAST_INCLUDE];
                } else {
                        psf->sf_oldin = psf->sf_count[MCAST_INCLUDE] != 0;
                }
        }
}

/* called with mc_lock */
static int sf_setstate(struct ifmcaddr6 *pmc)
{
        struct ip6_sf_list *psf, *dpsf;
        int mca_xcount = pmc->mca_sfcount[MCAST_EXCLUDE];
        int qrv = pmc->idev->mc_qrv;
        int new_in, rv;

        rv = 0;
        for_each_psf_mclock(pmc, psf) {
                if (pmc->mca_sfcount[MCAST_EXCLUDE]) {
                        new_in = mca_xcount == psf->sf_count[MCAST_EXCLUDE] &&
                                 !psf->sf_count[MCAST_INCLUDE];
                } else {
                        new_in = psf->sf_count[MCAST_INCLUDE] != 0;
                }
                if (new_in) {
                        if (!psf->sf_oldin) {
                                struct ip6_sf_list *prev = NULL;

                                for_each_psf_tomb(pmc, dpsf) {
                                        if (ipv6_addr_equal(&dpsf->sf_addr,
                                                            &psf->sf_addr))
                                                break;
                                        prev = dpsf;
                                }
                                if (dpsf) {
                                        if (prev)
                                                rcu_assign_pointer(prev->sf_next,
                                                                   mc_dereference(dpsf->sf_next,
                                                                                  pmc->idev));
                                        else
                                                rcu_assign_pointer(pmc->mca_tomb,
                                                                   mc_dereference(dpsf->sf_next,
                                                                                  pmc->idev));
                                        kfree_rcu(dpsf, rcu);
                                }
                                psf->sf_crcount = qrv;
                                rv++;
                        }
                } else if (psf->sf_oldin) {
                        psf->sf_crcount = 0;
                        /*
                         * add or update "delete" records if an active filter
                         * is now inactive
                         */

                        for_each_psf_tomb(pmc, dpsf)
                                if (ipv6_addr_equal(&dpsf->sf_addr,
                                                    &psf->sf_addr))
                                        break;
                        if (!dpsf) {
                                dpsf = kmalloc(sizeof(*dpsf), GFP_KERNEL);
                                if (!dpsf)
                                        continue;
                                *dpsf = *psf;
                                rcu_assign_pointer(dpsf->sf_next,
                                                   mc_dereference(pmc->mca_tomb, pmc->idev));
                                rcu_assign_pointer(pmc->mca_tomb, dpsf);
                        }
                        dpsf->sf_crcount = qrv;
                        rv++;
                }
        }
        return rv;
}
/*
 * Add multicast source filter list to the interface list
 * called with mc_lock
 */
static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
			  int sfmode, int sfcount, const struct in6_addr *psfsrc,
			  int delta)
{
	struct ifmcaddr6 *pmc;
	int isexclude;
	int i, err;

	if (!idev)
		return -ENODEV;

	for_each_mc_mclock(idev, pmc) {
		if (ipv6_addr_equal(pmca, &pmc->mca_addr))
			break;
	}
	if (!pmc)
		return -ESRCH;

	sf_markstate(pmc);
	isexclude = pmc->mca_sfmode == MCAST_EXCLUDE;
	if (!delta)
		pmc->mca_sfcount[sfmode]++;
	err = 0;
	for (i = 0; i < sfcount; i++) {
		err = ip6_mc_add1_src(pmc, sfmode, &psfsrc[i]);
		if (err)
			break;
	}
	if (err) {
		int j;

		if (!delta)
			pmc->mca_sfcount[sfmode]--;
		for (j = 0; j < i; j++)
			ip6_mc_del1_src(pmc, sfmode, &psfsrc[j]);
	} else if (isexclude != (pmc->mca_sfcount[MCAST_EXCLUDE] != 0)) {
		struct ip6_sf_list *psf;

		/* filter mode change */
		if (pmc->mca_sfcount[MCAST_EXCLUDE])
			pmc->mca_sfmode = MCAST_EXCLUDE;
		else if (pmc->mca_sfcount[MCAST_INCLUDE])
			pmc->mca_sfmode = MCAST_INCLUDE;
		/* else no filters; keep old mode for reports */

		pmc->mca_crcount = idev->mc_qrv;
		idev->mc_ifc_count = pmc->mca_crcount;
		for_each_psf_mclock(pmc, psf)
			psf->sf_crcount = 0;
		mld_ifc_event(idev);
	} else if (sf_setstate(pmc)) {
		mld_ifc_event(idev);
	}
	return err;
}
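/* The error path above is all-or-nothing: if adding the i-th source fails,
 * the i sources already installed are removed again with ip6_mc_del1_src(),
 * leaving the interface filter unchanged.
 */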
/* called with mc_lock */
static void ip6_mc_clear_src(struct ifmcaddr6 *pmc)
{
	struct ip6_sf_list *psf, *nextpsf;

	for (psf = mc_dereference(pmc->mca_tomb, pmc->idev);
	     psf;
	     psf = nextpsf) {
		nextpsf = mc_dereference(psf->sf_next, pmc->idev);
		kfree_rcu(psf, rcu);
	}
	RCU_INIT_POINTER(pmc->mca_tomb, NULL);
	for (psf = mc_dereference(pmc->mca_sources, pmc->idev);
	     psf;
	     psf = nextpsf) {
		nextpsf = mc_dereference(psf->sf_next, pmc->idev);
		kfree_rcu(psf, rcu);
	}
	RCU_INIT_POINTER(pmc->mca_sources, NULL);
	pmc->mca_sfmode = MCAST_EXCLUDE;
	pmc->mca_sfcount[MCAST_INCLUDE] = 0;
	pmc->mca_sfcount[MCAST_EXCLUDE] = 1;
}
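/* The group ends up in EXCLUDE mode with an empty source list, i.e. the
 * "listen to all sources" state that a fresh membership starts from.
 */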
/* called with mc_lock */
static void igmp6_join_group(struct ifmcaddr6 *ma)
{
	unsigned long delay;

	if (ma->mca_flags & MAF_NOREPORT)
		return;

	igmp6_send(&ma->mca_addr, ma->idev->dev, ICMPV6_MGM_REPORT);

	delay = get_random_u32_below(unsolicited_report_interval(ma->idev));

	if (cancel_delayed_work(&ma->mca_work)) {
		refcount_dec(&ma->mca_refcnt);
		delay = ma->mca_work.timer.expires - jiffies;
	}

	if (!mod_delayed_work(mld_wq, &ma->mca_work, delay))
		refcount_inc(&ma->mca_refcnt);
	ma->mca_flags |= MAF_TIMER_RUNNING | MAF_LAST_REPORTER;
}
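/* The initial report is followed by a retransmission after a random delay
 * bounded by the unsolicited report interval, which is what the delayed
 * work armed above implements (RFC 2710 for MLDv1, RFC 3810 for MLDv2).
 */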
static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml,
			    struct inet6_dev *idev)
{
	struct ip6_sf_socklist *psl;
	int err;

	psl = sock_dereference(iml->sflist, sk);

	if (idev)
		mutex_lock(&idev->mc_lock);

	if (!psl) {
		/* any-source empty exclude case */
		err = ip6_mc_del_src(idev, &iml->addr, iml->sfmode, 0, NULL, 0);
	} else {
		err = ip6_mc_del_src(idev, &iml->addr, iml->sfmode,
				     psl->sl_count, psl->sl_addr, 0);
		RCU_INIT_POINTER(iml->sflist, NULL);
		atomic_sub(struct_size(psl, sl_addr, psl->sl_max),
			   &sk->sk_omem_alloc);
		kfree_rcu(psl, rcu);
	}

	if (idev)
		mutex_unlock(&idev->mc_lock);

	return err;
}
/* called with mc_lock */
static void igmp6_leave_group(struct ifmcaddr6 *ma)
{
	if (mld_in_v1_mode(ma->idev)) {
		if (ma->mca_flags & MAF_LAST_REPORTER) {
			igmp6_send(&ma->mca_addr, ma->idev->dev,
				   ICMPV6_MGM_REDUCTION);
		}
	} else {
		mld_add_delrec(ma->idev, ma);
		mld_ifc_event(ma->idev);
	}
}
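/* In MLDv1 mode a Done message is only useful if we were the last node to
 * report this group; in MLDv2 mode the leave is instead carried by a
 * "delete" record queued via mld_add_delrec() and sent out from
 * mld_ifc_event().
 */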
static void mld_gq_work(struct work_struct *work)
{
	struct inet6_dev *idev = container_of(to_delayed_work(work),
					      struct inet6_dev,
					      mc_gq_work);

	mutex_lock(&idev->mc_lock);
	mld_send_report(idev, NULL);
	idev->mc_gq_running = 0;
	mutex_unlock(&idev->mc_lock);

	in6_dev_put(idev);
}
static void mld_ifc_work(struct work_struct *work)
{
	struct inet6_dev *idev = container_of(to_delayed_work(work),
					      struct inet6_dev,
					      mc_ifc_work);

	mutex_lock(&idev->mc_lock);
	mld_send_cr(idev);

	if (idev->mc_ifc_count) {
		idev->mc_ifc_count--;
		if (idev->mc_ifc_count)
			mld_ifc_start_work(idev,
					   unsolicited_report_interval(idev));
	}
	mutex_unlock(&idev->mc_lock);
	in6_dev_put(idev);
}
/* called with mc_lock */
static void mld_ifc_event(struct inet6_dev *idev)
{
	if (mld_in_v1_mode(idev))
		return;

	idev->mc_ifc_count = idev->mc_qrv;
	mld_ifc_start_work(idev, 1);
}
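/* mld_ifc_event() arms the interface-change work for mc_qrv rounds;
 * mld_ifc_work() above sends the pending change records and re-arms itself
 * until mc_ifc_count reaches zero.
 */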
static void mld_mca_work(struct work_struct *work)
{
	struct ifmcaddr6 *ma = container_of(to_delayed_work(work),
					    struct ifmcaddr6, mca_work);

	mutex_lock(&ma->idev->mc_lock);
	if (mld_in_v1_mode(ma->idev))
		igmp6_send(&ma->mca_addr, ma->idev->dev, ICMPV6_MGM_REPORT);
	else
		mld_send_report(ma->idev, ma);
	ma->mca_flags |= MAF_LAST_REPORTER;
	ma->mca_flags &= ~MAF_TIMER_RUNNING;
	mutex_unlock(&ma->idev->mc_lock);

	ma_put(ma);
}
/* Device changing type */

void ipv6_mc_unmap(struct inet6_dev *idev)
{
	struct ifmcaddr6 *i;

	/* Withdraw multicast list */
	mutex_lock(&idev->mc_lock);
	for_each_mc_mclock(idev, i)
		igmp6_group_dropped(i);
	mutex_unlock(&idev->mc_lock);
}

void ipv6_mc_remap(struct inet6_dev *idev)
{
	ipv6_mc_up(idev);
}
/* Device going down */
void ipv6_mc_down(struct inet6_dev *idev)
{
	struct ifmcaddr6 *i;

	mutex_lock(&idev->mc_lock);
	/* Withdraw multicast list */
	for_each_mc_mclock(idev, i)
		igmp6_group_dropped(i);
	mutex_unlock(&idev->mc_lock);

	/* Stop the works after the group drop, or mld_ifc_event()
	 * would start them again.
	 */
	mld_query_stop_work(idev);
	mld_report_stop_work(idev);

	mutex_lock(&idev->mc_lock);
	mld_ifc_stop_work(idev);
	mld_gq_stop_work(idev);
	mutex_unlock(&idev->mc_lock);

	mld_dad_stop_work(idev);
}
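/* ipv6_mc_down()/ipv6_mc_up() bracket an interface going down and up: the
 * memberships stay on idev->mc_list throughout; only their announcements
 * and pending works are withdrawn here and re-established in ipv6_mc_up().
 */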
static void ipv6_mc_reset(struct inet6_dev *idev)
{
	idev->mc_qrv = sysctl_mld_qrv;
	idev->mc_qi = MLD_QI_DEFAULT;
	idev->mc_qri = MLD_QRI_DEFAULT;
	idev->mc_v1_seen = 0;
	idev->mc_maxdelay = unsolicited_report_interval(idev);
}
/* Device going up */

void ipv6_mc_up(struct inet6_dev *idev)
{
	struct ifmcaddr6 *i;

	/* Install multicast list, except for all-nodes (already installed) */

	ipv6_mc_reset(idev);
	mutex_lock(&idev->mc_lock);
	for_each_mc_mclock(idev, i) {
		mld_del_delrec(idev, i);
		igmp6_group_added(i);
	}
	mutex_unlock(&idev->mc_lock);
}
/* IPv6 device initialization. */

void ipv6_mc_init_dev(struct inet6_dev *idev)
{
	idev->mc_gq_running = 0;
	INIT_DELAYED_WORK(&idev->mc_gq_work, mld_gq_work);
	RCU_INIT_POINTER(idev->mc_tomb, NULL);
	idev->mc_ifc_count = 0;
	INIT_DELAYED_WORK(&idev->mc_ifc_work, mld_ifc_work);
	INIT_DELAYED_WORK(&idev->mc_dad_work, mld_dad_work);
	INIT_DELAYED_WORK(&idev->mc_query_work, mld_query_work);
	INIT_DELAYED_WORK(&idev->mc_report_work, mld_report_work);
	skb_queue_head_init(&idev->mc_query_queue);
	skb_queue_head_init(&idev->mc_report_queue);
	spin_lock_init(&idev->mc_query_lock);
	spin_lock_init(&idev->mc_report_lock);
	mutex_init(&idev->mc_lock);
	ipv6_mc_reset(idev);
}
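/* Everything scheduled above runs under idev->mc_lock; the query/report
 * skb queues get their own spinlocks because they are filled from the
 * ICMPv6 receive path and drained by the corresponding works.
 */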
/*
 *	Device is about to be destroyed: clean up.
 */

void ipv6_mc_destroy_dev(struct inet6_dev *idev)
{
	struct ifmcaddr6 *i;

	/* Deactivate works */
	ipv6_mc_down(idev);
	mutex_lock(&idev->mc_lock);
	mld_clear_delrec(idev);
	mutex_unlock(&idev->mc_lock);
	mld_clear_query(idev);
	mld_clear_report(idev);

	/* Delete all-nodes address. */
	/* We cannot call ipv6_dev_mc_dec() directly, our caller in
	 * addrconf.c has NULL'd out dev->ip6_ptr so in6_dev_get() will
	 * fail.
	 */
	__ipv6_dev_mc_dec(idev, &in6addr_linklocal_allnodes);

	if (idev->cnf.forwarding)
		__ipv6_dev_mc_dec(idev, &in6addr_linklocal_allrouters);

	mutex_lock(&idev->mc_lock);
	while ((i = mc_dereference(idev->mc_list, idev))) {
		rcu_assign_pointer(idev->mc_list, mc_dereference(i->next, idev));

		ip6_mc_clear_src(i);
		ma_put(i);
	}
	mutex_unlock(&idev->mc_lock);
}
static void ipv6_mc_rejoin_groups(struct inet6_dev *idev)
{
	struct ifmcaddr6 *pmc;

	ASSERT_RTNL();

	mutex_lock(&idev->mc_lock);
	if (mld_in_v1_mode(idev)) {
		for_each_mc_mclock(idev, pmc)
			igmp6_join_group(pmc);
	} else {
		mld_send_report(idev, NULL);
	}
	mutex_unlock(&idev->mc_lock);
}
static int ipv6_mc_netdev_event(struct notifier_block *this,
				unsigned long event,
				void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct inet6_dev *idev = __in6_dev_get(dev);

	switch (event) {
	case NETDEV_RESEND_IGMP:
		if (idev)
			ipv6_mc_rejoin_groups(idev);
		break;
	default:
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block igmp6_netdev_notifier = {
	.notifier_call = ipv6_mc_netdev_event,
};
#ifdef CONFIG_PROC_FS
struct igmp6_mc_iter_state {
	struct seq_net_private p;
	struct net_device *dev;
	struct inet6_dev *idev;
};

#define igmp6_mc_seq_private(seq)	((struct igmp6_mc_iter_state *)(seq)->private)
static inline struct ifmcaddr6 *igmp6_mc_get_first(struct seq_file *seq)
{
	struct ifmcaddr6 *im = NULL;
	struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);
	struct net *net = seq_file_net(seq);

	state->idev = NULL;
	for_each_netdev_rcu(net, state->dev) {
		struct inet6_dev *idev;
		idev = __in6_dev_get(state->dev);
		if (!idev)
			continue;

		im = rcu_dereference(idev->mc_list);
		if (im) {
			state->idev = idev;
			break;
		}
	}
	return im;
}
static struct ifmcaddr6 *igmp6_mc_get_next(struct seq_file *seq,
					   struct ifmcaddr6 *im)
{
	struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);

	im = rcu_dereference(im->next);
	while (!im) {
		state->dev = next_net_device_rcu(state->dev);
		if (!state->dev) {
			state->idev = NULL;
			break;
		}
		state->idev = __in6_dev_get(state->dev);
		if (!state->idev)
			continue;
		im = rcu_dereference(state->idev->mc_list);
	}
	return im;
}
static struct ifmcaddr6 *igmp6_mc_get_idx(struct seq_file *seq, loff_t pos)
{
	struct ifmcaddr6 *im = igmp6_mc_get_first(seq);

	if (im)
		while (pos && (im = igmp6_mc_get_next(seq, im)) != NULL)
			--pos;
	return pos ? NULL : im;
}
static void *igmp6_mc_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	return igmp6_mc_get_idx(seq, *pos);
}

static void *igmp6_mc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ifmcaddr6 *im = igmp6_mc_get_next(seq, v);

	++*pos;
	return im;
}
static void igmp6_mc_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);

	if (likely(state->idev))
		state->idev = NULL;

	state->dev = NULL;
	rcu_read_unlock();
}
static int igmp6_mc_seq_show(struct seq_file *seq, void *v)
{
	struct ifmcaddr6 *im = (struct ifmcaddr6 *)v;
	struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);

	seq_printf(seq,
		   "%-4d %-15s %pi6 %5d %08X %ld\n",
		   state->dev->ifindex, state->dev->name,
		   &im->mca_addr,
		   im->mca_users, im->mca_flags,
		   (im->mca_flags & MAF_TIMER_RUNNING) ?
		   jiffies_to_clock_t(im->mca_work.timer.expires - jiffies) : 0);
	return 0;
}
static const struct seq_operations igmp6_mc_seq_ops = {
	.start	= igmp6_mc_seq_start,
	.next	= igmp6_mc_seq_next,
	.stop	= igmp6_mc_seq_stop,
	.show	= igmp6_mc_seq_show,
};
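/* A /proc/net/igmp6 line follows the seq_printf() format above; roughly
 * (the values here are illustrative only):
 *
 * 1    lo              ff020000000000000000000000000001     1 0000000C 0
 */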
struct igmp6_mcf_iter_state {
	struct seq_net_private p;
	struct net_device *dev;
	struct inet6_dev *idev;
	struct ifmcaddr6 *im;
};

#define igmp6_mcf_seq_private(seq)	((struct igmp6_mcf_iter_state *)(seq)->private)
static inline struct ip6_sf_list *igmp6_mcf_get_first(struct seq_file *seq)
{
	struct ip6_sf_list *psf = NULL;
	struct ifmcaddr6 *im = NULL;
	struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);
	struct net *net = seq_file_net(seq);

	state->idev = NULL;
	state->im = NULL;
	for_each_netdev_rcu(net, state->dev) {
		struct inet6_dev *idev;
		idev = __in6_dev_get(state->dev);
		if (unlikely(idev == NULL))
			continue;

		im = rcu_dereference(idev->mc_list);
		if (likely(im)) {
			psf = rcu_dereference(im->mca_sources);
			if (likely(psf)) {
				state->im = im;
				state->idev = idev;
				break;
			}
		}
	}
	return psf;
}
static struct ip6_sf_list *igmp6_mcf_get_next(struct seq_file *seq,
					      struct ip6_sf_list *psf)
{
	struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);

	psf = rcu_dereference(psf->sf_next);
	while (!psf) {
		state->im = rcu_dereference(state->im->next);
		while (!state->im) {
			state->dev = next_net_device_rcu(state->dev);
			if (!state->dev) {
				state->idev = NULL;
				goto out;
			}
			state->idev = __in6_dev_get(state->dev);
			if (!state->idev)
				continue;
			state->im = rcu_dereference(state->idev->mc_list);
		}
		psf = rcu_dereference(state->im->mca_sources);
	}
out:
	return psf;
}
static struct ip6_sf_list *igmp6_mcf_get_idx(struct seq_file *seq, loff_t pos)
{
	struct ip6_sf_list *psf = igmp6_mcf_get_first(seq);

	if (psf)
		while (pos && (psf = igmp6_mcf_get_next(seq, psf)) != NULL)
			--pos;
	return pos ? NULL : psf;
}
static void *igmp6_mcf_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	return *pos ? igmp6_mcf_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *igmp6_mcf_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ip6_sf_list *psf;

	if (v == SEQ_START_TOKEN)
		psf = igmp6_mcf_get_first(seq);
	else
		psf = igmp6_mcf_get_next(seq, v);
	++*pos;
	return psf;
}
static void igmp6_mcf_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);

	if (likely(state->im))
		state->im = NULL;
	if (likely(state->idev))
		state->idev = NULL;

	state->dev = NULL;
	rcu_read_unlock();
}
static int igmp6_mcf_seq_show(struct seq_file *seq, void *v)
{
	struct ip6_sf_list *psf = (struct ip6_sf_list *)v;
	struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "Idx Device                Multicast Address                   Source Address    INC    EXC\n");
	} else {
		seq_printf(seq,
			   "%3d %6.6s %pi6 %pi6 %6lu %6lu\n",
			   state->dev->ifindex, state->dev->name,
			   &state->im->mca_addr,
			   &psf->sf_addr,
			   psf->sf_count[MCAST_INCLUDE],
			   psf->sf_count[MCAST_EXCLUDE]);
	}
	return 0;
}
static const struct seq_operations igmp6_mcf_seq_ops = {
	.start	= igmp6_mcf_seq_start,
	.next	= igmp6_mcf_seq_next,
	.stop	= igmp6_mcf_seq_stop,
	.show	= igmp6_mcf_seq_show,
};
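/* Each /proc/net/mcfilter6 line pairs one group with one source address;
 * an illustrative line (values made up) under the header printed above:
 *
 *   2   eth0 ff0e0000000000000000000000000101 20010db8000000000000000000000001      1      0
 */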
static int __net_init igmp6_proc_init(struct net *net)
{
	int err;

	err = -ENOMEM;
	if (!proc_create_net("igmp6", 0444, net->proc_net, &igmp6_mc_seq_ops,
			     sizeof(struct igmp6_mc_iter_state)))
		goto out;
	if (!proc_create_net("mcfilter6", 0444, net->proc_net,
			     &igmp6_mcf_seq_ops,
			     sizeof(struct igmp6_mcf_iter_state)))
		goto out_proc_net_igmp6;

	err = 0;
out:
	return err;

out_proc_net_igmp6:
	remove_proc_entry("igmp6", net->proc_net);
	goto out;
}

static void __net_exit igmp6_proc_exit(struct net *net)
{
	remove_proc_entry("mcfilter6", net->proc_net);
	remove_proc_entry("igmp6", net->proc_net);
}
#else
static inline int igmp6_proc_init(struct net *net)
{
	return 0;
}
static inline void igmp6_proc_exit(struct net *net)
{
}
#endif
static int __net_init igmp6_net_init(struct net *net)
{
	int err;

	err = inet_ctl_sock_create(&net->ipv6.igmp_sk, PF_INET6,
				   SOCK_RAW, IPPROTO_ICMPV6, net);
	if (err < 0) {
		pr_err("Failed to initialize the IGMP6 control socket (err %d)\n",
		       err);
		goto out;
	}

	inet6_sk(net->ipv6.igmp_sk)->hop_limit = 1;
	net->ipv6.igmp_sk->sk_allocation = GFP_KERNEL;

	err = inet_ctl_sock_create(&net->ipv6.mc_autojoin_sk, PF_INET6,
				   SOCK_RAW, IPPROTO_ICMPV6, net);
	if (err < 0) {
		pr_err("Failed to initialize the IGMP6 autojoin socket (err %d)\n",
		       err);
		goto out_sock_create;
	}

	err = igmp6_proc_init(net);
	if (err)
		goto out_sock_create_autojoin;

	return 0;

out_sock_create_autojoin:
	inet_ctl_sock_destroy(net->ipv6.mc_autojoin_sk);
out_sock_create:
	inet_ctl_sock_destroy(net->ipv6.igmp_sk);
out:
	return err;
}
static void __net_exit igmp6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.igmp_sk);
	inet_ctl_sock_destroy(net->ipv6.mc_autojoin_sk);
	igmp6_proc_exit(net);
}
static struct pernet_operations igmp6_net_ops = {
	.init = igmp6_net_init,
	.exit = igmp6_net_exit,
};
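/* igmp6_init() runs early in IPv6 initialization, while igmp6_late_init()
 * registers the netdevice notifier only once the rest of the stack is up;
 * igmp6_cleanup()/igmp6_late_cleanup() undo the two in reverse order.
 */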
int __init igmp6_init(void)
{
	int err;

	err = register_pernet_subsys(&igmp6_net_ops);
	if (err)
		return err;

	mld_wq = create_workqueue("mld");
	if (!mld_wq) {
		unregister_pernet_subsys(&igmp6_net_ops);
		return -ENOMEM;
	}

	return err;
}

int __init igmp6_late_init(void)
{
	return register_netdevice_notifier(&igmp6_netdev_notifier);
}
void igmp6_cleanup(void)
{
	unregister_pernet_subsys(&igmp6_net_ops);
	destroy_workqueue(mld_wq);
}

void igmp6_late_cleanup(void)
{
	unregister_netdevice_notifier(&igmp6_netdev_notifier);
}