/*
 *	Linux IPv6 multicast routing support for BSD pim6sd
 *	Based on net/ipv4/ipmr.c.
 *
 *	(c) 2004 Mickael Hoerdt, <hoerdt@clarinet.u-strasbg.fr>
 *		LSIIT Laboratory, Strasbourg, France
 *	(c) 2004 Jean-Philippe Andriot, <jean-philippe.andriot@6WIND.com>
 *	Copyright (C)2007,2008 USAGI/WIDE Project
 *		YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#include <linux/uaccess.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/compat.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <linux/notifier.h>
#include <linux/if_arp.h>
#include <net/checksum.h>
#include <net/netlink.h>
#include <net/fib_rules.h>
#include <net/ip6_route.h>
#include <linux/mroute6.h>
#include <linux/pim.h>
#include <net/addrconf.h>
#include <linux/netfilter_ipv6.h>
#include <linux/export.h>
#include <net/ip6_checksum.h>
#include <linux/netconf.h>
struct mr6_table {
	struct list_head	list;
	possible_net_t		net;
	u32			id;
	struct sock		*mroute6_sk;
	struct timer_list	ipmr_expire_timer;
	struct list_head	mfc6_unres_queue;
	struct list_head	mfc6_cache_array[MFC6_LINES];
	struct mif_device	vif6_table[MAXMIFS];
	int			maxvif;
	atomic_t		cache_resolve_queue_len;
	bool			mroute_do_assert;
	bool			mroute_do_pim;
#ifdef CONFIG_IPV6_PIMSM_V2
	int			mroute_reg_vif_num;
#endif
};
struct ip6mr_rule {
	struct fib_rule		common;
};

struct ip6mr_result {
	struct mr6_table	*mrt;
};
/* Big lock, protecting vif table, mrt cache and mroute socket state.
   Note that the changes are semaphored via rtnl_lock.
 */

static DEFINE_RWLOCK(mrt_lock);

/*
 *	Multicast router control variables
 */

#define MIF_EXISTS(_mrt, _idx) ((_mrt)->vif6_table[_idx].dev != NULL)
/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);

/* We return to original Alan's scheme. Hash table of resolved
   entries is changed only in process context and protected
   with weak lock mrt_lock. Queue of unresolved entries is protected
   with strong spinlock mfc_unres_lock.

   In this case data path is free of exclusive locks at all.
 */

static struct kmem_cache *mrt_cachep __read_mostly;
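
/* Lock usage in this file, restating the scheme above: the forwarding path
 * takes only read_lock(&mrt_lock); configuration paths take
 * write_lock_bh(&mrt_lock) while already serialized by RTNL; the unresolved
 * queue and its expiry timer are guarded by mfc_unres_lock alone.
 */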
static struct mr6_table *ip6mr_new_table(struct net *net, u32 id);
static void ip6mr_free_table(struct mr6_table *mrt);

static void ip6_mr_forward(struct net *net, struct mr6_table *mrt,
			   struct sk_buff *skb, struct mfc6_cache *cache);
static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt,
			      mifi_t mifi, int assert);
static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
			       struct mfc6_cache *c, struct rtmsg *rtm);
static void mr6_netlink_event(struct mr6_table *mrt, struct mfc6_cache *mfc,
			      int cmd);
static void mrt6msg_netlink_event(struct mr6_table *mrt, struct sk_buff *pkt);
static int ip6mr_rtm_dumproute(struct sk_buff *skb,
			       struct netlink_callback *cb);
static void mroute_clean_tables(struct mr6_table *mrt, bool all);
static void ipmr_expire_process(struct timer_list *t);
#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
#define ip6mr_for_each_table(mrt, net) \
	list_for_each_entry_rcu(mrt, &net->ipv6.mr6_tables, list)

static struct mr6_table *ip6mr_get_table(struct net *net, u32 id)
{
	struct mr6_table *mrt;

	ip6mr_for_each_table(mrt, net) {
		if (mrt->id == id)
			return mrt;
	}
	return NULL;
}
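
/* Resolve which mr6_table handles a given flow by running the IPv6
 * multicast routing fib rules.
 */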
static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
			    struct mr6_table **mrt)
{
	int err;
	struct ip6mr_result res;
	struct fib_lookup_arg arg = {
		.result = &res,
		.flags = FIB_LOOKUP_NOREF,
	};

	err = fib_rules_lookup(net->ipv6.mr6_rules_ops,
			       flowi6_to_flowi(flp6), 0, &arg);
	if (err < 0)
		return err;
	*mrt = res.mrt;
	return 0;
}
static int ip6mr_rule_action(struct fib_rule *rule, struct flowi *flp,
			     int flags, struct fib_lookup_arg *arg)
{
	struct ip6mr_result *res = arg->result;
	struct mr6_table *mrt;

	switch (rule->action) {
	case FR_ACT_TO_TBL:
		break;
	case FR_ACT_UNREACHABLE:
		return -ENETUNREACH;
	case FR_ACT_PROHIBIT:
		return -EACCES;
	case FR_ACT_BLACKHOLE:
	default:
		return -EINVAL;
	}

	mrt = ip6mr_get_table(rule->fr_net, rule->table);
	if (!mrt)
		return -EAGAIN;
	res->mrt = mrt;
	return 0;
}
static int ip6mr_rule_match(struct fib_rule *rule, struct flowi *flp, int flags)
{
	return 1;
}

static const struct nla_policy ip6mr_rule_policy[FRA_MAX + 1] = {
	FRA_GENERIC_POLICY,
};

static int ip6mr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
				struct fib_rule_hdr *frh, struct nlattr **tb)
{
	return 0;
}

static int ip6mr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
			      struct nlattr **tb)
{
	return 1;
}

static int ip6mr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
			   struct fib_rule_hdr *frh)
{
	frh->dst_len = 0;
	frh->src_len = 0;
	frh->tos     = 0;
	return 0;
}
static const struct fib_rules_ops __net_initconst ip6mr_rules_ops_template = {
	.family		= RTNL_FAMILY_IP6MR,
	.rule_size	= sizeof(struct ip6mr_rule),
	.addr_size	= sizeof(struct in6_addr),
	.action		= ip6mr_rule_action,
	.match		= ip6mr_rule_match,
	.configure	= ip6mr_rule_configure,
	.compare	= ip6mr_rule_compare,
	.fill		= ip6mr_rule_fill,
	.nlgroup	= RTNLGRP_IPV6_RULE,
	.policy		= ip6mr_rule_policy,
	.owner		= THIS_MODULE,
};
static int __net_init ip6mr_rules_init(struct net *net)
{
	struct fib_rules_ops *ops;
	struct mr6_table *mrt;
	int err;

	ops = fib_rules_register(&ip6mr_rules_ops_template, net);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	INIT_LIST_HEAD(&net->ipv6.mr6_tables);

	mrt = ip6mr_new_table(net, RT6_TABLE_DFLT);
	if (!mrt) {
		err = -ENOMEM;
		goto err1;
	}

	err = fib_default_rule_add(ops, 0x7fff, RT6_TABLE_DFLT, 0);
	if (err < 0)
		goto err2;

	net->ipv6.mr6_rules_ops = ops;
	return 0;

err2:
	ip6mr_free_table(mrt);
err1:
	fib_rules_unregister(ops);
	return err;
}
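
/* Tear down every multicast routing table and the fib rules on netns exit. */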
static void __net_exit ip6mr_rules_exit(struct net *net)
{
	struct mr6_table *mrt, *next;

	rtnl_lock();
	list_for_each_entry_safe(mrt, next, &net->ipv6.mr6_tables, list) {
		list_del(&mrt->list);
		ip6mr_free_table(mrt);
	}
	fib_rules_unregister(net->ipv6.mr6_rules_ops);
	rtnl_unlock();
}
#else
#define ip6mr_for_each_table(mrt, net) \
	for (mrt = net->ipv6.mrt6; mrt; mrt = NULL)

static struct mr6_table *ip6mr_get_table(struct net *net, u32 id)
{
	return net->ipv6.mrt6;
}

static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
			    struct mr6_table **mrt)
{
	*mrt = net->ipv6.mrt6;
	return 0;
}

static int __net_init ip6mr_rules_init(struct net *net)
{
	net->ipv6.mrt6 = ip6mr_new_table(net, RT6_TABLE_DFLT);
	return net->ipv6.mrt6 ? 0 : -ENOMEM;
}

static void __net_exit ip6mr_rules_exit(struct net *net)
{
	rtnl_lock();
	ip6mr_free_table(net->ipv6.mrt6);
	net->ipv6.mrt6 = NULL;
	rtnl_unlock();
}
#endif
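
/* Without CONFIG_IPV6_MROUTE_MULTIPLE_TABLES there is exactly one table per
 * network namespace (net->ipv6.mrt6), so table lookup and the rules
 * init/exit hooks above collapse to trivial accessors.
 */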
static struct mr6_table *ip6mr_new_table(struct net *net, u32 id)
{
	struct mr6_table *mrt;
	unsigned int i;

	mrt = ip6mr_get_table(net, id);
	if (mrt)
		return mrt;

	mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
	if (!mrt)
		return NULL;
	mrt->id = id;
	write_pnet(&mrt->net, net);

	/* Forwarding cache */
	for (i = 0; i < MFC6_LINES; i++)
		INIT_LIST_HEAD(&mrt->mfc6_cache_array[i]);

	INIT_LIST_HEAD(&mrt->mfc6_unres_queue);

	timer_setup(&mrt->ipmr_expire_timer, ipmr_expire_process, 0);

#ifdef CONFIG_IPV6_PIMSM_V2
	mrt->mroute_reg_vif_num = -1;
#endif
#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
	list_add_tail_rcu(&mrt->list, &net->ipv6.mr6_tables);
#endif
	return mrt;
}

static void ip6mr_free_table(struct mr6_table *mrt)
{
	del_timer_sync(&mrt->ipmr_expire_timer);
	mroute_clean_tables(mrt, true);
	kfree(mrt);
}
#ifdef CONFIG_PROC_FS

struct ipmr_mfc_iter {
	struct seq_net_private p;
	struct mr6_table *mrt;
	struct list_head *cache;
	int ct;
};
static struct mfc6_cache *ipmr_mfc_seq_idx(struct net *net,
					   struct ipmr_mfc_iter *it, loff_t pos)
{
	struct mr6_table *mrt = it->mrt;
	struct mfc6_cache *mfc;

	read_lock(&mrt_lock);
	for (it->ct = 0; it->ct < MFC6_LINES; it->ct++) {
		it->cache = &mrt->mfc6_cache_array[it->ct];
		list_for_each_entry(mfc, it->cache, list)
			if (pos-- == 0)
				return mfc;
	}
	read_unlock(&mrt_lock);

	spin_lock_bh(&mfc_unres_lock);
	it->cache = &mrt->mfc6_unres_queue;
	list_for_each_entry(mfc, it->cache, list)
		if (pos-- == 0)
			return mfc;
	spin_unlock_bh(&mfc_unres_lock);

	it->cache = NULL;
	return NULL;
}
/*
 *	The /proc interfaces to multicast routing
 *	/proc/ip6_mr_cache /proc/ip6_mr_vif
 */

struct ipmr_vif_iter {
	struct seq_net_private p;
	struct mr6_table *mrt;
	int ct;
};
static struct mif_device *ip6mr_vif_seq_idx(struct net *net,
					    struct ipmr_vif_iter *iter,
					    loff_t pos)
{
	struct mr6_table *mrt = iter->mrt;

	for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) {
		if (!MIF_EXISTS(mrt, iter->ct))
			continue;
		if (pos-- == 0)
			return &mrt->vif6_table[iter->ct];
	}
	return NULL;
}
static void *ip6mr_vif_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr6_table *mrt;

	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
	if (!mrt)
		return ERR_PTR(-ENOENT);

	iter->mrt = mrt;

	read_lock(&mrt_lock);
	return *pos ? ip6mr_vif_seq_idx(net, seq->private, *pos - 1)
		: SEQ_START_TOKEN;
}
static void *ip6mr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr6_table *mrt = iter->mrt;

	++*pos;
	if (v == SEQ_START_TOKEN)
		return ip6mr_vif_seq_idx(net, iter, 0);

	while (++iter->ct < mrt->maxvif) {
		if (!MIF_EXISTS(mrt, iter->ct))
			continue;
		return &mrt->vif6_table[iter->ct];
	}
	return NULL;
}
static void ip6mr_vif_seq_stop(struct seq_file *seq, void *v)
{
	read_unlock(&mrt_lock);
}
static int ip6mr_vif_seq_show(struct seq_file *seq, void *v)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct mr6_table *mrt = iter->mrt;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Interface      BytesIn  PktsIn  BytesOut PktsOut Flags\n");
	} else {
		const struct mif_device *vif = v;
		const char *name = vif->dev ? vif->dev->name : "none";

		seq_printf(seq,
			   "%2td %-10s %8ld %7ld  %8ld %7ld %05X\n",
			   vif - mrt->vif6_table,
			   name, vif->bytes_in, vif->pkt_in,
			   vif->bytes_out, vif->pkt_out,
			   vif->flags);
	}
	return 0;
}
static const struct seq_operations ip6mr_vif_seq_ops = {
	.start = ip6mr_vif_seq_start,
	.next  = ip6mr_vif_seq_next,
	.stop  = ip6mr_vif_seq_stop,
	.show  = ip6mr_vif_seq_show,
};

static int ip6mr_vif_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ip6mr_vif_seq_ops,
			    sizeof(struct ipmr_vif_iter));
}

static const struct file_operations ip6mr_vif_fops = {
	.open    = ip6mr_vif_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net,
};
static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct ipmr_mfc_iter *it = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr6_table *mrt;

	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
	if (!mrt)
		return ERR_PTR(-ENOENT);

	it->mrt = mrt;
	it->cache = NULL;
	return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1)
		: SEQ_START_TOKEN;
}
static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct mfc6_cache *mfc = v;
	struct ipmr_mfc_iter *it = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr6_table *mrt = it->mrt;

	++*pos;

	if (v == SEQ_START_TOKEN)
		return ipmr_mfc_seq_idx(net, seq->private, 0);

	if (mfc->list.next != it->cache)
		return list_entry(mfc->list.next, struct mfc6_cache, list);

	if (it->cache == &mrt->mfc6_unres_queue)
		goto end_of_list;

	BUG_ON(it->cache != &mrt->mfc6_cache_array[it->ct]);

	while (++it->ct < MFC6_LINES) {
		it->cache = &mrt->mfc6_cache_array[it->ct];
		if (list_empty(it->cache))
			continue;
		return list_first_entry(it->cache, struct mfc6_cache, list);
	}

	/* exhausted cache_array, show unresolved */
	read_unlock(&mrt_lock);
	it->cache = &mrt->mfc6_unres_queue;
	it->ct = 0;

	spin_lock_bh(&mfc_unres_lock);
	if (!list_empty(it->cache))
		return list_first_entry(it->cache, struct mfc6_cache, list);

end_of_list:
	spin_unlock_bh(&mfc_unres_lock);
	it->cache = NULL;

	return NULL;
}
static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
{
	struct ipmr_mfc_iter *it = seq->private;
	struct mr6_table *mrt = it->mrt;

	if (it->cache == &mrt->mfc6_unres_queue)
		spin_unlock_bh(&mfc_unres_lock);
	else if (it->cache == &mrt->mfc6_cache_array[it->ct])
		read_unlock(&mrt_lock);
}
static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
{
	int n;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Group                            "
			 "Origin                           "
			 "Iif      Pkts  Bytes     Wrong  Oifs\n");
	} else {
		const struct mfc6_cache *mfc = v;
		const struct ipmr_mfc_iter *it = seq->private;
		struct mr6_table *mrt = it->mrt;

		seq_printf(seq, "%pI6 %pI6 %-3hd",
			   &mfc->mf6c_mcastgrp, &mfc->mf6c_origin,
			   mfc->mf6c_parent);

		if (it->cache != &mrt->mfc6_unres_queue) {
			seq_printf(seq, " %8lu %8lu %8lu",
				   mfc->mfc_un.res.pkt,
				   mfc->mfc_un.res.bytes,
				   mfc->mfc_un.res.wrong_if);
			for (n = mfc->mfc_un.res.minvif;
			     n < mfc->mfc_un.res.maxvif; n++) {
				if (MIF_EXISTS(mrt, n) &&
				    mfc->mfc_un.res.ttls[n] < 255)
					seq_printf(seq,
						   " %2d:%-3d", n,
						   mfc->mfc_un.res.ttls[n]);
			}
		} else {
			/* unresolved mfc_caches don't contain
			 * pkt, bytes and wrong_if values
			 */
			seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
		}
		seq_putc(seq, '\n');
	}
	return 0;
}
static const struct seq_operations ipmr_mfc_seq_ops = {
	.start = ipmr_mfc_seq_start,
	.next  = ipmr_mfc_seq_next,
	.stop  = ipmr_mfc_seq_stop,
	.show  = ipmr_mfc_seq_show,
};

static int ipmr_mfc_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ipmr_mfc_seq_ops,
			    sizeof(struct ipmr_mfc_iter));
}

static const struct file_operations ip6mr_mfc_fops = {
	.open    = ipmr_mfc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net,
};
#endif
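
/* Receive handler for IPPROTO_PIM: validate the PIM Register header and
 * checksum, check that the encapsulated packet is IPv6 multicast, then feed
 * the decapsulated payload back into the stack via the register vif.
 */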
#ifdef CONFIG_IPV6_PIMSM_V2

static int pim6_rcv(struct sk_buff *skb)
{
	struct pimreghdr *pim;
	struct ipv6hdr *encap;
	struct net_device *reg_dev = NULL;
	struct net *net = dev_net(skb->dev);
	struct mr6_table *mrt;
	struct flowi6 fl6 = {
		.flowi6_iif	= skb->dev->ifindex,
		.flowi6_mark	= skb->mark,
	};
	int reg_vif_num;

	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
		goto drop;

	pim = (struct pimreghdr *)skb_transport_header(skb);
	if (pim->type != ((PIM_VERSION << 4) | PIM_TYPE_REGISTER) ||
	    (pim->flags & PIM_NULL_REGISTER) ||
	    (csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
			     sizeof(*pim), IPPROTO_PIM,
			     csum_partial((void *)pim, sizeof(*pim), 0)) &&
	     csum_fold(skb_checksum(skb, 0, skb->len, 0))))
		goto drop;

	/* check if the inner packet is destined to mcast group */
	encap = (struct ipv6hdr *)(skb_transport_header(skb) +
				   sizeof(*pim));

	if (!ipv6_addr_is_multicast(&encap->daddr) ||
	    encap->payload_len == 0 ||
	    ntohs(encap->payload_len) + sizeof(*pim) > skb->len)
		goto drop;

	if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
		goto drop;
	reg_vif_num = mrt->mroute_reg_vif_num;

	read_lock(&mrt_lock);
	if (reg_vif_num >= 0)
		reg_dev = mrt->vif6_table[reg_vif_num].dev;
	if (reg_dev)
		dev_hold(reg_dev);
	read_unlock(&mrt_lock);

	if (!reg_dev)
		goto drop;

	skb->mac_header = skb->network_header;
	skb_pull(skb, (u8 *)encap - skb->data);
	skb_reset_network_header(skb);
	skb->protocol = htons(ETH_P_IPV6);
	skb->ip_summed = CHECKSUM_NONE;

	skb_tunnel_rx(skb, reg_dev, dev_net(reg_dev));

	netif_rx(skb);

	dev_put(reg_dev);
	return 0;
drop:
	kfree_skb(skb);
	return 0;
}
static const struct inet6_protocol pim6_protocol = {
	.handler	=	pim6_rcv,
};
/* Service routines creating virtual interfaces: PIMREG */

static netdev_tx_t reg_vif_xmit(struct sk_buff *skb,
				struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct mr6_table *mrt;
	struct flowi6 fl6 = {
		.flowi6_oif	= dev->ifindex,
		.flowi6_iif	= skb->skb_iif ? : LOOPBACK_IFINDEX,
		.flowi6_mark	= skb->mark,
	};
	int err;

	err = ip6mr_fib_lookup(net, &fl6, &mrt);
	if (err < 0) {
		kfree_skb(skb);
		return err;
	}

	read_lock(&mrt_lock);
	dev->stats.tx_bytes += skb->len;
	dev->stats.tx_packets++;
	ip6mr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, MRT6MSG_WHOLEPKT);
	read_unlock(&mrt_lock);
	kfree_skb(skb);
	return NETDEV_TX_OK;
}
static int reg_vif_get_iflink(const struct net_device *dev)
{
	return 0;
}

static const struct net_device_ops reg_vif_netdev_ops = {
	.ndo_start_xmit	= reg_vif_xmit,
	.ndo_get_iflink = reg_vif_get_iflink,
};
static void reg_vif_setup(struct net_device *dev)
{
	dev->type		= ARPHRD_PIMREG;
	dev->mtu		= 1500 - sizeof(struct ipv6hdr) - 8;
	dev->flags		= IFF_NOARP;
	dev->netdev_ops		= &reg_vif_netdev_ops;
	dev->needs_free_netdev	= true;
	dev->features		|= NETIF_F_NETNS_LOCAL;
}
static struct net_device *ip6mr_reg_vif(struct net *net, struct mr6_table *mrt)
{
	struct net_device *dev;
	char name[IFNAMSIZ];

	if (mrt->id == RT6_TABLE_DFLT)
		sprintf(name, "pim6reg");
	else
		sprintf(name, "pim6reg%u", mrt->id);

	dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, reg_vif_setup);
	if (!dev)
		return NULL;

	dev_net_set(dev, net);

	if (register_netdevice(dev)) {
		free_netdev(dev);
		return NULL;
	}

	if (dev_open(dev))
		goto failure;

	dev_hold(dev);
	return dev;

failure:
	unregister_netdevice(dev);
	return NULL;
}
#endif
*mrt
, int vifi
, int notify
,
777 struct list_head
*head
)
779 struct mif_device
*v
;
780 struct net_device
*dev
;
781 struct inet6_dev
*in6_dev
;
783 if (vifi
< 0 || vifi
>= mrt
->maxvif
)
784 return -EADDRNOTAVAIL
;
786 v
= &mrt
->vif6_table
[vifi
];
788 write_lock_bh(&mrt_lock
);
793 write_unlock_bh(&mrt_lock
);
794 return -EADDRNOTAVAIL
;
797 #ifdef CONFIG_IPV6_PIMSM_V2
798 if (vifi
== mrt
->mroute_reg_vif_num
)
799 mrt
->mroute_reg_vif_num
= -1;
802 if (vifi
+ 1 == mrt
->maxvif
) {
804 for (tmp
= vifi
- 1; tmp
>= 0; tmp
--) {
805 if (MIF_EXISTS(mrt
, tmp
))
808 mrt
->maxvif
= tmp
+ 1;
811 write_unlock_bh(&mrt_lock
);
813 dev_set_allmulti(dev
, -1);
815 in6_dev
= __in6_dev_get(dev
);
817 in6_dev
->cnf
.mc_forwarding
--;
818 inet6_netconf_notify_devconf(dev_net(dev
), RTM_NEWNETCONF
,
819 NETCONFA_MC_FORWARDING
,
820 dev
->ifindex
, &in6_dev
->cnf
);
823 if ((v
->flags
& MIFF_REGISTER
) && !notify
)
824 unregister_netdevice_queue(dev
, head
);
static inline void ip6mr_cache_free(struct mfc6_cache *c)
{
	kmem_cache_free(mrt_cachep, c);
}
/* Destroy an unresolved cache entry, killing queued skbs
   and reporting error to netlink readers.
 */

static void ip6mr_destroy_unres(struct mr6_table *mrt, struct mfc6_cache *c)
{
	struct net *net = read_pnet(&mrt->net);
	struct sk_buff *skb;

	atomic_dec(&mrt->cache_resolve_queue_len);

	while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved)) != NULL) {
		if (ipv6_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = skb_pull(skb,
							sizeof(struct ipv6hdr));
			nlh->nlmsg_type = NLMSG_ERROR;
			nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
			skb_trim(skb, nlh->nlmsg_len);
			((struct nlmsgerr *)nlmsg_data(nlh))->error = -ETIMEDOUT;
			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
		} else
			kfree_skb(skb);
	}

	ip6mr_cache_free(c);
}
/* Timer process for all the unresolved queue. */

static void ipmr_do_expire_process(struct mr6_table *mrt)
{
	unsigned long now = jiffies;
	unsigned long expires = 10 * HZ;
	struct mfc6_cache *c, *next;

	list_for_each_entry_safe(c, next, &mrt->mfc6_unres_queue, list) {
		if (time_after(c->mfc_un.unres.expires, now)) {
			/* not yet... */
			unsigned long interval = c->mfc_un.unres.expires - now;
			if (interval < expires)
				expires = interval;
			continue;
		}

		list_del(&c->list);
		mr6_netlink_event(mrt, c, RTM_DELROUTE);
		ip6mr_destroy_unres(mrt, c);
	}

	if (!list_empty(&mrt->mfc6_unres_queue))
		mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);
}
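
/* Timer callback; runs in softirq context, so only trylock the unresolved
 * queue and re-arm one jiffy later if it is contended.
 */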
static void ipmr_expire_process(struct timer_list *t)
{
	struct mr6_table *mrt = from_timer(mrt, t, ipmr_expire_timer);

	if (!spin_trylock(&mfc_unres_lock)) {
		mod_timer(&mrt->ipmr_expire_timer, jiffies + 1);
		return;
	}

	if (!list_empty(&mrt->mfc6_unres_queue))
		ipmr_do_expire_process(mrt);

	spin_unlock(&mfc_unres_lock);
}
/* Fill oifs list. It is called under write locked mrt_lock. */

static void ip6mr_update_thresholds(struct mr6_table *mrt, struct mfc6_cache *cache,
				    unsigned char *ttls)
{
	int vifi;

	cache->mfc_un.res.minvif = MAXMIFS;
	cache->mfc_un.res.maxvif = 0;
	memset(cache->mfc_un.res.ttls, 255, MAXMIFS);

	for (vifi = 0; vifi < mrt->maxvif; vifi++) {
		if (MIF_EXISTS(mrt, vifi) &&
		    ttls[vifi] && ttls[vifi] < 255) {
			cache->mfc_un.res.ttls[vifi] = ttls[vifi];
			if (cache->mfc_un.res.minvif > vifi)
				cache->mfc_un.res.minvif = vifi;
			if (cache->mfc_un.res.maxvif <= vifi)
				cache->mfc_un.res.maxvif = vifi + 1;
		}
	}
	cache->mfc_un.res.lastuse = jiffies;
}
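
/* Add a virtual interface: for MIFF_REGISTER this creates the pim6reg
 * device, otherwise it attaches to an existing device in allmulti mode.
 */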
static int mif6_add(struct net *net, struct mr6_table *mrt,
		    struct mif6ctl *vifc, int mrtsock)
{
	int vifi = vifc->mif6c_mifi;
	struct mif_device *v = &mrt->vif6_table[vifi];
	struct net_device *dev;
	struct inet6_dev *in6_dev;
	int err;

	/* Is vif busy ? */
	if (MIF_EXISTS(mrt, vifi))
		return -EADDRINUSE;

	switch (vifc->mif6c_flags) {
#ifdef CONFIG_IPV6_PIMSM_V2
	case MIFF_REGISTER:
		/*
		 * Special Purpose VIF in PIM
		 * All the packets will be sent to the daemon
		 */
		if (mrt->mroute_reg_vif_num >= 0)
			return -EADDRINUSE;
		dev = ip6mr_reg_vif(net, mrt);
		if (!dev)
			return -ENOBUFS;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			unregister_netdevice(dev);
			dev_put(dev);
			return err;
		}
		break;
#endif
	case 0:
		dev = dev_get_by_index(net, vifc->mif6c_pifi);
		if (!dev)
			return -EADDRNOTAVAIL;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			dev_put(dev);
			return err;
		}
		break;
	default:
		return -EINVAL;
	}

	in6_dev = __in6_dev_get(dev);
	if (in6_dev) {
		in6_dev->cnf.mc_forwarding++;
		inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
					     NETCONFA_MC_FORWARDING,
					     dev->ifindex, &in6_dev->cnf);
	}

	/*
	 *	Fill in the VIF structures
	 */
	v->rate_limit = vifc->vifc_rate_limit;
	v->flags = vifc->mif6c_flags;
	if (!mrtsock)
		v->flags |= VIFF_STATIC;
	v->threshold = vifc->vifc_threshold;
	v->bytes_in = 0;
	v->bytes_out = 0;
	v->pkt_in = 0;
	v->pkt_out = 0;
	v->link = dev->ifindex;
	if (v->flags & MIFF_REGISTER)
		v->link = dev_get_iflink(dev);

	/* And finish update writing critical data */
	write_lock_bh(&mrt_lock);
	v->dev = dev;
#ifdef CONFIG_IPV6_PIMSM_V2
	if (v->flags & MIFF_REGISTER)
		mrt->mroute_reg_vif_num = vifi;
#endif
	if (vifi + 1 > mrt->maxvif)
		mrt->maxvif = vifi + 1;
	write_unlock_bh(&mrt_lock);
	return 0;
}
static struct mfc6_cache *ip6mr_cache_find(struct mr6_table *mrt,
					   const struct in6_addr *origin,
					   const struct in6_addr *mcastgrp)
{
	int line = MFC6_HASH(mcastgrp, origin);
	struct mfc6_cache *c;

	list_for_each_entry(c, &mrt->mfc6_cache_array[line], list) {
		if (ipv6_addr_equal(&c->mf6c_origin, origin) &&
		    ipv6_addr_equal(&c->mf6c_mcastgrp, mcastgrp))
			return c;
	}
	return NULL;
}
/* Look for a (*,*,oif) entry */
static struct mfc6_cache *ip6mr_cache_find_any_parent(struct mr6_table *mrt,
						      mifi_t mifi)
{
	int line = MFC6_HASH(&in6addr_any, &in6addr_any);
	struct mfc6_cache *c;

	list_for_each_entry(c, &mrt->mfc6_cache_array[line], list)
		if (ipv6_addr_any(&c->mf6c_origin) &&
		    ipv6_addr_any(&c->mf6c_mcastgrp) &&
		    (c->mfc_un.res.ttls[mifi] < 255))
			return c;

	return NULL;
}
/* Look for a (*,G) entry */
static struct mfc6_cache *ip6mr_cache_find_any(struct mr6_table *mrt,
					       struct in6_addr *mcastgrp,
					       mifi_t mifi)
{
	int line = MFC6_HASH(mcastgrp, &in6addr_any);
	struct mfc6_cache *c, *proxy;

	if (ipv6_addr_any(mcastgrp))
		goto skip;

	list_for_each_entry(c, &mrt->mfc6_cache_array[line], list)
		if (ipv6_addr_any(&c->mf6c_origin) &&
		    ipv6_addr_equal(&c->mf6c_mcastgrp, mcastgrp)) {
			if (c->mfc_un.res.ttls[mifi] < 255)
				return c;

			/* It's ok if the mifi is part of the static tree */
			proxy = ip6mr_cache_find_any_parent(mrt,
							    c->mf6c_parent);
			if (proxy && proxy->mfc_un.res.ttls[mifi] < 255)
				return c;
		}

skip:
	return ip6mr_cache_find_any_parent(mrt, mifi);
}
/*
 *	Allocate a multicast cache entry
 */
static struct mfc6_cache *ip6mr_cache_alloc(void)
{
	struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
	if (!c)
		return NULL;
	c->mfc_un.res.last_assert = jiffies - MFC_ASSERT_THRESH - 1;
	c->mfc_un.res.minvif = MAXMIFS;
	return c;
}

static struct mfc6_cache *ip6mr_cache_alloc_unres(void)
{
	struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
	if (!c)
		return NULL;
	skb_queue_head_init(&c->mfc_un.unres.unresolved);
	c->mfc_un.unres.expires = jiffies + 10 * HZ;
	return c;
}
/*
 *	A cache entry has gone into a resolved state from queued
 */

static void ip6mr_cache_resolve(struct net *net, struct mr6_table *mrt,
				struct mfc6_cache *uc, struct mfc6_cache *c)
{
	struct sk_buff *skb;

	/*
	 *	Play the pending entries through our router
	 */

	while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
		if (ipv6_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = skb_pull(skb,
							sizeof(struct ipv6hdr));

			if (__ip6mr_fill_mroute(mrt, skb, c, nlmsg_data(nlh)) > 0) {
				nlh->nlmsg_len = skb_tail_pointer(skb) - (u8 *)nlh;
			} else {
				nlh->nlmsg_type = NLMSG_ERROR;
				nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
				skb_trim(skb, nlh->nlmsg_len);
				((struct nlmsgerr *)nlmsg_data(nlh))->error = -EMSGSIZE;
			}
			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
		} else
			ip6_mr_forward(net, mrt, skb, c);
	}
}
/*
 *	Bounce a cache query up to pim6sd and netlink.
 *
 *	Called under mrt_lock.
 */

static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt,
			      mifi_t mifi, int assert)
{
	struct sk_buff *skb;
	struct mrt6msg *msg;
	int ret;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (assert == MRT6MSG_WHOLEPKT)
		skb = skb_realloc_headroom(pkt, -skb_network_offset(pkt)
						+sizeof(*msg));
	else
#endif
		skb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(*msg), GFP_ATOMIC);

	if (!skb)
		return -ENOBUFS;

	/* I suppose that internal messages
	 * do not require checksums */

	skb->ip_summed = CHECKSUM_UNNECESSARY;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (assert == MRT6MSG_WHOLEPKT) {
		/* Ugly, but we have no choice with this interface.
		   Duplicate old header, fix length etc.
		   And all this only to mangle msg->im6_msgtype and
		   to set msg->im6_mbz to "mbz" :-)
		 */
		skb_push(skb, -skb_network_offset(pkt));

		skb_push(skb, sizeof(*msg));
		skb_reset_transport_header(skb);
		msg = (struct mrt6msg *)skb_transport_header(skb);
		msg->im6_mbz = 0;
		msg->im6_msgtype = MRT6MSG_WHOLEPKT;
		msg->im6_mif = mrt->mroute_reg_vif_num;
		msg->im6_pad = 0;
		msg->im6_src = ipv6_hdr(pkt)->saddr;
		msg->im6_dst = ipv6_hdr(pkt)->daddr;

		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else
#endif
	{
	/*
	 *	Copy the IP header
	 */

	skb_put(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	skb_copy_to_linear_data(skb, ipv6_hdr(pkt), sizeof(struct ipv6hdr));

	/*
	 *	Add our header
	 */
	skb_put(skb, sizeof(*msg));
	skb_reset_transport_header(skb);
	msg = (struct mrt6msg *)skb_transport_header(skb);

	msg->im6_mbz = 0;
	msg->im6_msgtype = assert;
	msg->im6_mif = mifi;
	msg->im6_pad = 0;
	msg->im6_src = ipv6_hdr(pkt)->saddr;
	msg->im6_dst = ipv6_hdr(pkt)->daddr;

	skb_dst_set(skb, dst_clone(skb_dst(pkt)));
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	if (!mrt->mroute6_sk) {
		kfree_skb(skb);
		return -EINVAL;
	}

	mrt6msg_netlink_event(mrt, skb);

	/*
	 *	Deliver to user space multicast routing algorithms
	 */
	ret = sock_queue_rcv_skb(mrt->mroute6_sk, skb);
	if (ret < 0) {
		net_warn_ratelimited("mroute6: pending queue full, dropping entries\n");
		kfree_skb(skb);
	}

	return ret;
}
/*
 *	Queue a packet for resolution. It gets locked cache entry!
 */

static int
ip6mr_cache_unresolved(struct mr6_table *mrt, mifi_t mifi, struct sk_buff *skb)
{
	bool found = false;
	int err;
	struct mfc6_cache *c;

	spin_lock_bh(&mfc_unres_lock);
	list_for_each_entry(c, &mrt->mfc6_unres_queue, list) {
		if (ipv6_addr_equal(&c->mf6c_mcastgrp, &ipv6_hdr(skb)->daddr) &&
		    ipv6_addr_equal(&c->mf6c_origin, &ipv6_hdr(skb)->saddr)) {
			found = true;
			break;
		}
	}

	if (!found) {
		/*
		 *	Create a new entry if allowable
		 */

		if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 ||
		    (c = ip6mr_cache_alloc_unres()) == NULL) {
			spin_unlock_bh(&mfc_unres_lock);

			kfree_skb(skb);
			return -ENOBUFS;
		}

		/*
		 *	Fill in the new cache entry
		 */
		c->mf6c_parent = -1;
		c->mf6c_origin = ipv6_hdr(skb)->saddr;
		c->mf6c_mcastgrp = ipv6_hdr(skb)->daddr;

		/*
		 *	Reflect first query at pim6sd
		 */
		err = ip6mr_cache_report(mrt, skb, mifi, MRT6MSG_NOCACHE);
		if (err < 0) {
			/* If the report failed throw the cache entry
			   out - Brad Parker
			 */
			spin_unlock_bh(&mfc_unres_lock);

			ip6mr_cache_free(c);
			kfree_skb(skb);
			return err;
		}

		atomic_inc(&mrt->cache_resolve_queue_len);
		list_add(&c->list, &mrt->mfc6_unres_queue);
		mr6_netlink_event(mrt, c, RTM_NEWROUTE);

		ipmr_do_expire_process(mrt);
	}

	/*
	 *	See if we can append the packet
	 */
	if (c->mfc_un.unres.unresolved.qlen > 3) {
		kfree_skb(skb);
		err = -ENOBUFS;
	} else {
		skb_queue_tail(&c->mfc_un.unres.unresolved, skb);
		err = 0;
	}

	spin_unlock_bh(&mfc_unres_lock);
	return err;
}
/*
 *	MFC6 cache manipulation by user space
 */

static int ip6mr_mfc_delete(struct mr6_table *mrt, struct mf6cctl *mfc,
			    int parent)
{
	int line;
	struct mfc6_cache *c, *next;

	line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr);

	list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[line], list) {
		if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
		    ipv6_addr_equal(&c->mf6c_mcastgrp,
				    &mfc->mf6cc_mcastgrp.sin6_addr) &&
		    (parent == -1 || parent == c->mf6c_parent)) {
			write_lock_bh(&mrt_lock);
			list_del(&c->list);
			write_unlock_bh(&mrt_lock);

			mr6_netlink_event(mrt, c, RTM_DELROUTE);
			ip6mr_cache_free(c);
			return 0;
		}
	}
	return -ENOENT;
}
static int ip6mr_device_event(struct notifier_block *this,
			      unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct mr6_table *mrt;
	struct mif_device *v;
	int ct;

	if (event != NETDEV_UNREGISTER)
		return NOTIFY_DONE;

	ip6mr_for_each_table(mrt, net) {
		v = &mrt->vif6_table[0];
		for (ct = 0; ct < mrt->maxvif; ct++, v++) {
			if (v->dev == dev)
				mif6_delete(mrt, ct, 1, NULL);
		}
	}

	return NOTIFY_DONE;
}

static struct notifier_block ip6_mr_notifier = {
	.notifier_call = ip6mr_device_event
};
/*
 *	Setup for IP multicast routing
 */

static int __net_init ip6mr_net_init(struct net *net)
{
	int err;

	err = ip6mr_rules_init(net);
	if (err < 0)
		goto fail;

#ifdef CONFIG_PROC_FS
	err = -ENOMEM;
	if (!proc_create("ip6_mr_vif", 0, net->proc_net, &ip6mr_vif_fops))
		goto proc_vif_fail;
	if (!proc_create("ip6_mr_cache", 0, net->proc_net, &ip6mr_mfc_fops))
		goto proc_cache_fail;
#endif

	return 0;

#ifdef CONFIG_PROC_FS
proc_cache_fail:
	remove_proc_entry("ip6_mr_vif", net->proc_net);
proc_vif_fail:
	ip6mr_rules_exit(net);
#endif
fail:
	return err;
}

static void __net_exit ip6mr_net_exit(struct net *net)
{
#ifdef CONFIG_PROC_FS
	remove_proc_entry("ip6_mr_cache", net->proc_net);
	remove_proc_entry("ip6_mr_vif", net->proc_net);
#endif
	ip6mr_rules_exit(net);
}

static struct pernet_operations ip6mr_net_ops = {
	.init = ip6mr_net_init,
	.exit = ip6mr_net_exit,
};
int __init ip6_mr_init(void)
{
	int err;

	mrt_cachep = kmem_cache_create("ip6_mrt_cache",
				       sizeof(struct mfc6_cache),
				       0, SLAB_HWCACHE_ALIGN,
				       NULL);
	if (!mrt_cachep)
		return -ENOMEM;

	err = register_pernet_subsys(&ip6mr_net_ops);
	if (err)
		goto reg_pernet_fail;

	err = register_netdevice_notifier(&ip6_mr_notifier);
	if (err)
		goto reg_notif_fail;
#ifdef CONFIG_IPV6_PIMSM_V2
	if (inet6_add_protocol(&pim6_protocol, IPPROTO_PIM) < 0) {
		pr_err("%s: can't add PIM protocol\n", __func__);
		err = -EAGAIN;
		goto add_proto_fail;
	}
#endif
	err = rtnl_register_module(THIS_MODULE, RTNL_FAMILY_IP6MR, RTM_GETROUTE,
				   NULL, ip6mr_rtm_dumproute, 0);
	if (err == 0)
		return 0;

#ifdef CONFIG_IPV6_PIMSM_V2
	inet6_del_protocol(&pim6_protocol, IPPROTO_PIM);
add_proto_fail:
	unregister_netdevice_notifier(&ip6_mr_notifier);
#endif
reg_notif_fail:
	unregister_pernet_subsys(&ip6mr_net_ops);
reg_pernet_fail:
	kmem_cache_destroy(mrt_cachep);
	return err;
}
void ip6_mr_cleanup(void)
{
	rtnl_unregister(RTNL_FAMILY_IP6MR, RTM_GETROUTE);
#ifdef CONFIG_IPV6_PIMSM_V2
	inet6_del_protocol(&pim6_protocol, IPPROTO_PIM);
#endif
	unregister_netdevice_notifier(&ip6_mr_notifier);
	unregister_pernet_subsys(&ip6mr_net_ops);
	kmem_cache_destroy(mrt_cachep);
}
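
/* Add or update an MFC entry. If this resolves an entry sitting on the
 * unresolved queue, its queued packets are played through the router.
 */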
static int ip6mr_mfc_add(struct net *net, struct mr6_table *mrt,
			 struct mf6cctl *mfc, int mrtsock, int parent)
{
	bool found = false;
	int line;
	struct mfc6_cache *uc, *c;
	unsigned char ttls[MAXMIFS];
	int i;

	if (mfc->mf6cc_parent >= MAXMIFS)
		return -ENFILE;

	memset(ttls, 255, MAXMIFS);
	for (i = 0; i < MAXMIFS; i++) {
		if (IF_ISSET(i, &mfc->mf6cc_ifset))
			ttls[i] = 1;
	}

	line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr);

	list_for_each_entry(c, &mrt->mfc6_cache_array[line], list) {
		if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
		    ipv6_addr_equal(&c->mf6c_mcastgrp,
				    &mfc->mf6cc_mcastgrp.sin6_addr) &&
		    (parent == -1 || parent == mfc->mf6cc_parent)) {
			found = true;
			break;
		}
	}

	if (found) {
		write_lock_bh(&mrt_lock);
		c->mf6c_parent = mfc->mf6cc_parent;
		ip6mr_update_thresholds(mrt, c, ttls);
		if (!mrtsock)
			c->mfc_flags |= MFC_STATIC;
		write_unlock_bh(&mrt_lock);
		mr6_netlink_event(mrt, c, RTM_NEWROUTE);
		return 0;
	}

	if (!ipv6_addr_any(&mfc->mf6cc_mcastgrp.sin6_addr) &&
	    !ipv6_addr_is_multicast(&mfc->mf6cc_mcastgrp.sin6_addr))
		return -EINVAL;

	c = ip6mr_cache_alloc();
	if (!c)
		return -ENOMEM;

	c->mf6c_origin = mfc->mf6cc_origin.sin6_addr;
	c->mf6c_mcastgrp = mfc->mf6cc_mcastgrp.sin6_addr;
	c->mf6c_parent = mfc->mf6cc_parent;
	ip6mr_update_thresholds(mrt, c, ttls);
	if (!mrtsock)
		c->mfc_flags |= MFC_STATIC;

	write_lock_bh(&mrt_lock);
	list_add(&c->list, &mrt->mfc6_cache_array[line]);
	write_unlock_bh(&mrt_lock);

	/*
	 *	Check to see if we resolved a queued list. If so we
	 *	need to send on the frames and tidy up.
	 */
	found = false;
	spin_lock_bh(&mfc_unres_lock);
	list_for_each_entry(uc, &mrt->mfc6_unres_queue, list) {
		if (ipv6_addr_equal(&uc->mf6c_origin, &c->mf6c_origin) &&
		    ipv6_addr_equal(&uc->mf6c_mcastgrp, &c->mf6c_mcastgrp)) {
			list_del(&uc->list);
			atomic_dec(&mrt->cache_resolve_queue_len);
			found = true;
			break;
		}
	}
	if (list_empty(&mrt->mfc6_unres_queue))
		del_timer(&mrt->ipmr_expire_timer);
	spin_unlock_bh(&mfc_unres_lock);

	if (found) {
		ip6mr_cache_resolve(net, mrt, uc, c);
		ip6mr_cache_free(uc);
	}
	mr6_netlink_event(mrt, c, RTM_NEWROUTE);
	return 0;
}
/*
 *	Close the multicast socket, and clear the vif tables etc
 */

static void mroute_clean_tables(struct mr6_table *mrt, bool all)
{
	int i;
	LIST_HEAD(list);
	struct mfc6_cache *c, *next;

	/*
	 *	Shut down all active vif entries
	 */
	for (i = 0; i < mrt->maxvif; i++) {
		if (!all && (mrt->vif6_table[i].flags & VIFF_STATIC))
			continue;
		mif6_delete(mrt, i, 0, &list);
	}
	unregister_netdevice_many(&list);

	/*
	 *	Wipe the cache
	 */
	for (i = 0; i < MFC6_LINES; i++) {
		list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[i], list) {
			if (!all && (c->mfc_flags & MFC_STATIC))
				continue;
			write_lock_bh(&mrt_lock);
			list_del(&c->list);
			write_unlock_bh(&mrt_lock);

			mr6_netlink_event(mrt, c, RTM_DELROUTE);
			ip6mr_cache_free(c);
		}
	}

	if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
		spin_lock_bh(&mfc_unres_lock);
		list_for_each_entry_safe(c, next, &mrt->mfc6_unres_queue, list) {
			list_del(&c->list);
			mr6_netlink_event(mrt, c, RTM_DELROUTE);
			ip6mr_destroy_unres(mrt, c);
		}
		spin_unlock_bh(&mfc_unres_lock);
	}
}
static int ip6mr_sk_init(struct mr6_table *mrt, struct sock *sk)
{
	int err = 0;
	struct net *net = sock_net(sk);

	rtnl_lock();
	write_lock_bh(&mrt_lock);
	if (likely(mrt->mroute6_sk == NULL)) {
		mrt->mroute6_sk = sk;
		net->ipv6.devconf_all->mc_forwarding++;
	} else {
		err = -EADDRINUSE;
	}
	write_unlock_bh(&mrt_lock);

	if (!err)
		inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
					     NETCONFA_MC_FORWARDING,
					     NETCONFA_IFINDEX_ALL,
					     net->ipv6.devconf_all);
	rtnl_unlock();

	return err;
}
int ip6mr_sk_done(struct sock *sk)
{
	int err = -EACCES;
	struct net *net = sock_net(sk);
	struct mr6_table *mrt;

	if (sk->sk_type != SOCK_RAW ||
	    inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
		return err;

	rtnl_lock();
	ip6mr_for_each_table(mrt, net) {
		if (sk == mrt->mroute6_sk) {
			write_lock_bh(&mrt_lock);
			mrt->mroute6_sk = NULL;
			net->ipv6.devconf_all->mc_forwarding--;
			write_unlock_bh(&mrt_lock);
			inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
						     NETCONFA_MC_FORWARDING,
						     NETCONFA_IFINDEX_ALL,
						     net->ipv6.devconf_all);

			mroute_clean_tables(mrt, false);
			err = 0;
			break;
		}
	}
	rtnl_unlock();

	return err;
}
struct sock *mroute6_socket(struct net *net, struct sk_buff *skb)
{
	struct mr6_table *mrt;
	struct flowi6 fl6 = {
		.flowi6_iif	= skb->skb_iif ? : LOOPBACK_IFINDEX,
		.flowi6_oif	= skb->dev->ifindex,
		.flowi6_mark	= skb->mark,
	};

	if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
		return NULL;

	return mrt->mroute6_sk;
}
/*
 *	Socket options and virtual interface manipulation. The whole
 *	virtual interface system is a complete heap, but unfortunately
 *	that's how BSD mrouted happens to think. Maybe one day with a proper
 *	MOSPF/PIM router set up we can clean this up.
 */

int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen)
{
	int ret, parent = 0;
	struct mif6ctl vif;
	struct mf6cctl mfc;
	mifi_t mifi;
	struct net *net = sock_net(sk);
	struct mr6_table *mrt;

	if (sk->sk_type != SOCK_RAW ||
	    inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
		return -EOPNOTSUPP;

	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
	if (!mrt)
		return -ENOENT;

	if (optname != MRT6_INIT) {
		if (sk != mrt->mroute6_sk && !ns_capable(net->user_ns, CAP_NET_ADMIN))
			return -EACCES;
	}

	switch (optname) {
	case MRT6_INIT:
		if (optlen < sizeof(int))
			return -EINVAL;

		return ip6mr_sk_init(mrt, sk);

	case MRT6_DONE:
		return ip6mr_sk_done(sk);

	case MRT6_ADD_MIF:
		if (optlen < sizeof(vif))
			return -EINVAL;
		if (copy_from_user(&vif, optval, sizeof(vif)))
			return -EFAULT;
		if (vif.mif6c_mifi >= MAXMIFS)
			return -ENFILE;
		rtnl_lock();
		ret = mif6_add(net, mrt, &vif, sk == mrt->mroute6_sk);
		rtnl_unlock();
		return ret;

	case MRT6_DEL_MIF:
		if (optlen < sizeof(mifi_t))
			return -EINVAL;
		if (copy_from_user(&mifi, optval, sizeof(mifi_t)))
			return -EFAULT;
		rtnl_lock();
		ret = mif6_delete(mrt, mifi, 0, NULL);
		rtnl_unlock();
		return ret;

	/*
	 *	Manipulate the forwarding caches. These live
	 *	in a sort of kernel/user symbiosis.
	 */
	case MRT6_ADD_MFC:
	case MRT6_DEL_MFC:
		parent = -1;
		/* fall through */
	case MRT6_ADD_MFC_PROXY:
	case MRT6_DEL_MFC_PROXY:
		if (optlen < sizeof(mfc))
			return -EINVAL;
		if (copy_from_user(&mfc, optval, sizeof(mfc)))
			return -EFAULT;
		if (parent == 0)
			parent = mfc.mf6cc_parent;
		rtnl_lock();
		if (optname == MRT6_DEL_MFC || optname == MRT6_DEL_MFC_PROXY)
			ret = ip6mr_mfc_delete(mrt, &mfc, parent);
		else
			ret = ip6mr_mfc_add(net, mrt, &mfc,
					    sk == mrt->mroute6_sk, parent);
		rtnl_unlock();
		return ret;

	/*
	 *	Control PIM assert (to activate pim will activate assert)
	 */
	case MRT6_ASSERT:
	{
		int v;

		if (optlen != sizeof(v))
			return -EINVAL;
		if (get_user(v, (int __user *)optval))
			return -EFAULT;
		mrt->mroute_do_assert = v;
		return 0;
	}

#ifdef CONFIG_IPV6_PIMSM_V2
	case MRT6_PIM:
	{
		int v;

		if (optlen != sizeof(v))
			return -EINVAL;
		if (get_user(v, (int __user *)optval))
			return -EFAULT;
		v = !!v;
		ret = 0;
		if (v != mrt->mroute_do_pim) {
			mrt->mroute_do_pim = v;
			mrt->mroute_do_assert = v;
		}
		return ret;
	}
#endif

#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
	case MRT6_TABLE:
	{
		u32 v;

		if (optlen != sizeof(u32))
			return -EINVAL;
		if (get_user(v, (u32 __user *)optval))
			return -EFAULT;
		/* "pim6reg%u" should not exceed 16 bytes (IFNAMSIZ) */
		if (v != RT_TABLE_DEFAULT && v >= 100000000)
			return -EINVAL;
		if (sk == mrt->mroute6_sk)
			return -EBUSY;

		rtnl_lock();
		ret = 0;
		if (!ip6mr_new_table(net, v))
			ret = -ENOMEM;
		raw6_sk(sk)->ip6mr_table = v;
		rtnl_unlock();
		return ret;
	}
#endif
	/*
	 *	Spurious command, or MRT6_VERSION which you cannot
	 *	set.
	 */
	default:
		return -ENOPROTOOPT;
	}
}
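
/* Userspace view (a sketch, not part of this file): a daemon such as pim6sd
 * opens a raw ICMPv6 socket, calls setsockopt(MRT6_INIT), adds interfaces
 * with MRT6_ADD_MIF and (S,G) entries with MRT6_ADD_MFC, and reads mrt6msg
 * upcalls (e.g. MRT6MSG_NOCACHE) from the same socket.
 */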
/*
 *	Getsock opt support for the multicast routing system.
 */

int ip6_mroute_getsockopt(struct sock *sk, int optname, char __user *optval,
			  int __user *optlen)
{
	int olr;
	int val;
	struct net *net = sock_net(sk);
	struct mr6_table *mrt;

	if (sk->sk_type != SOCK_RAW ||
	    inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
		return -EOPNOTSUPP;

	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
	if (!mrt)
		return -ENOENT;

	switch (optname) {
#ifdef CONFIG_IPV6_PIMSM_V2
	case MRT6_PIM:
		val = mrt->mroute_do_pim;
		break;
#endif
	case MRT6_ASSERT:
		val = mrt->mroute_do_assert;
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (get_user(olr, optlen))
		return -EFAULT;

	olr = min_t(int, olr, sizeof(int));
	if (olr < 0)
		return -EINVAL;

	if (put_user(olr, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, olr))
		return -EFAULT;
	return 0;
}
/*
 *	The IP multicast ioctl support routines.
 */

int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg)
{
	struct sioc_sg_req6 sr;
	struct sioc_mif_req6 vr;
	struct mif_device *vif;
	struct mfc6_cache *c;
	struct net *net = sock_net(sk);
	struct mr6_table *mrt;

	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
	if (!mrt)
		return -ENOENT;

	switch (cmd) {
	case SIOCGETMIFCNT_IN6:
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.mifi >= mrt->maxvif)
			return -EINVAL;
		read_lock(&mrt_lock);
		vif = &mrt->vif6_table[vr.mifi];
		if (MIF_EXISTS(mrt, vr.mifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT_IN6:
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		read_lock(&mrt_lock);
		c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
		if (c) {
			sr.pktcnt = c->mfc_un.res.pkt;
			sr.bytecnt = c->mfc_un.res.bytes;
			sr.wrong_if = c->mfc_un.res.wrong_if;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}
#ifdef CONFIG_COMPAT
struct compat_sioc_sg_req6 {
	struct sockaddr_in6 src;
	struct sockaddr_in6 grp;
	compat_ulong_t pktcnt;
	compat_ulong_t bytecnt;
	compat_ulong_t wrong_if;
};

struct compat_sioc_mif_req6 {
	mifi_t	mifi;
	compat_ulong_t icount;
	compat_ulong_t ocount;
	compat_ulong_t ibytes;
	compat_ulong_t obytes;
};
int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
{
	struct compat_sioc_sg_req6 sr;
	struct compat_sioc_mif_req6 vr;
	struct mif_device *vif;
	struct mfc6_cache *c;
	struct net *net = sock_net(sk);
	struct mr6_table *mrt;

	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
	if (!mrt)
		return -ENOENT;

	switch (cmd) {
	case SIOCGETMIFCNT_IN6:
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.mifi >= mrt->maxvif)
			return -EINVAL;
		read_lock(&mrt_lock);
		vif = &mrt->vif6_table[vr.mifi];
		if (MIF_EXISTS(mrt, vr.mifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT_IN6:
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		read_lock(&mrt_lock);
		c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
		if (c) {
			sr.pktcnt = c->mfc_un.res.pkt;
			sr.bytecnt = c->mfc_un.res.bytes;
			sr.wrong_if = c->mfc_un.res.wrong_if;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}
#endif
static inline int ip6mr_forward2_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
			IPSTATS_MIB_OUTFORWDATAGRAMS);
	__IP6_ADD_STATS(net, ip6_dst_idev(skb_dst(skb)),
			IPSTATS_MIB_OUTOCTETS, skb->len);
	return dst_output(net, sk, skb);
}
/*
 *	Processing handlers for ip6mr_forward
 */

static int ip6mr_forward2(struct net *net, struct mr6_table *mrt,
			  struct sk_buff *skb, struct mfc6_cache *c, int vifi)
{
	struct ipv6hdr *ipv6h;
	struct mif_device *vif = &mrt->vif6_table[vifi];
	struct net_device *dev;
	struct dst_entry *dst;
	struct flowi6 fl6;

	if (!vif->dev)
		goto out_free;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (vif->flags & MIFF_REGISTER) {
		vif->pkt_out++;
		vif->bytes_out += skb->len;
		vif->dev->stats.tx_bytes += skb->len;
		vif->dev->stats.tx_packets++;
		ip6mr_cache_report(mrt, skb, vifi, MRT6MSG_WHOLEPKT);
		goto out_free;
	}
#endif

	ipv6h = ipv6_hdr(skb);

	fl6 = (struct flowi6) {
		.flowi6_oif = vif->link,
		.daddr = ipv6h->daddr,
	};

	dst = ip6_route_output(net, NULL, &fl6);
	if (dst->error) {
		dst_release(dst);
		goto out_free;
	}

	skb_dst_drop(skb);
	skb_dst_set(skb, dst);

	/*
	 * RFC1584 teaches, that DVMRP/PIM router must deliver packets locally
	 * not only before forwarding, but after forwarding on all output
	 * interfaces. It is clear, if mrouter runs a multicasting
	 * program, it should receive packets not depending to what interface
	 * program is joined.
	 * If we will not make it, the program will have to join on all
	 * interfaces. On the other hand, multihoming host (or router, but
	 * not mrouter) cannot join to more than one interface - it will
	 * result in receiving multiple packets.
	 */
	dev = vif->dev;
	skb->dev = dev;
	vif->pkt_out++;
	vif->bytes_out += skb->len;

	/* We are about to write */
	/* XXX: extension headers? */
	if (skb_cow(skb, sizeof(*ipv6h) + LL_RESERVED_SPACE(dev)))
		goto out_free;

	ipv6h = ipv6_hdr(skb);
	ipv6h->hop_limit--;

	IP6CB(skb)->flags |= IP6SKB_FORWARDED;

	return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD,
		       net, NULL, skb, skb->dev, dev,
		       ip6mr_forward2_finish);

out_free:
	kfree_skb(skb);
	return 0;
}
static int ip6mr_find_vif(struct mr6_table *mrt, struct net_device *dev)
{
	int ct;

	for (ct = mrt->maxvif - 1; ct >= 0; ct--) {
		if (mrt->vif6_table[ct].dev == dev)
			break;
	}
	return ct;
}
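
/* Forward a packet along the oifs of a cache entry. A packet arriving on an
 * unexpected interface bumps wrong_if and may trigger an MRT6MSG_WRONGMIF
 * assert report; otherwise it is cloned out to every oif whose TTL
 * threshold it passes.
 */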
static void ip6_mr_forward(struct net *net, struct mr6_table *mrt,
			   struct sk_buff *skb, struct mfc6_cache *cache)
{
	int psend = -1;
	int vif, ct;
	int true_vifi = ip6mr_find_vif(mrt, skb->dev);

	vif = cache->mf6c_parent;
	cache->mfc_un.res.pkt++;
	cache->mfc_un.res.bytes += skb->len;
	cache->mfc_un.res.lastuse = jiffies;

	if (ipv6_addr_any(&cache->mf6c_origin) && true_vifi >= 0) {
		struct mfc6_cache *cache_proxy;

		/* For an (*,G) entry, we only check that the incoming
		 * interface is part of the static tree.
		 */
		cache_proxy = ip6mr_cache_find_any_parent(mrt, vif);
		if (cache_proxy &&
		    cache_proxy->mfc_un.res.ttls[true_vifi] < 255)
			goto forward;
	}

	/*
	 * Wrong interface: drop packet and (maybe) send PIM assert.
	 */
	if (mrt->vif6_table[vif].dev != skb->dev) {
		cache->mfc_un.res.wrong_if++;

		if (true_vifi >= 0 && mrt->mroute_do_assert &&
		    /* pimsm uses asserts, when switching from RPT to SPT,
		       so that we cannot check that packet arrived on an oif.
		       It is bad, but otherwise we would need to move pretty
		       large chunk of pimd to kernel. Ough... --ANK
		     */
		    (mrt->mroute_do_pim ||
		     cache->mfc_un.res.ttls[true_vifi] < 255) &&
		    time_after(jiffies,
			       cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
			cache->mfc_un.res.last_assert = jiffies;
			ip6mr_cache_report(mrt, skb, true_vifi, MRT6MSG_WRONGMIF);
		}
		goto dont_forward;
	}

forward:
	mrt->vif6_table[vif].pkt_in++;
	mrt->vif6_table[vif].bytes_in += skb->len;

	/*
	 *	Forward the frame
	 */
	if (ipv6_addr_any(&cache->mf6c_origin) &&
	    ipv6_addr_any(&cache->mf6c_mcastgrp)) {
		if (true_vifi >= 0 &&
		    true_vifi != cache->mf6c_parent &&
		    ipv6_hdr(skb)->hop_limit >
				cache->mfc_un.res.ttls[cache->mf6c_parent]) {
			/* It's an (*,*) entry and the packet is not coming from
			 * the upstream: forward the packet to the upstream
			 * only.
			 */
			psend = cache->mf6c_parent;
			goto last_forward;
		}
		goto dont_forward;
	}
	for (ct = cache->mfc_un.res.maxvif - 1; ct >= cache->mfc_un.res.minvif; ct--) {
		/* For (*,G) entry, don't forward to the incoming interface */
		if ((!ipv6_addr_any(&cache->mf6c_origin) || ct != true_vifi) &&
		    ipv6_hdr(skb)->hop_limit > cache->mfc_un.res.ttls[ct]) {
			if (psend != -1) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2)
					ip6mr_forward2(net, mrt, skb2, cache, psend);
			}
			psend = ct;
		}
	}
last_forward:
	if (psend != -1) {
		ip6mr_forward2(net, mrt, skb, cache, psend);
		return;
	}

dont_forward:
	kfree_skb(skb);
}
/*
 *	Multicast packets for forwarding arrive here
 */

int ip6_mr_input(struct sk_buff *skb)
{
	struct mfc6_cache *cache;
	struct net *net = dev_net(skb->dev);
	struct mr6_table *mrt;
	struct flowi6 fl6 = {
		.flowi6_iif	= skb->dev->ifindex,
		.flowi6_mark	= skb->mark,
	};
	int err;

	err = ip6mr_fib_lookup(net, &fl6, &mrt);
	if (err < 0) {
		kfree_skb(skb);
		return err;
	}

	read_lock(&mrt_lock);
	cache = ip6mr_cache_find(mrt,
				 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr);
	if (!cache) {
		int vif = ip6mr_find_vif(mrt, skb->dev);

		if (vif >= 0)
			cache = ip6mr_cache_find_any(mrt,
						     &ipv6_hdr(skb)->daddr,
						     vif);
	}

	/*
	 *	No usable cache entry
	 */
	if (!cache) {
		int vif;

		vif = ip6mr_find_vif(mrt, skb->dev);
		if (vif >= 0) {
			int err = ip6mr_cache_unresolved(mrt, vif, skb);
			read_unlock(&mrt_lock);

			return err;
		}
		read_unlock(&mrt_lock);
		kfree_skb(skb);
		return -ENODEV;
	}

	ip6_mr_forward(net, mrt, skb, cache);

	read_unlock(&mrt_lock);

	return 0;
}
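
/* Fill RTA_IIF, the RTA_MULTIPATH nexthop list and RTA_MFC_STATS for one
 * cache entry; unresolved entries only get RTNH_F_UNRESOLVED.
 */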
static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
			       struct mfc6_cache *c, struct rtmsg *rtm)
{
	struct rta_mfc_stats mfcs;
	struct nlattr *mp_attr;
	struct rtnexthop *nhp;
	unsigned long lastuse;
	int ct;

	/* If cache is unresolved, don't try to parse IIF and OIF */
	if (c->mf6c_parent >= MAXMIFS) {
		rtm->rtm_flags |= RTNH_F_UNRESOLVED;
		return -ENOENT;
	}

	if (MIF_EXISTS(mrt, c->mf6c_parent) &&
	    nla_put_u32(skb, RTA_IIF, mrt->vif6_table[c->mf6c_parent].dev->ifindex) < 0)
		return -EMSGSIZE;
	mp_attr = nla_nest_start(skb, RTA_MULTIPATH);
	if (!mp_attr)
		return -EMSGSIZE;

	for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
		if (MIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
			nhp = nla_reserve_nohdr(skb, sizeof(*nhp));
			if (!nhp) {
				nla_nest_cancel(skb, mp_attr);
				return -EMSGSIZE;
			}

			nhp->rtnh_flags = 0;
			nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
			nhp->rtnh_ifindex = mrt->vif6_table[ct].dev->ifindex;
			nhp->rtnh_len = sizeof(*nhp);
		}
	}

	nla_nest_end(skb, mp_attr);

	lastuse = READ_ONCE(c->mfc_un.res.lastuse);
	lastuse = time_after_eq(jiffies, lastuse) ? jiffies - lastuse : 0;

	mfcs.mfcs_packets = c->mfc_un.res.pkt;
	mfcs.mfcs_bytes = c->mfc_un.res.bytes;
	mfcs.mfcs_wrong_if = c->mfc_un.res.wrong_if;
	if (nla_put_64bit(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs, RTA_PAD) ||
	    nla_put_u64_64bit(skb, RTA_EXPIRES, jiffies_to_clock_t(lastuse),
			      RTA_PAD))
		return -EMSGSIZE;

	rtm->rtm_type = RTN_MULTICAST;
	return 1;
}
int ip6mr_get_route(struct net *net, struct sk_buff *skb, struct rtmsg *rtm,
		    u32 portid)
{
	int err;
	struct mr6_table *mrt;
	struct mfc6_cache *cache;
	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);

	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
	if (!mrt)
		return -ENOENT;

	read_lock(&mrt_lock);
	cache = ip6mr_cache_find(mrt, &rt->rt6i_src.addr, &rt->rt6i_dst.addr);
	if (!cache && skb->dev) {
		int vif = ip6mr_find_vif(mrt, skb->dev);

		if (vif >= 0)
			cache = ip6mr_cache_find_any(mrt, &rt->rt6i_dst.addr,
						     vif);
	}

	if (!cache) {
		struct sk_buff *skb2;
		struct ipv6hdr *iph;
		struct net_device *dev;
		int vif;

		dev = skb->dev;
		if (!dev || (vif = ip6mr_find_vif(mrt, dev)) < 0) {
			read_unlock(&mrt_lock);
			return -ENODEV;
		}

		/* really correct? */
		skb2 = alloc_skb(sizeof(struct ipv6hdr), GFP_ATOMIC);
		if (!skb2) {
			read_unlock(&mrt_lock);
			return -ENOMEM;
		}

		NETLINK_CB(skb2).portid = portid;
		skb_reset_transport_header(skb2);

		skb_put(skb2, sizeof(struct ipv6hdr));
		skb_reset_network_header(skb2);

		iph = ipv6_hdr(skb2);
		iph->version = 0;
		iph->priority = 0;
		iph->flow_lbl[0] = 0;
		iph->flow_lbl[1] = 0;
		iph->flow_lbl[2] = 0;
		iph->payload_len = 0;
		iph->nexthdr = IPPROTO_NONE;
		iph->hop_limit = 0;
		iph->saddr = rt->rt6i_src.addr;
		iph->daddr = rt->rt6i_dst.addr;

		err = ip6mr_cache_unresolved(mrt, vif, skb2);
		read_unlock(&mrt_lock);

		return err;
	}

	if (rtm->rtm_flags & RTM_F_NOTIFY)
		cache->mfc_flags |= MFC_NOTIFY;

	err = __ip6mr_fill_mroute(mrt, skb, cache, rtm);
	read_unlock(&mrt_lock);
	return err;
}
static int ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
			     u32 portid, u32 seq, struct mfc6_cache *c, int cmd,
			     int flags)
{
	struct nlmsghdr *nlh;
	struct rtmsg *rtm;
	int err;

	nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);
	if (!nlh)
		return -EMSGSIZE;

	rtm = nlmsg_data(nlh);
	rtm->rtm_family   = RTNL_FAMILY_IP6MR;
	rtm->rtm_dst_len  = 128;
	rtm->rtm_src_len  = 128;
	rtm->rtm_tos      = 0;
	rtm->rtm_table    = mrt->id;
	if (nla_put_u32(skb, RTA_TABLE, mrt->id))
		goto nla_put_failure;
	rtm->rtm_type = RTN_MULTICAST;
	rtm->rtm_scope    = RT_SCOPE_UNIVERSE;
	if (c->mfc_flags & MFC_STATIC)
		rtm->rtm_protocol = RTPROT_STATIC;
	else
		rtm->rtm_protocol = RTPROT_MROUTED;
	rtm->rtm_flags    = 0;

	if (nla_put_in6_addr(skb, RTA_SRC, &c->mf6c_origin) ||
	    nla_put_in6_addr(skb, RTA_DST, &c->mf6c_mcastgrp))
		goto nla_put_failure;
	err = __ip6mr_fill_mroute(mrt, skb, c, rtm);
	/* do not break the dump if cache is unresolved */
	if (err < 0 && err != -ENOENT)
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
static int mr6_msgsize(bool unresolved, int maxvif)
{
	size_t len =
		NLMSG_ALIGN(sizeof(struct rtmsg))
		+ nla_total_size(4)	/* RTA_TABLE */
		+ nla_total_size(sizeof(struct in6_addr))	/* RTA_SRC */
		+ nla_total_size(sizeof(struct in6_addr))	/* RTA_DST */
		;

	if (!unresolved)
		len = len
		      + nla_total_size(4)	/* RTA_IIF */
		      + nla_total_size(0)	/* RTA_MULTIPATH */
		      + maxvif * NLA_ALIGN(sizeof(struct rtnexthop))
						/* RTA_MFC_STATS */
		      + nla_total_size_64bit(sizeof(struct rta_mfc_stats))
		;

	return len;
}
static void mr6_netlink_event(struct mr6_table *mrt, struct mfc6_cache *mfc,
			      int cmd)
{
	struct net *net = read_pnet(&mrt->net);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(mr6_msgsize(mfc->mf6c_parent >= MAXMIFS, mrt->maxvif),
			GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = ip6mr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
	if (err < 0)
		goto errout;

	rtnl_notify(skb, net, 0, RTNLGRP_IPV6_MROUTE, NULL, GFP_ATOMIC);
	return;

errout:
	kfree_skb(skb);
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_IPV6_MROUTE, err);
}
static size_t mrt6msg_netlink_msgsize(size_t payloadlen)
{
	size_t len =
		NLMSG_ALIGN(sizeof(struct rtgenmsg))
		+ nla_total_size(1)	/* IP6MRA_CREPORT_MSGTYPE */
		+ nla_total_size(4)	/* IP6MRA_CREPORT_MIF_ID */
					/* IP6MRA_CREPORT_SRC_ADDR */
		+ nla_total_size(sizeof(struct in6_addr))
					/* IP6MRA_CREPORT_DST_ADDR */
		+ nla_total_size(sizeof(struct in6_addr))
					/* IP6MRA_CREPORT_PKT */
		+ nla_total_size(payloadlen)
		;

	return len;
}
static void mrt6msg_netlink_event(struct mr6_table *mrt, struct sk_buff *pkt)
{
	struct net *net = read_pnet(&mrt->net);
	struct nlmsghdr *nlh;
	struct rtgenmsg *rtgenm;
	struct mrt6msg *msg;
	struct sk_buff *skb;
	struct nlattr *nla;
	int payloadlen;

	payloadlen = pkt->len - sizeof(struct mrt6msg);
	msg = (struct mrt6msg *)skb_transport_header(pkt);

	skb = nlmsg_new(mrt6msg_netlink_msgsize(payloadlen), GFP_ATOMIC);
	if (!skb)
		goto errout;

	nlh = nlmsg_put(skb, 0, 0, RTM_NEWCACHEREPORT,
			sizeof(struct rtgenmsg), 0);
	if (!nlh)
		goto errout;
	rtgenm = nlmsg_data(nlh);
	rtgenm->rtgen_family = RTNL_FAMILY_IP6MR;
	if (nla_put_u8(skb, IP6MRA_CREPORT_MSGTYPE, msg->im6_msgtype) ||
	    nla_put_u32(skb, IP6MRA_CREPORT_MIF_ID, msg->im6_mif) ||
	    nla_put_in6_addr(skb, IP6MRA_CREPORT_SRC_ADDR,
			     &msg->im6_src) ||
	    nla_put_in6_addr(skb, IP6MRA_CREPORT_DST_ADDR,
			     &msg->im6_dst))
		goto nla_put_failure;

	nla = nla_reserve(skb, IP6MRA_CREPORT_PKT, payloadlen);
	if (!nla || skb_copy_bits(pkt, sizeof(struct mrt6msg),
				  nla_data(nla), payloadlen))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);

	rtnl_notify(skb, net, 0, RTNLGRP_IPV6_MROUTE_R, NULL, GFP_ATOMIC);
	return;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
errout:
	kfree_skb(skb);
	rtnl_set_sk_err(net, RTNLGRP_IPV6_MROUTE_R, -ENOBUFS);
}
static int ip6mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct mr6_table *mrt;
	struct mfc6_cache *mfc;
	unsigned int t = 0, s_t;
	unsigned int h = 0, s_h;
	unsigned int e = 0, s_e;

	s_t = cb->args[0];
	s_h = cb->args[1];
	s_e = cb->args[2];

	read_lock(&mrt_lock);
	ip6mr_for_each_table(mrt, net) {
		if (t < s_t)
			goto next_table;
		if (t > s_t)
			s_h = 0;
		for (h = s_h; h < MFC6_LINES; h++) {
			list_for_each_entry(mfc, &mrt->mfc6_cache_array[h], list) {
				if (e < s_e)
					goto next_entry;
				if (ip6mr_fill_mroute(mrt, skb,
						      NETLINK_CB(cb->skb).portid,
						      cb->nlh->nlmsg_seq,
						      mfc, RTM_NEWROUTE,
						      NLM_F_MULTI) < 0)
					goto done;
next_entry:
				e++;
			}
			e = s_e = 0;
		}
		spin_lock_bh(&mfc_unres_lock);
		list_for_each_entry(mfc, &mrt->mfc6_unres_queue, list) {
			if (e < s_e)
				goto next_entry2;
			if (ip6mr_fill_mroute(mrt, skb,
					      NETLINK_CB(cb->skb).portid,
					      cb->nlh->nlmsg_seq,
					      mfc, RTM_NEWROUTE,
					      NLM_F_MULTI) < 0) {
				spin_unlock_bh(&mfc_unres_lock);
				goto done;
			}
next_entry2:
			e++;
		}
		spin_unlock_bh(&mfc_unres_lock);
		e = s_e = 0;
		s_h = 0;
next_table:
		t++;
	}
done:
	read_unlock(&mrt_lock);

	cb->args[2] = e;
	cb->args[1] = h;
	cb->args[0] = t;

	return skb->len;
}