/*
 *	Linux IPv6 multicast routing support for BSD pim6sd
 *	Based on net/ipv4/ipmr.c.
 *
 *	(c) 2004 Mickael Hoerdt, <hoerdt@clarinet.u-strasbg.fr>
 *		LSIIT Laboratory, Strasbourg, France
 *	(c) 2004 Jean-Philippe Andriot, <jean-philippe.andriot@6WIND.com>
 *	Copyright (C)2007,2008 USAGI/WIDE Project
 *		YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 */
#include <asm/system.h>
#include <asm/uaccess.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <linux/notifier.h>
#include <linux/if_arp.h>
#include <net/checksum.h>
#include <net/netlink.h>
#include <net/fib_rules.h>
#include <net/ip6_route.h>
#include <linux/mroute6.h>
#include <linux/pim.h>
#include <net/addrconf.h>
#include <linux/netfilter_ipv6.h>
#include <net/ip6_checksum.h>
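/*
 * Per-namespace, per-table multicast routing state. With
 * CONFIG_IPV6_MROUTE_MULTIPLE_TABLES one mr6_table exists per policy
 * routing table; otherwise each namespace has a single default table.
 */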
struct mr6_table {
	struct list_head	list;
#ifdef CONFIG_NET_NS
	struct net		*net;
#endif
	u32			id;
	struct sock		*mroute6_sk;
	struct timer_list	ipmr_expire_timer;
	struct list_head	mfc6_unres_queue;
	struct list_head	mfc6_cache_array[MFC6_LINES];
	struct mif_device	vif6_table[MAXMIFS];
	int			maxvif;
	atomic_t		cache_resolve_queue_len;
	int			mroute_do_assert;
	int			mroute_do_pim;
#ifdef CONFIG_IPV6_PIMSM_V2
	int			mroute_reg_vif_num;
#endif
};
struct ip6mr_rule {
	struct fib_rule		common;
};
struct ip6mr_result {
	struct mr6_table	*mrt;
};
/* Big lock, protecting vif table, mrt cache and mroute socket state.
   Note that the changes are semaphored via rtnl_lock.
 */

static DEFINE_RWLOCK(mrt_lock);
/*
 *	Multicast router control variables
 */

#define MIF_EXISTS(_mrt, _idx) ((_mrt)->vif6_table[_idx].dev != NULL)
/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);
/* We return to Alan's original scheme. The hash table of resolved
   entries is changed only in process context and protected
   with the weak lock mrt_lock. The queue of unresolved entries is
   protected with the strong spinlock mfc_unres_lock.

   In this case the data path is entirely free of exclusive locks.
 */
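/* Slab cache from which both resolved and unresolved mfc6_cache
 * entries are allocated.
 */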
static struct kmem_cache *mrt_cachep __read_mostly;
static struct mr6_table *ip6mr_new_table(struct net *net, u32 id);
static void ip6mr_free_table(struct mr6_table *mrt);

static int ip6_mr_forward(struct net *net, struct mr6_table *mrt,
			  struct sk_buff *skb, struct mfc6_cache *cache);
static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt,
			      mifi_t mifi, int assert);
static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
			       struct mfc6_cache *c, struct rtmsg *rtm);
static int ip6mr_rtm_dumproute(struct sk_buff *skb,
			       struct netlink_callback *cb);
static void mroute_clean_tables(struct mr6_table *mrt);
static void ipmr_expire_process(unsigned long arg);
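/*
 * Table lookup: with multiple-table support the table is selected by
 * fib rules keyed on the flow; without it every lookup resolves to the
 * single per-namespace table.
 */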
#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
#define ip6mr_for_each_table(mrt, net) \
	list_for_each_entry_rcu(mrt, &net->ipv6.mr6_tables, list)
static struct mr6_table *ip6mr_get_table(struct net *net, u32 id)
{
	struct mr6_table *mrt;

	ip6mr_for_each_table(mrt, net) {
		if (mrt->id == id)
			return mrt;
	}
	return NULL;
}
static int ip6mr_fib_lookup(struct net *net, struct flowi *flp,
			    struct mr6_table **mrt)
{
	struct ip6mr_result res;
	struct fib_lookup_arg arg = { .result = &res, };
	int err;

	err = fib_rules_lookup(net->ipv6.mr6_rules_ops, flp, 0, &arg);
	if (err < 0)
		return err;
	*mrt = res.mrt;
	return 0;
}
static int ip6mr_rule_action(struct fib_rule *rule, struct flowi *flp,
			     int flags, struct fib_lookup_arg *arg)
{
	struct ip6mr_result *res = arg->result;
	struct mr6_table *mrt;

	switch (rule->action) {
	case FR_ACT_TO_TBL:
		break;
	case FR_ACT_UNREACHABLE:
		return -ENETUNREACH;
	case FR_ACT_PROHIBIT:
		return -EACCES;
	case FR_ACT_BLACKHOLE:
	default:
		return -EINVAL;
	}

	mrt = ip6mr_get_table(rule->fr_net, rule->table);
	if (mrt == NULL)
		return -EAGAIN;
	res->mrt = mrt;
	return 0;
}
static int ip6mr_rule_match(struct fib_rule *rule, struct flowi *flp, int flags)
{
	return 1;
}
static const struct nla_policy ip6mr_rule_policy[FRA_MAX + 1] = {
	FRA_GENERIC_POLICY,
};
static int ip6mr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
				struct fib_rule_hdr *frh, struct nlattr **tb)
{
	return 0;
}
static int ip6mr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
			      struct nlattr **tb)
{
	return 1;
}
static int ip6mr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
			   struct fib_rule_hdr *frh)
{
	frh->dst_len = 0;
	frh->src_len = 0;
	frh->tos     = 0;
	return 0;
}
static const struct fib_rules_ops __net_initdata ip6mr_rules_ops_template = {
	.family		= RTNL_FAMILY_IP6MR,
	.rule_size	= sizeof(struct ip6mr_rule),
	.addr_size	= sizeof(struct in6_addr),
	.action		= ip6mr_rule_action,
	.match		= ip6mr_rule_match,
	.configure	= ip6mr_rule_configure,
	.compare	= ip6mr_rule_compare,
	.default_pref	= fib_default_rule_pref,
	.fill		= ip6mr_rule_fill,
	.nlgroup	= RTNLGRP_IPV6_RULE,
	.policy		= ip6mr_rule_policy,
	.owner		= THIS_MODULE,
};
static int __net_init ip6mr_rules_init(struct net *net)
{
	struct fib_rules_ops *ops;
	struct mr6_table *mrt;
	int err;

	ops = fib_rules_register(&ip6mr_rules_ops_template, net);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	INIT_LIST_HEAD(&net->ipv6.mr6_tables);

	mrt = ip6mr_new_table(net, RT6_TABLE_DFLT);
	if (mrt == NULL) {
		err = -ENOMEM;
		goto err1;
	}

	err = fib_default_rule_add(ops, 0x7fff, RT6_TABLE_DFLT, 0);
	if (err < 0)
		goto err2;

	net->ipv6.mr6_rules_ops = ops;
	return 0;

err2:
	kfree(mrt);
err1:
	fib_rules_unregister(ops);
	return err;
}
static void __net_exit ip6mr_rules_exit(struct net *net)
{
	struct mr6_table *mrt, *next;

	list_for_each_entry_safe(mrt, next, &net->ipv6.mr6_tables, list)
		ip6mr_free_table(mrt);
	fib_rules_unregister(net->ipv6.mr6_rules_ops);
}
#else
#define ip6mr_for_each_table(mrt, net) \
	for (mrt = net->ipv6.mrt6; mrt; mrt = NULL)

static struct mr6_table *ip6mr_get_table(struct net *net, u32 id)
{
	return net->ipv6.mrt6;
}

static int ip6mr_fib_lookup(struct net *net, struct flowi *flp,
			    struct mr6_table **mrt)
{
	*mrt = net->ipv6.mrt6;
	return 0;
}

static int __net_init ip6mr_rules_init(struct net *net)
{
	net->ipv6.mrt6 = ip6mr_new_table(net, RT6_TABLE_DFLT);
	return net->ipv6.mrt6 ? 0 : -ENOMEM;
}

static void __net_exit ip6mr_rules_exit(struct net *net)
{
	ip6mr_free_table(net->ipv6.mrt6);
}
#endif
static struct mr6_table *ip6mr_new_table(struct net *net, u32 id)
{
	struct mr6_table *mrt;
	unsigned int i;

	mrt = ip6mr_get_table(net, id);
	if (mrt != NULL)
		return mrt;

	mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
	if (mrt == NULL)
		return NULL;
	mrt->id = id;
	write_pnet(&mrt->net, net);

	/* Forwarding cache */
	for (i = 0; i < MFC6_LINES; i++)
		INIT_LIST_HEAD(&mrt->mfc6_cache_array[i]);

	INIT_LIST_HEAD(&mrt->mfc6_unres_queue);

	setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process,
		    (unsigned long)mrt);

#ifdef CONFIG_IPV6_PIMSM_V2
	mrt->mroute_reg_vif_num = -1;
#endif
#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
	list_add_tail_rcu(&mrt->list, &net->ipv6.mr6_tables);
#endif
	return mrt;
}
static void ip6mr_free_table(struct mr6_table *mrt)
{
	del_timer(&mrt->ipmr_expire_timer);
	mroute_clean_tables(mrt);
	kfree(mrt);
}
#ifdef CONFIG_PROC_FS

struct ipmr_mfc_iter {
	struct seq_net_private p;
	struct mr6_table *mrt;
	struct list_head *cache;
	int ct;
};
static struct mfc6_cache *ipmr_mfc_seq_idx(struct net *net,
					   struct ipmr_mfc_iter *it, loff_t pos)
{
	struct mr6_table *mrt = it->mrt;
	struct mfc6_cache *mfc;

	read_lock(&mrt_lock);
	for (it->ct = 0; it->ct < MFC6_LINES; it->ct++) {
		it->cache = &mrt->mfc6_cache_array[it->ct];
		list_for_each_entry(mfc, it->cache, list)
			if (pos-- == 0)
				return mfc;
	}
	read_unlock(&mrt_lock);

	spin_lock_bh(&mfc_unres_lock);
	it->cache = &mrt->mfc6_unres_queue;
	list_for_each_entry(mfc, it->cache, list)
		if (pos-- == 0)
			return mfc;
	spin_unlock_bh(&mfc_unres_lock);

	it->cache = NULL;
	return NULL;
}
/*
 *	The /proc interfaces to multicast routing
 *	/proc/ip6_mr_cache /proc/ip6_mr_vif
 */

struct ipmr_vif_iter {
	struct seq_net_private p;
	struct mr6_table *mrt;
	int ct;
};
static struct mif_device *ip6mr_vif_seq_idx(struct net *net,
					    struct ipmr_vif_iter *iter,
					    loff_t pos)
{
	struct mr6_table *mrt = iter->mrt;

	for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) {
		if (!MIF_EXISTS(mrt, iter->ct))
			continue;
		if (pos-- == 0)
			return &mrt->vif6_table[iter->ct];
	}
	return NULL;
}
static void *ip6mr_vif_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(mrt_lock)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr6_table *mrt;

	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
	if (mrt == NULL)
		return ERR_PTR(-ENOENT);

	iter->mrt = mrt;

	read_lock(&mrt_lock);
	return *pos ? ip6mr_vif_seq_idx(net, seq->private, *pos - 1)
		    : SEQ_START_TOKEN;
}
static void *ip6mr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr6_table *mrt = iter->mrt;

	++*pos;
	if (v == SEQ_START_TOKEN)
		return ip6mr_vif_seq_idx(net, iter, 0);

	while (++iter->ct < mrt->maxvif) {
		if (!MIF_EXISTS(mrt, iter->ct))
			continue;
		return &mrt->vif6_table[iter->ct];
	}
	return NULL;
}
static void ip6mr_vif_seq_stop(struct seq_file *seq, void *v)
	__releases(mrt_lock)
{
	read_unlock(&mrt_lock);
}
static int ip6mr_vif_seq_show(struct seq_file *seq, void *v)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct mr6_table *mrt = iter->mrt;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Interface      BytesIn  PktsIn  BytesOut PktsOut Flags\n");
	} else {
		const struct mif_device *vif = v;
		const char *name = vif->dev ? vif->dev->name : "none";

		seq_printf(seq,
			   "%2td %-10s %8ld %7ld  %8ld %7ld %05X\n",
			   vif - mrt->vif6_table,
			   name, vif->bytes_in, vif->pkt_in,
			   vif->bytes_out, vif->pkt_out,
			   vif->flags);
	}
	return 0;
}
static const struct seq_operations ip6mr_vif_seq_ops = {
	.start = ip6mr_vif_seq_start,
	.next  = ip6mr_vif_seq_next,
	.stop  = ip6mr_vif_seq_stop,
	.show  = ip6mr_vif_seq_show,
};
static int ip6mr_vif_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ip6mr_vif_seq_ops,
			    sizeof(struct ipmr_vif_iter));
}
static const struct file_operations ip6mr_vif_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ip6mr_vif_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};
static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct ipmr_mfc_iter *it = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr6_table *mrt;

	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
	if (mrt == NULL)
		return ERR_PTR(-ENOENT);

	it->mrt = mrt;
	it->cache = NULL;
	return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1)
		    : SEQ_START_TOKEN;
}
static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct mfc6_cache *mfc = v;
	struct ipmr_mfc_iter *it = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr6_table *mrt = it->mrt;

	++*pos;

	if (v == SEQ_START_TOKEN)
		return ipmr_mfc_seq_idx(net, seq->private, 0);

	if (mfc->list.next != it->cache)
		return list_entry(mfc->list.next, struct mfc6_cache, list);

	if (it->cache == &mrt->mfc6_unres_queue)
		goto end_of_list;

	BUG_ON(it->cache != &mrt->mfc6_cache_array[it->ct]);

	while (++it->ct < MFC6_LINES) {
		it->cache = &mrt->mfc6_cache_array[it->ct];
		if (list_empty(it->cache))
			continue;
		return list_first_entry(it->cache, struct mfc6_cache, list);
	}

	/* exhausted cache_array, show unresolved */
	read_unlock(&mrt_lock);
	it->cache = &mrt->mfc6_unres_queue;

	spin_lock_bh(&mfc_unres_lock);
	if (!list_empty(it->cache))
		return list_first_entry(it->cache, struct mfc6_cache, list);

end_of_list:
	spin_unlock_bh(&mfc_unres_lock);
	it->cache = NULL;

	return NULL;
}
static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
{
	struct ipmr_mfc_iter *it = seq->private;
	struct mr6_table *mrt = it->mrt;

	if (it->cache == &mrt->mfc6_unres_queue)
		spin_unlock_bh(&mfc_unres_lock);
	else if (it->cache == mrt->mfc6_cache_array)
		read_unlock(&mrt_lock);
}
static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
{
	int n;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Group                            "
			 "Origin                           "
			 "Iif      Pkts  Bytes     Wrong  Oifs\n");
	} else {
		const struct mfc6_cache *mfc = v;
		const struct ipmr_mfc_iter *it = seq->private;
		struct mr6_table *mrt = it->mrt;

		seq_printf(seq, "%pI6 %pI6 %-3hd",
			   &mfc->mf6c_mcastgrp, &mfc->mf6c_origin,
			   mfc->mf6c_parent);

		if (it->cache != &mrt->mfc6_unres_queue) {
			seq_printf(seq, " %8lu %8lu %8lu",
				   mfc->mfc_un.res.pkt,
				   mfc->mfc_un.res.bytes,
				   mfc->mfc_un.res.wrong_if);
			for (n = mfc->mfc_un.res.minvif;
			     n < mfc->mfc_un.res.maxvif; n++) {
				if (MIF_EXISTS(mrt, n) &&
				    mfc->mfc_un.res.ttls[n] < 255)
					seq_printf(seq,
						   " %2d:%-3d", n,
						   mfc->mfc_un.res.ttls[n]);
			}
		} else {
			/* unresolved mfc_caches don't contain
			 * pkt, bytes and wrong_if values
			 */
			seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
		}
		seq_putc(seq, '\n');
	}
	return 0;
}
static const struct seq_operations ipmr_mfc_seq_ops = {
	.start = ipmr_mfc_seq_start,
	.next  = ipmr_mfc_seq_next,
	.stop  = ipmr_mfc_seq_stop,
	.show  = ipmr_mfc_seq_show,
};
static int ipmr_mfc_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ipmr_mfc_seq_ops,
			    sizeof(struct ipmr_mfc_iter));
}
static const struct file_operations ip6mr_mfc_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ipmr_mfc_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};
#endif
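/*
 * PIM-SM v2 support: decapsulate PIM REGISTER messages and feed the
 * inner multicast packet back into the stack via the pim6reg device.
 */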
#ifdef CONFIG_IPV6_PIMSM_V2

static int pim6_rcv(struct sk_buff *skb)
{
	struct pimreghdr *pim;
	struct ipv6hdr *encap;
	struct net_device *reg_dev = NULL;
	struct net *net = dev_net(skb->dev);
	struct mr6_table *mrt;
	struct flowi fl = {
		.iif	= skb->dev->ifindex,
		.mark	= skb->mark,
	};
	int reg_vif_num;

	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
		goto drop;

	pim = (struct pimreghdr *)skb_transport_header(skb);
	if (pim->type != ((PIM_VERSION << 4) | PIM_REGISTER) ||
	    (pim->flags & PIM_NULL_REGISTER) ||
	    (csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
			     sizeof(*pim), IPPROTO_PIM,
			     csum_partial((void *)pim, sizeof(*pim), 0)) &&
	     csum_fold(skb_checksum(skb, 0, skb->len, 0))))
		goto drop;

	/* check if the inner packet is destined to mcast group */
	encap = (struct ipv6hdr *)(skb_transport_header(skb) +
				   sizeof(*pim));

	if (!ipv6_addr_is_multicast(&encap->daddr) ||
	    encap->payload_len == 0 ||
	    ntohs(encap->payload_len) + sizeof(*pim) > skb->len)
		goto drop;

	if (ip6mr_fib_lookup(net, &fl, &mrt) < 0)
		goto drop;
	reg_vif_num = mrt->mroute_reg_vif_num;

	read_lock(&mrt_lock);
	if (reg_vif_num >= 0)
		reg_dev = mrt->vif6_table[reg_vif_num].dev;
	if (reg_dev)
		dev_hold(reg_dev);
	read_unlock(&mrt_lock);

	if (reg_dev == NULL)
		goto drop;

	skb->mac_header = skb->network_header;
	skb_pull(skb, (u8 *)encap - skb->data);
	skb_reset_network_header(skb);
	skb->protocol = htons(ETH_P_IPV6);
	skb->ip_summed = CHECKSUM_NONE;
	skb->pkt_type = PACKET_HOST;

	skb_tunnel_rx(skb, reg_dev);

	netif_rx(skb);
	dev_put(reg_dev);
	return 0;
 drop:
	kfree_skb(skb);
	return 0;
}
static const struct inet6_protocol pim6_protocol = {
	.handler	=	pim6_rcv,
};
/* Service routines creating virtual interfaces: PIMREG */

static netdev_tx_t reg_vif_xmit(struct sk_buff *skb,
				struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct mr6_table *mrt;
	struct flowi fl = {
		.oif	= dev->ifindex,
		.iif	= skb->skb_iif,
		.mark	= skb->mark,
	};
	int err;

	err = ip6mr_fib_lookup(net, &fl, &mrt);
	if (err < 0)
		return err;

	read_lock(&mrt_lock);
	dev->stats.tx_bytes += skb->len;
	dev->stats.tx_packets++;
	ip6mr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, MRT6MSG_WHOLEPKT);
	read_unlock(&mrt_lock);
	kfree_skb(skb);
	return NETDEV_TX_OK;
}
static const struct net_device_ops reg_vif_netdev_ops = {
	.ndo_start_xmit	= reg_vif_xmit,
};
static void reg_vif_setup(struct net_device *dev)
{
	dev->type		= ARPHRD_PIMREG;
	dev->mtu		= 1500 - sizeof(struct ipv6hdr) - 8;
	dev->flags		= IFF_NOARP;
	dev->netdev_ops		= &reg_vif_netdev_ops;
	dev->destructor		= free_netdev;
	dev->features		|= NETIF_F_NETNS_LOCAL;
}
static struct net_device *ip6mr_reg_vif(struct net *net, struct mr6_table *mrt)
{
	struct net_device *dev;
	char name[IFNAMSIZ];

	if (mrt->id == RT6_TABLE_DFLT)
		sprintf(name, "pim6reg");
	else
		sprintf(name, "pim6reg%u", mrt->id);

	dev = alloc_netdev(0, name, reg_vif_setup);
	if (dev == NULL)
		return NULL;

	dev_net_set(dev, net);

	if (register_netdevice(dev)) {
		free_netdev(dev);
		return NULL;
	}
	dev->iflink = 0;

	if (dev_open(dev))
		goto failure;

	dev_hold(dev);
	return dev;

failure:
	/* allow the register to be completed before unregistering. */
	rtnl_unlock();
	rtnl_lock();

	unregister_netdevice(dev);
	return NULL;
}
#endif
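/*
 *	Delete a VIF entry
 */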
static int mif6_delete(struct mr6_table *mrt, int vifi, struct list_head *head)
{
	struct mif_device *v;
	struct net_device *dev;
	struct inet6_dev *in6_dev;

	if (vifi < 0 || vifi >= mrt->maxvif)
		return -EADDRNOTAVAIL;

	v = &mrt->vif6_table[vifi];

	write_lock_bh(&mrt_lock);
	dev = v->dev;
	v->dev = NULL;

	if (!dev) {
		write_unlock_bh(&mrt_lock);
		return -EADDRNOTAVAIL;
	}

#ifdef CONFIG_IPV6_PIMSM_V2
	if (vifi == mrt->mroute_reg_vif_num)
		mrt->mroute_reg_vif_num = -1;
#endif

	if (vifi + 1 == mrt->maxvif) {
		int tmp;
		for (tmp = vifi - 1; tmp >= 0; tmp--) {
			if (MIF_EXISTS(mrt, tmp))
				break;
		}
		mrt->maxvif = tmp + 1;
	}

	write_unlock_bh(&mrt_lock);

	dev_set_allmulti(dev, -1);

	in6_dev = __in6_dev_get(dev);
	if (in6_dev)
		in6_dev->cnf.mc_forwarding--;

	if (v->flags & MIFF_REGISTER)
		unregister_netdevice_queue(dev, head);

	dev_put(dev);
	return 0;
}
static inline void ip6mr_cache_free(struct mfc6_cache *c)
{
	kmem_cache_free(mrt_cachep, c);
}
/* Destroy an unresolved cache entry, killing queued skbs
   and reporting error to netlink readers.
 */

static void ip6mr_destroy_unres(struct mr6_table *mrt, struct mfc6_cache *c)
{
	struct net *net = read_pnet(&mrt->net);
	struct sk_buff *skb;

	atomic_dec(&mrt->cache_resolve_queue_len);

	while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved)) != NULL) {
		if (ipv6_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct ipv6hdr));
			nlh->nlmsg_type = NLMSG_ERROR;
			nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
			skb_trim(skb, nlh->nlmsg_len);
			((struct nlmsgerr *)NLMSG_DATA(nlh))->error = -ETIMEDOUT;
			rtnl_unicast(skb, net, NETLINK_CB(skb).pid);
		} else
			kfree_skb(skb);
	}

	ip6mr_cache_free(c);
}
/* Timer process for all the unresolved queue. */

static void ipmr_do_expire_process(struct mr6_table *mrt)
{
	unsigned long now = jiffies;
	unsigned long expires = 10 * HZ;
	struct mfc6_cache *c, *next;

	list_for_each_entry_safe(c, next, &mrt->mfc6_unres_queue, list) {
		if (time_after(c->mfc_un.unres.expires, now)) {
			/* not yet expired */
			unsigned long interval = c->mfc_un.unres.expires - now;
			if (interval < expires)
				expires = interval;
			continue;
		}

		list_del(&c->list);
		ip6mr_destroy_unres(mrt, c);
	}

	if (!list_empty(&mrt->mfc6_unres_queue))
		mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);
}
static void ipmr_expire_process(unsigned long arg)
{
	struct mr6_table *mrt = (struct mr6_table *)arg;

	if (!spin_trylock(&mfc_unres_lock)) {
		mod_timer(&mrt->ipmr_expire_timer, jiffies + 1);
		return;
	}

	if (!list_empty(&mrt->mfc6_unres_queue))
		ipmr_do_expire_process(mrt);

	spin_unlock(&mfc_unres_lock);
}
/* Fill oifs list. It is called under write locked mrt_lock. */

static void ip6mr_update_thresholds(struct mr6_table *mrt, struct mfc6_cache *cache,
				    unsigned char *ttls)
{
	int vifi;

	cache->mfc_un.res.minvif = MAXMIFS;
	cache->mfc_un.res.maxvif = 0;
	memset(cache->mfc_un.res.ttls, 255, MAXMIFS);

	for (vifi = 0; vifi < mrt->maxvif; vifi++) {
		if (MIF_EXISTS(mrt, vifi) &&
		    ttls[vifi] && ttls[vifi] < 255) {
			cache->mfc_un.res.ttls[vifi] = ttls[vifi];
			if (cache->mfc_un.res.minvif > vifi)
				cache->mfc_un.res.minvif = vifi;
			if (cache->mfc_un.res.maxvif <= vifi)
				cache->mfc_un.res.maxvif = vifi + 1;
		}
	}
}
static int mif6_add(struct net *net, struct mr6_table *mrt,
		    struct mif6ctl *vifc, int mrtsock)
{
	int vifi = vifc->mif6c_mifi;
	struct mif_device *v = &mrt->vif6_table[vifi];
	struct net_device *dev;
	struct inet6_dev *in6_dev;
	int err;

	/* Is vif busy ? */
	if (MIF_EXISTS(mrt, vifi))
		return -EADDRINUSE;

	switch (vifc->mif6c_flags) {
#ifdef CONFIG_IPV6_PIMSM_V2
	case MIFF_REGISTER:
		/*
		 * Special Purpose VIF in PIM
		 * All the packets will be sent to the daemon
		 */
		if (mrt->mroute_reg_vif_num >= 0)
			return -EADDRINUSE;
		dev = ip6mr_reg_vif(net, mrt);
		if (!dev)
			return -ENOBUFS;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			unregister_netdevice(dev);
			dev_put(dev);
			return err;
		}
		break;
#endif
	case 0:
		dev = dev_get_by_index(net, vifc->mif6c_pifi);
		if (!dev)
			return -EADDRNOTAVAIL;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			dev_put(dev);
			return err;
		}
		break;
	default:
		return -EINVAL;
	}

	in6_dev = __in6_dev_get(dev);
	if (in6_dev)
		in6_dev->cnf.mc_forwarding++;

	/*
	 *	Fill in the VIF structures
	 */
	v->rate_limit = vifc->vifc_rate_limit;
	v->flags = vifc->mif6c_flags;
	if (!mrtsock)
		v->flags |= VIFF_STATIC;
	v->threshold = vifc->vifc_threshold;
	v->bytes_in = 0;
	v->bytes_out = 0;
	v->pkt_in = 0;
	v->pkt_out = 0;
	v->link = dev->ifindex;
	if (v->flags & MIFF_REGISTER)
		v->link = dev->iflink;

	/* And finish update writing critical data */
	write_lock_bh(&mrt_lock);
	v->dev = dev;
#ifdef CONFIG_IPV6_PIMSM_V2
	if (v->flags & MIFF_REGISTER)
		mrt->mroute_reg_vif_num = vifi;
#endif
	if (vifi + 1 > mrt->maxvif)
		mrt->maxvif = vifi + 1;
	write_unlock_bh(&mrt_lock);
	return 0;
}
static struct mfc6_cache *ip6mr_cache_find(struct mr6_table *mrt,
					   struct in6_addr *origin,
					   struct in6_addr *mcastgrp)
{
	int line = MFC6_HASH(mcastgrp, origin);
	struct mfc6_cache *c;

	list_for_each_entry(c, &mrt->mfc6_cache_array[line], list) {
		if (ipv6_addr_equal(&c->mf6c_origin, origin) &&
		    ipv6_addr_equal(&c->mf6c_mcastgrp, mcastgrp))
			return c;
	}
	return NULL;
}
/*
 *	Allocate a multicast cache entry
 */
static struct mfc6_cache *ip6mr_cache_alloc(void)
{
	struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
	if (c == NULL)
		return NULL;
	c->mfc_un.res.minvif = MAXMIFS;
	return c;
}
static struct mfc6_cache *ip6mr_cache_alloc_unres(void)
{
	struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
	if (c == NULL)
		return NULL;
	skb_queue_head_init(&c->mfc_un.unres.unresolved);
	c->mfc_un.unres.expires = jiffies + 10 * HZ;
	return c;
}
/*
 *	A cache entry has gone into a resolved state from queued
 */

static void ip6mr_cache_resolve(struct net *net, struct mr6_table *mrt,
				struct mfc6_cache *uc, struct mfc6_cache *c)
{
	struct sk_buff *skb;

	/*
	 *	Play the pending entries through our router
	 */

	while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
		if (ipv6_hdr(skb)->version == 0) {
			int err;
			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct ipv6hdr));

			if (__ip6mr_fill_mroute(mrt, skb, c, NLMSG_DATA(nlh)) > 0) {
				nlh->nlmsg_len = skb_tail_pointer(skb) - (u8 *)nlh;
			} else {
				nlh->nlmsg_type = NLMSG_ERROR;
				nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
				skb_trim(skb, nlh->nlmsg_len);
				((struct nlmsgerr *)NLMSG_DATA(nlh))->error = -EMSGSIZE;
			}
			err = rtnl_unicast(skb, net, NETLINK_CB(skb).pid);
		} else
			ip6_mr_forward(net, mrt, skb, c);
	}
}
/*
 *	Bounce a cache query up to pim6sd. We could use netlink for this but pim6sd
 *	expects the following bizarre scheme.
 *
 *	Called under mrt_lock.
 */

static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt,
			      mifi_t mifi, int assert)
{
	struct sk_buff *skb;
	struct mrt6msg *msg;
	int ret;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (assert == MRT6MSG_WHOLEPKT)
		skb = skb_realloc_headroom(pkt, -skb_network_offset(pkt)
						+sizeof(*msg));
	else
#endif
		skb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(*msg), GFP_ATOMIC);

	if (!skb)
		return -ENOBUFS;

	/* I suppose that internal messages
	 * do not require checksums */

	skb->ip_summed = CHECKSUM_UNNECESSARY;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (assert == MRT6MSG_WHOLEPKT) {
		/* Ugly, but we have no choice with this interface.
		   Duplicate old header, fix length etc.
		   And all this only to mangle msg->im6_msgtype and
		   to set msg->im6_mbz to "mbz" :-)
		 */
		skb_push(skb, -skb_network_offset(pkt));

		skb_push(skb, sizeof(*msg));
		skb_reset_transport_header(skb);
		msg = (struct mrt6msg *)skb_transport_header(skb);
		msg->im6_mbz = 0;
		msg->im6_msgtype = MRT6MSG_WHOLEPKT;
		msg->im6_mif = mrt->mroute_reg_vif_num;
		msg->im6_pad = 0;
		ipv6_addr_copy(&msg->im6_src, &ipv6_hdr(pkt)->saddr);
		ipv6_addr_copy(&msg->im6_dst, &ipv6_hdr(pkt)->daddr);

		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else
#endif
	{
		/*
		 *	Copy the IP header
		 */

		skb_put(skb, sizeof(struct ipv6hdr));
		skb_reset_network_header(skb);
		skb_copy_to_linear_data(skb, ipv6_hdr(pkt), sizeof(struct ipv6hdr));

		/*
		 *	Add our header
		 */
		skb_put(skb, sizeof(*msg));
		skb_reset_transport_header(skb);
		msg = (struct mrt6msg *)skb_transport_header(skb);

		msg->im6_mbz = 0;
		msg->im6_msgtype = assert;
		msg->im6_mif = mifi;
		msg->im6_pad = 0;
		ipv6_addr_copy(&msg->im6_src, &ipv6_hdr(pkt)->saddr);
		ipv6_addr_copy(&msg->im6_dst, &ipv6_hdr(pkt)->daddr);

		skb_dst_set(skb, dst_clone(skb_dst(pkt)));
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	if (mrt->mroute6_sk == NULL) {
		kfree_skb(skb);
		return -EINVAL;
	}

	/*
	 *	Deliver to user space multicast routing algorithms
	 */
	ret = sock_queue_rcv_skb(mrt->mroute6_sk, skb);
	if (ret < 0) {
		if (net_ratelimit())
			printk(KERN_WARNING "mroute6: pending queue full, dropping entries.\n");
		kfree_skb(skb);
	}

	return ret;
}
/*
 *	Queue a packet for resolution. It gets locked cache entry!
 */

static int
ip6mr_cache_unresolved(struct mr6_table *mrt, mifi_t mifi, struct sk_buff *skb)
{
	bool found = false;
	int err;
	struct mfc6_cache *c;

	spin_lock_bh(&mfc_unres_lock);
	list_for_each_entry(c, &mrt->mfc6_unres_queue, list) {
		if (ipv6_addr_equal(&c->mf6c_mcastgrp, &ipv6_hdr(skb)->daddr) &&
		    ipv6_addr_equal(&c->mf6c_origin, &ipv6_hdr(skb)->saddr)) {
			found = true;
			break;
		}
	}

	if (!found) {
		/*
		 *	Create a new entry if allowable
		 */

		if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 ||
		    (c = ip6mr_cache_alloc_unres()) == NULL) {
			spin_unlock_bh(&mfc_unres_lock);

			kfree_skb(skb);
			return -ENOBUFS;
		}

		/*
		 *	Fill in the new cache entry
		 */
		c->mf6c_parent = -1;
		c->mf6c_origin = ipv6_hdr(skb)->saddr;
		c->mf6c_mcastgrp = ipv6_hdr(skb)->daddr;

		/*
		 *	Reflect first query at pim6sd
		 */
		err = ip6mr_cache_report(mrt, skb, mifi, MRT6MSG_NOCACHE);
		if (err < 0) {
			/* If the report failed throw the cache entry
			   out - Brad Parker
			 */
			spin_unlock_bh(&mfc_unres_lock);

			ip6mr_cache_free(c);
			kfree_skb(skb);
			return err;
		}

		atomic_inc(&mrt->cache_resolve_queue_len);
		list_add(&c->list, &mrt->mfc6_unres_queue);

		ipmr_do_expire_process(mrt);
	}

	/*
	 *	See if we can append the packet
	 */
	if (c->mfc_un.unres.unresolved.qlen > 3) {
		kfree_skb(skb);
		err = -ENOBUFS;
	} else {
		skb_queue_tail(&c->mfc_un.unres.unresolved, skb);
		err = 0;
	}

	spin_unlock_bh(&mfc_unres_lock);
	return err;
}
/*
 *	MFC6 cache manipulation by user space
 */

static int ip6mr_mfc_delete(struct mr6_table *mrt, struct mf6cctl *mfc)
{
	int line;
	struct mfc6_cache *c, *next;

	line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr);

	list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[line], list) {
		if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
		    ipv6_addr_equal(&c->mf6c_mcastgrp, &mfc->mf6cc_mcastgrp.sin6_addr)) {
			write_lock_bh(&mrt_lock);
			list_del(&c->list);
			write_unlock_bh(&mrt_lock);

			ip6mr_cache_free(c);
			return 0;
		}
	}
	return -ENOENT;
}
static int ip6mr_device_event(struct notifier_block *this,
			      unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;
	struct net *net = dev_net(dev);
	struct mr6_table *mrt;
	struct mif_device *v;
	int ct;
	LIST_HEAD(list);

	if (event != NETDEV_UNREGISTER)
		return NOTIFY_DONE;

	ip6mr_for_each_table(mrt, net) {
		v = &mrt->vif6_table[0];
		for (ct = 0; ct < mrt->maxvif; ct++, v++) {
			if (v->dev == dev)
				mif6_delete(mrt, ct, &list);
		}
	}
	unregister_netdevice_many(&list);

	return NOTIFY_DONE;
}
static struct notifier_block ip6_mr_notifier = {
	.notifier_call = ip6mr_device_event
};
/*
 *	Setup for IP multicast routing
 */

static int __net_init ip6mr_net_init(struct net *net)
{
	int err;

	err = ip6mr_rules_init(net);
	if (err < 0)
		goto fail;

#ifdef CONFIG_PROC_FS
	err = -ENOMEM;
	if (!proc_net_fops_create(net, "ip6_mr_vif", 0, &ip6mr_vif_fops))
		goto proc_vif_fail;
	if (!proc_net_fops_create(net, "ip6_mr_cache", 0, &ip6mr_mfc_fops))
		goto proc_cache_fail;
#endif

	return 0;

#ifdef CONFIG_PROC_FS
proc_cache_fail:
	proc_net_remove(net, "ip6_mr_vif");
proc_vif_fail:
	ip6mr_rules_exit(net);
#endif
fail:
	return err;
}

static void __net_exit ip6mr_net_exit(struct net *net)
{
#ifdef CONFIG_PROC_FS
	proc_net_remove(net, "ip6_mr_cache");
	proc_net_remove(net, "ip6_mr_vif");
#endif
	ip6mr_rules_exit(net);
}

static struct pernet_operations ip6mr_net_ops = {
	.init = ip6mr_net_init,
	.exit = ip6mr_net_exit,
};
int __init ip6_mr_init(void)
{
	int err;

	mrt_cachep = kmem_cache_create("ip6_mrt_cache",
				       sizeof(struct mfc6_cache),
				       0, SLAB_HWCACHE_ALIGN,
				       NULL);
	if (!mrt_cachep)
		return -ENOMEM;

	err = register_pernet_subsys(&ip6mr_net_ops);
	if (err)
		goto reg_pernet_fail;

	err = register_netdevice_notifier(&ip6_mr_notifier);
	if (err)
		goto reg_notif_fail;
#ifdef CONFIG_IPV6_PIMSM_V2
	if (inet6_add_protocol(&pim6_protocol, IPPROTO_PIM) < 0) {
		printk(KERN_ERR "ip6_mr_init: can't add PIM protocol\n");
		err = -EAGAIN;
		goto add_proto_fail;
	}
#endif
	rtnl_register(RTNL_FAMILY_IP6MR, RTM_GETROUTE, NULL, ip6mr_rtm_dumproute);
	return 0;
#ifdef CONFIG_IPV6_PIMSM_V2
add_proto_fail:
	unregister_netdevice_notifier(&ip6_mr_notifier);
#endif
reg_notif_fail:
	unregister_pernet_subsys(&ip6mr_net_ops);
reg_pernet_fail:
	kmem_cache_destroy(mrt_cachep);
	return err;
}
void ip6_mr_cleanup(void)
{
	unregister_netdevice_notifier(&ip6_mr_notifier);
	unregister_pernet_subsys(&ip6mr_net_ops);
	kmem_cache_destroy(mrt_cachep);
}
static int ip6mr_mfc_add(struct net *net, struct mr6_table *mrt,
			 struct mf6cctl *mfc, int mrtsock)
{
	bool found = false;
	int line;
	struct mfc6_cache *uc, *c;
	unsigned char ttls[MAXMIFS];
	int i;

	if (mfc->mf6cc_parent >= MAXMIFS)
		return -ENFILE;

	memset(ttls, 255, MAXMIFS);
	for (i = 0; i < MAXMIFS; i++) {
		if (IF_ISSET(i, &mfc->mf6cc_ifset))
			ttls[i] = 1;
	}

	line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr);

	list_for_each_entry(c, &mrt->mfc6_cache_array[line], list) {
		if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
		    ipv6_addr_equal(&c->mf6c_mcastgrp, &mfc->mf6cc_mcastgrp.sin6_addr)) {
			found = true;
			break;
		}
	}

	if (found) {
		write_lock_bh(&mrt_lock);
		c->mf6c_parent = mfc->mf6cc_parent;
		ip6mr_update_thresholds(mrt, c, ttls);
		if (!mrtsock)
			c->mfc_flags |= MFC_STATIC;
		write_unlock_bh(&mrt_lock);
		return 0;
	}

	if (!ipv6_addr_is_multicast(&mfc->mf6cc_mcastgrp.sin6_addr))
		return -EINVAL;

	c = ip6mr_cache_alloc();
	if (c == NULL)
		return -ENOMEM;

	c->mf6c_origin = mfc->mf6cc_origin.sin6_addr;
	c->mf6c_mcastgrp = mfc->mf6cc_mcastgrp.sin6_addr;
	c->mf6c_parent = mfc->mf6cc_parent;
	ip6mr_update_thresholds(mrt, c, ttls);
	if (!mrtsock)
		c->mfc_flags |= MFC_STATIC;

	write_lock_bh(&mrt_lock);
	list_add(&c->list, &mrt->mfc6_cache_array[line]);
	write_unlock_bh(&mrt_lock);

	/*
	 *	Check to see if we resolved a queued list. If so we
	 *	need to send on the frames and tidy up.
	 */
	found = false;
	spin_lock_bh(&mfc_unres_lock);
	list_for_each_entry(uc, &mrt->mfc6_unres_queue, list) {
		if (ipv6_addr_equal(&uc->mf6c_origin, &c->mf6c_origin) &&
		    ipv6_addr_equal(&uc->mf6c_mcastgrp, &c->mf6c_mcastgrp)) {
			list_del(&uc->list);
			atomic_dec(&mrt->cache_resolve_queue_len);
			found = true;
			break;
		}
	}
	if (list_empty(&mrt->mfc6_unres_queue))
		del_timer(&mrt->ipmr_expire_timer);
	spin_unlock_bh(&mfc_unres_lock);

	if (found) {
		ip6mr_cache_resolve(net, mrt, uc, c);
		ip6mr_cache_free(uc);
	}
	return 0;
}
/*
 *	Close the multicast socket, and clear the vif tables etc
 */

static void mroute_clean_tables(struct mr6_table *mrt)
{
	int i;
	LIST_HEAD(list);
	struct mfc6_cache *c, *next;

	/*
	 *	Shut down all active vif entries
	 */
	for (i = 0; i < mrt->maxvif; i++) {
		if (!(mrt->vif6_table[i].flags & VIFF_STATIC))
			mif6_delete(mrt, i, &list);
	}
	unregister_netdevice_many(&list);

	/*
	 *	Wipe the cache
	 */
	for (i = 0; i < MFC6_LINES; i++) {
		list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[i], list) {
			if (c->mfc_flags & MFC_STATIC)
				continue;
			write_lock_bh(&mrt_lock);
			list_del(&c->list);
			write_unlock_bh(&mrt_lock);

			ip6mr_cache_free(c);
		}
	}

	if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
		spin_lock_bh(&mfc_unres_lock);
		list_for_each_entry_safe(c, next, &mrt->mfc6_unres_queue, list) {
			list_del(&c->list);
			ip6mr_destroy_unres(mrt, c);
		}
		spin_unlock_bh(&mfc_unres_lock);
	}
}
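/* Register 'sk' as the multicast routing daemon socket for this table
 * and enable mc_forwarding on the namespace.
 */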
static int ip6mr_sk_init(struct mr6_table *mrt, struct sock *sk)
{
	int err = 0;
	struct net *net = sock_net(sk);

	rtnl_lock();
	write_lock_bh(&mrt_lock);
	if (likely(mrt->mroute6_sk == NULL)) {
		mrt->mroute6_sk = sk;
		net->ipv6.devconf_all->mc_forwarding++;
	}
	else
		err = -EADDRINUSE;
	write_unlock_bh(&mrt_lock);

	rtnl_unlock();

	return err;
}
int ip6mr_sk_done(struct sock *sk)
{
	int err = -EACCES;
	struct net *net = sock_net(sk);
	struct mr6_table *mrt;

	rtnl_lock();
	ip6mr_for_each_table(mrt, net) {
		if (sk == mrt->mroute6_sk) {
			write_lock_bh(&mrt_lock);
			mrt->mroute6_sk = NULL;
			net->ipv6.devconf_all->mc_forwarding--;
			write_unlock_bh(&mrt_lock);

			mroute_clean_tables(mrt);
			err = 0;
			break;
		}
	}
	rtnl_unlock();

	return err;
}
struct sock *mroute6_socket(struct net *net, struct sk_buff *skb)
{
	struct mr6_table *mrt;
	struct flowi fl = {
		.iif	= skb->skb_iif,
		.oif	= skb->dev->ifindex,
		.mark	= skb->mark,
	};

	if (ip6mr_fib_lookup(net, &fl, &mrt) < 0)
		return NULL;

	return mrt->mroute6_sk;
}
/*
 *	Socket options and virtual interface manipulation. The whole
 *	virtual interface system is a complete heap, but unfortunately
 *	that's how BSD mrouted happens to think. Maybe one day with a proper
 *	MOSPF/PIM router set up we can clean this up.
 */

int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen)
{
	int ret;
	struct mif6ctl vif;
	struct mf6cctl mfc;
	mifi_t mifi;
	struct net *net = sock_net(sk);
	struct mr6_table *mrt;

	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
	if (mrt == NULL)
		return -ENOENT;

	if (optname != MRT6_INIT) {
		if (sk != mrt->mroute6_sk && !capable(CAP_NET_ADMIN))
			return -EACCES;
	}

	switch (optname) {
	case MRT6_INIT:
		if (sk->sk_type != SOCK_RAW ||
		    inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
			return -EOPNOTSUPP;
		if (optlen < sizeof(int))
			return -EINVAL;

		return ip6mr_sk_init(mrt, sk);

	case MRT6_DONE:
		return ip6mr_sk_done(sk);

	case MRT6_ADD_MIF:
		if (optlen < sizeof(vif))
			return -EINVAL;
		if (copy_from_user(&vif, optval, sizeof(vif)))
			return -EFAULT;
		if (vif.mif6c_mifi >= MAXMIFS)
			return -ENFILE;
		rtnl_lock();
		ret = mif6_add(net, mrt, &vif, sk == mrt->mroute6_sk);
		rtnl_unlock();
		return ret;

	case MRT6_DEL_MIF:
		if (optlen < sizeof(mifi_t))
			return -EINVAL;
		if (copy_from_user(&mifi, optval, sizeof(mifi_t)))
			return -EFAULT;
		rtnl_lock();
		ret = mif6_delete(mrt, mifi, NULL);
		rtnl_unlock();
		return ret;

	/*
	 *	Manipulate the forwarding caches. These live
	 *	in a sort of kernel/user symbiosis.
	 */
	case MRT6_ADD_MFC:
	case MRT6_DEL_MFC:
		if (optlen < sizeof(mfc))
			return -EINVAL;
		if (copy_from_user(&mfc, optval, sizeof(mfc)))
			return -EFAULT;
		rtnl_lock();
		if (optname == MRT6_DEL_MFC)
			ret = ip6mr_mfc_delete(mrt, &mfc);
		else
			ret = ip6mr_mfc_add(net, mrt, &mfc, sk == mrt->mroute6_sk);
		rtnl_unlock();
		return ret;

	/*
	 *	Control PIM assert (to activate pim will activate assert)
	 */
	case MRT6_ASSERT:
	{
		int v;
		if (get_user(v, (int __user *)optval))
			return -EFAULT;
		mrt->mroute_do_assert = !!v;
		return 0;
	}

#ifdef CONFIG_IPV6_PIMSM_V2
	case MRT6_PIM:
	{
		int v;
		if (get_user(v, (int __user *)optval))
			return -EFAULT;
		v = !!v;
		ret = 0;
		if (v != mrt->mroute_do_pim) {
			mrt->mroute_do_pim = v;
			mrt->mroute_do_assert = v;
		}
		return ret;
	}
#endif

#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
	case MRT6_TABLE:
	{
		u32 v;

		if (optlen != sizeof(u32))
			return -EINVAL;
		if (get_user(v, (u32 __user *)optval))
			return -EFAULT;
		if (sk == mrt->mroute6_sk)
			return -EBUSY;

		ret = 0;
		if (!ip6mr_new_table(net, v))
			ret = -ENOMEM;
		raw6_sk(sk)->ip6mr_table = v;
		return ret;
	}
#endif
	/*
	 *	Spurious command, or MRT6_VERSION which you cannot
	 *	set.
	 */
	default:
		return -ENOPROTOOPT;
	}
}
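/*
 * A minimal user-space sketch of the expected calling sequence (an
 * illustration only, not part of this file; error handling and real
 * interface indexes omitted):
 *
 *	int s = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);
 *	int v = 1;
 *	setsockopt(s, IPPROTO_IPV6, MRT6_INIT, &v, sizeof(v));
 *
 *	struct mif6ctl vc = {
 *		.mif6c_mifi = 0,			 / * MIF slot to fill * /
 *		.mif6c_pifi = if_nametoindex("eth0"),	 / * physical interface * /
 *	};
 *	setsockopt(s, IPPROTO_IPV6, MRT6_ADD_MIF, &vc, sizeof(vc));
 */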
/*
 *	Getsock opt support for the multicast routing system.
 */

int ip6_mroute_getsockopt(struct sock *sk, int optname, char __user *optval,
			  int __user *optlen)
{
	int olr;
	int val;
	struct net *net = sock_net(sk);
	struct mr6_table *mrt;

	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
	if (mrt == NULL)
		return -ENOENT;

	switch (optname) {
	case MRT6_VERSION:
		val = 0x0305;
		break;
#ifdef CONFIG_IPV6_PIMSM_V2
	case MRT6_PIM:
		val = mrt->mroute_do_pim;
		break;
#endif
	case MRT6_ASSERT:
		val = mrt->mroute_do_assert;
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (get_user(olr, optlen))
		return -EFAULT;

	olr = min_t(int, olr, sizeof(int));
	if (olr < 0)
		return -EINVAL;

	if (put_user(olr, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, olr))
		return -EFAULT;
	return 0;
}
/*
 *	The IP multicast ioctl support routines.
 */

int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg)
{
	struct sioc_sg_req6 sr;
	struct sioc_mif_req6 vr;
	struct mif_device *vif;
	struct mfc6_cache *c;
	struct net *net = sock_net(sk);
	struct mr6_table *mrt;

	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
	if (mrt == NULL)
		return -ENOENT;

	switch (cmd) {
	case SIOCGETMIFCNT_IN6:
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.mifi >= mrt->maxvif)
			return -EINVAL;
		read_lock(&mrt_lock);
		vif = &mrt->vif6_table[vr.mifi];
		if (MIF_EXISTS(mrt, vr.mifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT_IN6:
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		read_lock(&mrt_lock);
		c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
		if (c) {
			sr.pktcnt = c->mfc_un.res.pkt;
			sr.bytecnt = c->mfc_un.res.bytes;
			sr.wrong_if = c->mfc_un.res.wrong_if;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}
static inline int ip6mr_forward2_finish(struct sk_buff *skb)
{
	IP6_INC_STATS_BH(dev_net(skb_dst(skb)->dev), ip6_dst_idev(skb_dst(skb)),
			 IPSTATS_MIB_OUTFORWDATAGRAMS);
	return dst_output(skb);
}
/*
 *	Processing handlers for ip6mr_forward
 */

static int ip6mr_forward2(struct net *net, struct mr6_table *mrt,
			  struct sk_buff *skb, struct mfc6_cache *c, int vifi)
{
	struct ipv6hdr *ipv6h;
	struct mif_device *vif = &mrt->vif6_table[vifi];
	struct net_device *dev;
	struct dst_entry *dst;
	struct flowi fl;

	if (vif->dev == NULL)
		goto out_free;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (vif->flags & MIFF_REGISTER) {
		vif->pkt_out++;
		vif->bytes_out += skb->len;
		vif->dev->stats.tx_bytes += skb->len;
		vif->dev->stats.tx_packets++;
		ip6mr_cache_report(mrt, skb, vifi, MRT6MSG_WHOLEPKT);
		goto out_free;
	}
#endif

	ipv6h = ipv6_hdr(skb);

	fl = (struct flowi) {
		.oif = vif->link,
		.nl_u = { .ip6_u =
				{ .daddr = ipv6h->daddr, }
		}
	};

	dst = ip6_route_output(net, NULL, &fl);
	if (!dst)
		goto out_free;

	skb_dst_drop(skb);
	skb_dst_set(skb, dst);

	/*
	 * RFC1584 teaches that a DVMRP/PIM router must deliver packets
	 * locally not only before forwarding, but after forwarding on all
	 * output interfaces. Clearly, if an mrouter runs a multicasting
	 * program, it should receive packets regardless of which interface
	 * the program has joined on.
	 * If we do not make it so, the program would have to join on all
	 * interfaces. On the other hand, a multihoming host (or router, but
	 * not mrouter) cannot join on more than one interface - it would
	 * result in receiving multiple packets.
	 */
	dev = vif->dev;
	skb->dev = dev;
	vif->pkt_out++;
	vif->bytes_out += skb->len;

	/* We are about to write */
	/* XXX: extension headers? */
	if (skb_cow(skb, sizeof(*ipv6h) + LL_RESERVED_SPACE(dev)))
		goto out_free;

	ipv6h = ipv6_hdr(skb);
	ipv6h->hop_limit--;

	IP6CB(skb)->flags |= IP6SKB_FORWARDED;

	return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, skb, skb->dev, dev,
		       ip6mr_forward2_finish);

out_free:
	kfree_skb(skb);
	return 0;
}
static int ip6mr_find_vif(struct mr6_table *mrt, struct net_device *dev)
{
	int ct;

	for (ct = mrt->maxvif - 1; ct >= 0; ct--) {
		if (mrt->vif6_table[ct].dev == dev)
			break;
	}
	return ct;
}
static int ip6_mr_forward(struct net *net, struct mr6_table *mrt,
			  struct sk_buff *skb, struct mfc6_cache *cache)
{
	int psend = -1;
	int vif, ct;

	vif = cache->mf6c_parent;
	cache->mfc_un.res.pkt++;
	cache->mfc_un.res.bytes += skb->len;

	/*
	 * Wrong interface: drop packet and (maybe) send PIM assert.
	 */
	if (mrt->vif6_table[vif].dev != skb->dev) {
		int true_vifi;

		cache->mfc_un.res.wrong_if++;
		true_vifi = ip6mr_find_vif(mrt, skb->dev);

		if (true_vifi >= 0 && mrt->mroute_do_assert &&
		    /* pimsm uses asserts, when switching from RPT to SPT,
		       so that we cannot check that packet arrived on an oif.
		       It is bad, but otherwise we would need to move pretty
		       large chunk of pimd to kernel. Ough... --ANK
		     */
		    (mrt->mroute_do_pim ||
		     cache->mfc_un.res.ttls[true_vifi] < 255) &&
		    time_after(jiffies,
			       cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
			cache->mfc_un.res.last_assert = jiffies;
			ip6mr_cache_report(mrt, skb, true_vifi, MRT6MSG_WRONGMIF);
		}
		goto dont_forward;
	}

	mrt->vif6_table[vif].pkt_in++;
	mrt->vif6_table[vif].bytes_in += skb->len;

	/*
	 *	Forward the frame
	 */
	for (ct = cache->mfc_un.res.maxvif - 1; ct >= cache->mfc_un.res.minvif; ct--) {
		if (ipv6_hdr(skb)->hop_limit > cache->mfc_un.res.ttls[ct]) {
			if (psend != -1) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2)
					ip6mr_forward2(net, mrt, skb2, cache, psend);
			}
			psend = ct;
		}
	}
	if (psend != -1) {
		ip6mr_forward2(net, mrt, skb, cache, psend);
		return 0;
	}

dont_forward:
	kfree_skb(skb);
	return 0;
}
/*
 *	Multicast packets for forwarding arrive here
 */

int ip6_mr_input(struct sk_buff *skb)
{
	struct mfc6_cache *cache;
	struct net *net = dev_net(skb->dev);
	struct mr6_table *mrt;
	struct flowi fl = {
		.iif	= skb->dev->ifindex,
		.mark	= skb->mark,
	};
	int err;

	err = ip6mr_fib_lookup(net, &fl, &mrt);
	if (err < 0)
		return err;

	read_lock(&mrt_lock);
	cache = ip6mr_cache_find(mrt,
				 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr);

	/*
	 *	No usable cache entry
	 */
	if (cache == NULL) {
		int vif;

		vif = ip6mr_find_vif(mrt, skb->dev);
		if (vif >= 0) {
			int err = ip6mr_cache_unresolved(mrt, vif, skb);
			read_unlock(&mrt_lock);

			return err;
		}
		read_unlock(&mrt_lock);
		kfree_skb(skb);
		return -ENODEV;
	}

	ip6_mr_forward(net, mrt, skb, cache);

	read_unlock(&mrt_lock);

	return 0;
}
static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
			       struct mfc6_cache *c, struct rtmsg *rtm)
{
	int ct;
	struct rtnexthop *nhp;
	u8 *b = skb_tail_pointer(skb);
	struct rtattr *mp_head;

	/* If cache is unresolved, don't try to parse IIF and OIF */
	if (c->mf6c_parent >= MAXMIFS)
		return -ENOENT;

	if (MIF_EXISTS(mrt, c->mf6c_parent))
		RTA_PUT(skb, RTA_IIF, 4, &mrt->vif6_table[c->mf6c_parent].dev->ifindex);

	mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0));

	for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
		if (MIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
			if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4))
				goto rtattr_failure;
			nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp)));
			nhp->rtnh_flags = 0;
			nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
			nhp->rtnh_ifindex = mrt->vif6_table[ct].dev->ifindex;
			nhp->rtnh_len = sizeof(*nhp);
		}
	}
	mp_head->rta_type = RTA_MULTIPATH;
	mp_head->rta_len = skb_tail_pointer(skb) - (u8 *)mp_head;
	rtm->rtm_type = RTN_MULTICAST;
	return 1;

rtattr_failure:
	nlmsg_trim(skb, b);
	return -EMSGSIZE;
}
int ip6mr_get_route(struct net *net,
		    struct sk_buff *skb, struct rtmsg *rtm, int nowait)
{
	int err;
	struct mr6_table *mrt;
	struct mfc6_cache *cache;
	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);

	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
	if (mrt == NULL)
		return -ENOENT;

	read_lock(&mrt_lock);
	cache = ip6mr_cache_find(mrt, &rt->rt6i_src.addr, &rt->rt6i_dst.addr);

	if (!cache) {
		struct sk_buff *skb2;
		struct ipv6hdr *iph;
		struct net_device *dev;
		int vif;

		if (nowait) {
			read_unlock(&mrt_lock);
			return -EAGAIN;
		}

		dev = skb->dev;
		if (dev == NULL || (vif = ip6mr_find_vif(mrt, dev)) < 0) {
			read_unlock(&mrt_lock);
			return -ENODEV;
		}

		/* really correct? */
		skb2 = alloc_skb(sizeof(struct ipv6hdr), GFP_ATOMIC);
		if (!skb2) {
			read_unlock(&mrt_lock);
			return -ENOMEM;
		}

		skb_reset_transport_header(skb2);

		skb_put(skb2, sizeof(struct ipv6hdr));
		skb_reset_network_header(skb2);

		iph = ipv6_hdr(skb2);
		iph->version = 0;
		iph->priority = 0;
		iph->flow_lbl[0] = 0;
		iph->flow_lbl[1] = 0;
		iph->flow_lbl[2] = 0;
		iph->payload_len = 0;
		iph->nexthdr = IPPROTO_NONE;
		iph->hop_limit = 0;
		ipv6_addr_copy(&iph->saddr, &rt->rt6i_src.addr);
		ipv6_addr_copy(&iph->daddr, &rt->rt6i_dst.addr);

		err = ip6mr_cache_unresolved(mrt, vif, skb2);
		read_unlock(&mrt_lock);

		return err;
	}

	if (!nowait && (rtm->rtm_flags & RTM_F_NOTIFY))
		cache->mfc_flags |= MFC_NOTIFY;

	err = __ip6mr_fill_mroute(mrt, skb, cache, rtm);
	read_unlock(&mrt_lock);
	return err;
}
static int ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
			     u32 pid, u32 seq, struct mfc6_cache *c)
{
	struct nlmsghdr *nlh;
	struct rtmsg *rtm;

	nlh = nlmsg_put(skb, pid, seq, RTM_NEWROUTE, sizeof(*rtm), NLM_F_MULTI);
	if (nlh == NULL)
		return -EMSGSIZE;

	rtm = nlmsg_data(nlh);
	rtm->rtm_family   = RTNL_FAMILY_IP6MR;
	rtm->rtm_dst_len  = 128;
	rtm->rtm_src_len  = 128;
	rtm->rtm_tos      = 0;
	rtm->rtm_table    = mrt->id;
	NLA_PUT_U32(skb, RTA_TABLE, mrt->id);
	rtm->rtm_scope    = RT_SCOPE_UNIVERSE;
	rtm->rtm_protocol = RTPROT_UNSPEC;
	rtm->rtm_flags    = 0;

	NLA_PUT(skb, RTA_SRC, 16, &c->mf6c_origin);
	NLA_PUT(skb, RTA_DST, 16, &c->mf6c_mcastgrp);

	if (__ip6mr_fill_mroute(mrt, skb, c, rtm) < 0)
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
static int ip6mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct mr6_table *mrt;
	struct mfc6_cache *mfc;
	unsigned int t = 0, s_t;
	unsigned int h = 0, s_h;
	unsigned int e = 0, s_e;

	s_t = cb->args[0];
	s_h = cb->args[1];
	s_e = cb->args[2];

	read_lock(&mrt_lock);
	ip6mr_for_each_table(mrt, net) {
		if (t < s_t)
			goto next_table;
		if (t > s_t)
			s_h = 0;
		for (h = s_h; h < MFC6_LINES; h++) {
			list_for_each_entry(mfc, &mrt->mfc6_cache_array[h], list) {
				if (e < s_e)
					goto next_entry;
				if (ip6mr_fill_mroute(mrt, skb,
						      NETLINK_CB(cb->skb).pid,
						      cb->nlh->nlmsg_seq,
						      mfc) < 0)
					goto done;
next_entry:
				e++;
			}
			e = 0;
			s_e = 0;
		}
		s_h = 0;
next_table:
		t++;
	}
done:
	read_unlock(&mrt_lock);

	cb->args[2] = e;
	cb->args[1] = h;
	cb->args[0] = t;

	return skb->len;
}