/*
 *	IP multicast routing support for mrouted 3.6/3.8
 *
 *		(c) 1995 Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *	  Linux Consultancy and Custom Driver Development
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Fixes:
 *	Michael Chastain	:	Incorrect size of copying.
 *	Alan Cox		:	Added the cache manager code.
 *	Alan Cox		:	Fixed the clone/copy bug and device race.
 *	Mike McLagan		:	Routing by source.
 *	Malcolm Beattie		:	Buffer handling fixes.
 *	Alexey Kuznetsov	:	Double buffer free and other fixes.
 *	SVR Anand		:	Fixed several multicast bugs and problems.
 *	Alexey Kuznetsov	:	Status, optimisations and more.
 *	Brad Parker		:	Better behaviour on mrouted upcall overflow.
 *	Carlos Picoto		:	PIMv1 support.
 *	Pavlin Ivanov Radoslavov:	PIMv2 Registers must checksum only the PIM header.
 *					Relax this requirement to work with older peers.
 */
#include <asm/system.h>
#include <asm/uaccess.h>
#include <linux/types.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/if_ether.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/route.h>
#include <linux/notifier.h>
#include <linux/if_arp.h>
#include <linux/netfilter_ipv4.h>
#include <net/checksum.h>
#include <net/netlink.h>
#include <net/fib_rules.h>
#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
#define CONFIG_IP_PIMSM	1
#endif

struct mr_table {
	struct list_head	list;
	struct net		*net;
	u32			id;
	struct sock		*mroute_sk;
	struct timer_list	ipmr_expire_timer;
	struct list_head	mfc_unres_queue;
	struct list_head	mfc_cache_array[MFC_LINES];
	struct vif_device	vif_table[MAXVIFS];
	int			maxvif;
	atomic_t		cache_resolve_queue_len;
	int			mroute_do_assert;
	int			mroute_do_pim;
#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
	int			mroute_reg_vif_num;
#endif
};

struct ipmr_rule {
	struct fib_rule		common;
};
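
/*
 * For orientation (summary, not original text): one mr_table exists per
 * multicast routing table (a single one per namespace unless
 * CONFIG_IP_MROUTE_MULTIPLE_TABLES is set).  vif_table[] is indexed by the
 * vifi the daemon hands in via MRT_ADD_VIF, and mfc_cache_array[] is a hash
 * of MFC_LINES buckets keyed on (origin, mcastgrp) through MFC_HASH().
 * Unresolved entries sit on mfc_unres_queue until the daemon installs a
 * route or ipmr_expire_timer fires.
 */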
/*	Big lock, protecting vif table, mrt cache and mroute socket state.
 *	Note that the changes are semaphored via rtnl_lock.
 */

static DEFINE_RWLOCK(mrt_lock);

/*
 *	Multicast router control variables
 */

#define VIF_EXISTS(_mrt, _idx) ((_mrt)->vif_table[_idx].dev != NULL)

/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);

/*	We return to original Alan's scheme. Hash table of resolved
 *	entries is changed only in process context and protected
 *	with weak lock mrt_lock. Queue of unresolved entries is protected
 *	with strong spinlock mfc_unres_lock.
 *
 *	In this case the data path is free of exclusive locks at all.
 */

static struct kmem_cache *mrt_cachep __read_mostly;
static struct mr_table *ipmr_new_table(struct net *net, u32 id);
static int ip_mr_forward(struct net *net, struct mr_table *mrt,
			 struct sk_buff *skb, struct mfc_cache *cache,
			 int local);
static int ipmr_cache_report(struct mr_table *mrt,
			     struct sk_buff *pkt, vifi_t vifi, int assert);
static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
			      struct mfc_cache *c, struct rtmsg *rtm);
static void ipmr_expire_process(unsigned long arg);
#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
#define ipmr_for_each_table(mrt, net) \
	list_for_each_entry_rcu(mrt, &net->ipv4.mr_tables, list)

static struct mr_table *ipmr_get_table(struct net *net, u32 id)
	struct mr_table *mrt;

	ipmr_for_each_table(mrt, net) {

static int ipmr_fib_lookup(struct net *net, struct flowi *flp,
			   struct mr_table **mrt)
	struct ipmr_result res;
	struct fib_lookup_arg arg = { .result = &res, };

	err = fib_rules_lookup(net->ipv4.mr_rules_ops, flp, 0, &arg);

static int ipmr_rule_action(struct fib_rule *rule, struct flowi *flp,
			    int flags, struct fib_lookup_arg *arg)
	struct ipmr_result *res = arg->result;
	struct mr_table *mrt;

	switch (rule->action) {
	case FR_ACT_UNREACHABLE:
	case FR_ACT_PROHIBIT:
	case FR_ACT_BLACKHOLE:

	mrt = ipmr_get_table(rule->fr_net, rule->table);

static int ipmr_rule_match(struct fib_rule *rule, struct flowi *fl, int flags)

static const struct nla_policy ipmr_rule_policy[FRA_MAX + 1] = {

static int ipmr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
			       struct fib_rule_hdr *frh, struct nlattr **tb)

static int ipmr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,

static int ipmr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
			  struct fib_rule_hdr *frh)

static const struct fib_rules_ops __net_initdata ipmr_rules_ops_template = {
	.family		= RTNL_FAMILY_IPMR,
	.rule_size	= sizeof(struct ipmr_rule),
	.addr_size	= sizeof(u32),
	.action		= ipmr_rule_action,
	.match		= ipmr_rule_match,
	.configure	= ipmr_rule_configure,
	.compare	= ipmr_rule_compare,
	.default_pref	= fib_default_rule_pref,
	.fill		= ipmr_rule_fill,
	.nlgroup	= RTNLGRP_IPV4_RULE,
	.policy		= ipmr_rule_policy,
	.owner		= THIS_MODULE,
};

static int __net_init ipmr_rules_init(struct net *net)
	struct fib_rules_ops *ops;
	struct mr_table *mrt;

	ops = fib_rules_register(&ipmr_rules_ops_template, net);

	INIT_LIST_HEAD(&net->ipv4.mr_tables);

	mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);

	err = fib_default_rule_add(ops, 0x7fff, RT_TABLE_DEFAULT, 0);

	net->ipv4.mr_rules_ops = ops;

	fib_rules_unregister(ops);

static void __net_exit ipmr_rules_exit(struct net *net)
	struct mr_table *mrt, *next;

	list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list)
		kfree(mrt);
	fib_rules_unregister(net->ipv4.mr_rules_ops);
#else
#define ipmr_for_each_table(mrt, net) \
	for (mrt = net->ipv4.mrt; mrt; mrt = NULL)

static struct mr_table *ipmr_get_table(struct net *net, u32 id)
	return net->ipv4.mrt;

static int ipmr_fib_lookup(struct net *net, struct flowi *flp,
			   struct mr_table **mrt)
	*mrt = net->ipv4.mrt;

static int __net_init ipmr_rules_init(struct net *net)
	net->ipv4.mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
	return net->ipv4.mrt ? 0 : -ENOMEM;

static void __net_exit ipmr_rules_exit(struct net *net)
	kfree(net->ipv4.mrt);
#endif
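
/*
 * Note: without CONFIG_IP_MROUTE_MULTIPLE_TABLES the helpers above collapse
 * to the single per-namespace table net->ipv4.mrt, so every lookup and every
 * fib-rules hook degenerates to that one table.
 */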
static struct mr_table *ipmr_new_table(struct net *net, u32 id)
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, id);

	mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
	write_pnet(&mrt->net, net);

	/* Forwarding cache */
	for (i = 0; i < MFC_LINES; i++)
		INIT_LIST_HEAD(&mrt->mfc_cache_array[i]);

	INIT_LIST_HEAD(&mrt->mfc_unres_queue);

	setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process,
		    (unsigned long)mrt);

#ifdef CONFIG_IP_PIMSM
	mrt->mroute_reg_vif_num = -1;
#endif
#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
	list_add_tail_rcu(&mrt->list, &net->ipv4.mr_tables);
#endif
	return mrt;
/* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */

static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v)
	struct net *net = dev_net(dev);

	dev = __dev_get_by_name(net, "tunl0");
	const struct net_device_ops *ops = dev->netdev_ops;
	struct ip_tunnel_parm p;

	memset(&p, 0, sizeof(p));
	p.iph.daddr = v->vifc_rmt_addr.s_addr;
	p.iph.saddr = v->vifc_lcl_addr.s_addr;
	p.iph.protocol = IPPROTO_IPIP;
	sprintf(p.name, "dvmrp%d", v->vifc_vifi);
	ifr.ifr_ifru.ifru_data = (__force void __user *)&p;

	if (ops->ndo_do_ioctl) {
		mm_segment_t oldfs = get_fs();

		ops->ndo_do_ioctl(dev, &ifr, SIOCDELTUNNEL);
struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v)
	struct net_device *dev;

	dev = __dev_get_by_name(net, "tunl0");
	const struct net_device_ops *ops = dev->netdev_ops;
	struct ip_tunnel_parm p;
	struct in_device *in_dev;

	memset(&p, 0, sizeof(p));
	p.iph.daddr = v->vifc_rmt_addr.s_addr;
	p.iph.saddr = v->vifc_lcl_addr.s_addr;
	p.iph.protocol = IPPROTO_IPIP;
	sprintf(p.name, "dvmrp%d", v->vifc_vifi);
	ifr.ifr_ifru.ifru_data = (__force void __user *)&p;

	if (ops->ndo_do_ioctl) {
		mm_segment_t oldfs = get_fs();

		err = ops->ndo_do_ioctl(dev, &ifr, SIOCADDTUNNEL);

	    (dev = __dev_get_by_name(net, p.name)) != NULL) {
		dev->flags |= IFF_MULTICAST;

		in_dev = __in_dev_get_rtnl(dev);
		ipv4_devconf_setall(in_dev);
		IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0;

	/* allow the register to be completed before unregistering. */
	unregister_netdevice(dev);
#ifdef CONFIG_IP_PIMSM

static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
	struct net *net = dev_net(dev);
	struct mr_table *mrt;

	err = ipmr_fib_lookup(net, &fl, &mrt);

	read_lock(&mrt_lock);
	dev->stats.tx_bytes += skb->len;
	dev->stats.tx_packets++;
	ipmr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, IGMPMSG_WHOLEPKT);
	read_unlock(&mrt_lock);

static const struct net_device_ops reg_vif_netdev_ops = {
	.ndo_start_xmit	= reg_vif_xmit,
};

static void reg_vif_setup(struct net_device *dev)
	dev->type		= ARPHRD_PIMREG;
	dev->mtu		= ETH_DATA_LEN - sizeof(struct iphdr) - 8;
	dev->flags		= IFF_NOARP;
	dev->netdev_ops		= &reg_vif_netdev_ops;
	dev->destructor		= free_netdev;
	dev->features		|= NETIF_F_NETNS_LOCAL;
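
/*
 * The "pimreg" pseudo-device gives the PIM-SM daemon a way to receive whole
 * multicast packets for Register encapsulation: anything the kernel would
 * transmit on this vif is instead queued to the mroute socket as an
 * IGMPMSG_WHOLEPKT upcall (see reg_vif_xmit() above), and the daemon builds
 * and sends the actual PIM Register message itself.
 */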
static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
	struct net_device *dev;
	struct in_device *in_dev;

	if (mrt->id == RT_TABLE_DEFAULT)
		sprintf(name, "pimreg");
	else
		sprintf(name, "pimreg%u", mrt->id);

	dev = alloc_netdev(0, name, reg_vif_setup);

	dev_net_set(dev, net);

	if (register_netdevice(dev)) {

	if ((in_dev = __in_dev_get_rcu(dev)) == NULL) {

	ipv4_devconf_setall(in_dev);
	IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0;

	/* allow the register to be completed before unregistering. */
	unregister_netdevice(dev);
/* @notify: Set to 1, if the caller is a notifier_call */

static int vif_delete(struct mr_table *mrt, int vifi, int notify,
		      struct list_head *head)
	struct vif_device *v;
	struct net_device *dev;
	struct in_device *in_dev;

	if (vifi < 0 || vifi >= mrt->maxvif)
		return -EADDRNOTAVAIL;

	v = &mrt->vif_table[vifi];

	write_lock_bh(&mrt_lock);
		write_unlock_bh(&mrt_lock);
		return -EADDRNOTAVAIL;

#ifdef CONFIG_IP_PIMSM
	if (vifi == mrt->mroute_reg_vif_num)
		mrt->mroute_reg_vif_num = -1;

	if (vifi + 1 == mrt->maxvif) {
		for (tmp = vifi - 1; tmp >= 0; tmp--) {
			if (VIF_EXISTS(mrt, tmp))

	write_unlock_bh(&mrt_lock);

	dev_set_allmulti(dev, -1);

	if ((in_dev = __in_dev_get_rtnl(dev)) != NULL) {
		IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)--;
		ip_rt_multicast_event(in_dev);

	if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER) && !notify)
		unregister_netdevice_queue(dev, head);
static inline void ipmr_cache_free(struct mfc_cache *c)
	kmem_cache_free(mrt_cachep, c);

/* Destroy an unresolved cache entry, killing queued skbs
 * and reporting error to netlink readers.
 */

static void ipmr_destroy_unres(struct mr_table *mrt, struct mfc_cache *c)
	struct net *net = read_pnet(&mrt->net);

	atomic_dec(&mrt->cache_resolve_queue_len);

	while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved))) {
		if (ip_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));

			nlh->nlmsg_type = NLMSG_ERROR;
			nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
			skb_trim(skb, nlh->nlmsg_len);
			e->error = -ETIMEDOUT;
			memset(&e->msg, 0, sizeof(e->msg));

			rtnl_unicast(skb, net, NETLINK_CB(skb).pid);
/* Timer process for the unresolved queue. */

static void ipmr_expire_process(unsigned long arg)
	struct mr_table *mrt = (struct mr_table *)arg;
	unsigned long expires;
	struct mfc_cache *c, *next;

	if (!spin_trylock(&mfc_unres_lock)) {
		mod_timer(&mrt->ipmr_expire_timer, jiffies + HZ/10);

	if (list_empty(&mrt->mfc_unres_queue))

	list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
		if (time_after(c->mfc_un.unres.expires, now)) {
			unsigned long interval = c->mfc_un.unres.expires - now;

			if (interval < expires)

		ipmr_destroy_unres(mrt, c);

	if (!list_empty(&mrt->mfc_unres_queue))
		mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);

	spin_unlock(&mfc_unres_lock);
/* Fill oifs list. It is called under write locked mrt_lock. */

static void ipmr_update_thresholds(struct mr_table *mrt, struct mfc_cache *cache,
				   unsigned char *ttls)
	cache->mfc_un.res.minvif = MAXVIFS;
	cache->mfc_un.res.maxvif = 0;
	memset(cache->mfc_un.res.ttls, 255, MAXVIFS);

	for (vifi = 0; vifi < mrt->maxvif; vifi++) {
		if (VIF_EXISTS(mrt, vifi) &&
		    ttls[vifi] && ttls[vifi] < 255) {
			cache->mfc_un.res.ttls[vifi] = ttls[vifi];
			if (cache->mfc_un.res.minvif > vifi)
				cache->mfc_un.res.minvif = vifi;
			if (cache->mfc_un.res.maxvif <= vifi)
				cache->mfc_un.res.maxvif = vifi + 1;
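
/*
 * A ttls[] slot of 255 (or 0) means "never forward on this vif"; anything
 * lower is the TTL a packet must exceed to be forwarded there (compare the
 * ip_hdr(skb)->ttl check in ip_mr_forward()).  minvif/maxvif merely bound
 * the range the forwarding loop has to scan.
 */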
static int vif_add(struct net *net, struct mr_table *mrt,
		   struct vifctl *vifc, int mrtsock)
	int vifi = vifc->vifc_vifi;
	struct vif_device *v = &mrt->vif_table[vifi];
	struct net_device *dev;
	struct in_device *in_dev;

	if (VIF_EXISTS(mrt, vifi))

	switch (vifc->vifc_flags) {
#ifdef CONFIG_IP_PIMSM
		/*
		 * Special Purpose VIF in PIM
		 * All the packets will be sent to the daemon
		 */
		if (mrt->mroute_reg_vif_num >= 0)
		dev = ipmr_reg_vif(net, mrt);
		err = dev_set_allmulti(dev, 1);
			unregister_netdevice(dev);

		dev = ipmr_new_tunnel(net, vifc);
		err = dev_set_allmulti(dev, 1);
			ipmr_del_tunnel(dev, vifc);

	case VIFF_USE_IFINDEX:
		if (vifc->vifc_flags == VIFF_USE_IFINDEX) {
			dev = dev_get_by_index(net, vifc->vifc_lcl_ifindex);
			if (dev && dev->ip_ptr == NULL) {
				return -EADDRNOTAVAIL;
			dev = ip_dev_find(net, vifc->vifc_lcl_addr.s_addr);
			return -EADDRNOTAVAIL;
		err = dev_set_allmulti(dev, 1);

	if ((in_dev = __in_dev_get_rtnl(dev)) == NULL) {
		return -EADDRNOTAVAIL;
	IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)++;
	ip_rt_multicast_event(in_dev);

	/*
	 *	Fill in the VIF structures
	 */
	v->rate_limit = vifc->vifc_rate_limit;
	v->local = vifc->vifc_lcl_addr.s_addr;
	v->remote = vifc->vifc_rmt_addr.s_addr;
	v->flags = vifc->vifc_flags;
		v->flags |= VIFF_STATIC;
	v->threshold = vifc->vifc_threshold;

	v->link = dev->ifindex;
	if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER))
		v->link = dev->iflink;

	/* And finish update writing critical data */
	write_lock_bh(&mrt_lock);
#ifdef CONFIG_IP_PIMSM
	if (v->flags & VIFF_REGISTER)
		mrt->mroute_reg_vif_num = vifi;
	if (vifi + 1 > mrt->maxvif)
		mrt->maxvif = vifi + 1;
	write_unlock_bh(&mrt_lock);
static struct mfc_cache *ipmr_cache_find(struct mr_table *mrt,
					 __be32 origin, __be32 mcastgrp)
	int line = MFC_HASH(mcastgrp, origin);

	list_for_each_entry(c, &mrt->mfc_cache_array[line], list) {
		if (c->mfc_origin == origin && c->mfc_mcastgrp == mcastgrp)

/*
 *	Allocate a multicast cache entry
 */
static struct mfc_cache *ipmr_cache_alloc(void)
	struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);

	c->mfc_un.res.minvif = MAXVIFS;

static struct mfc_cache *ipmr_cache_alloc_unres(void)
	struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);

	skb_queue_head_init(&c->mfc_un.unres.unresolved);
	c->mfc_un.unres.expires = jiffies + 10*HZ;
/*
 *	A cache entry has gone into a resolved state from queued
 */

static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt,
			       struct mfc_cache *uc, struct mfc_cache *c)
	/*
	 *	Play the pending entries through our router
	 */
	while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
		if (ip_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));

			if (__ipmr_fill_mroute(mrt, skb, c, NLMSG_DATA(nlh)) > 0) {
				nlh->nlmsg_len = (skb_tail_pointer(skb) -
						  (u8 *)nlh);
			} else {
				nlh->nlmsg_type = NLMSG_ERROR;
				nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
				skb_trim(skb, nlh->nlmsg_len);
				e->error = -EMSGSIZE;
				memset(&e->msg, 0, sizeof(e->msg));
			}

			rtnl_unicast(skb, net, NETLINK_CB(skb).pid);
		} else
			ip_mr_forward(net, mrt, skb, c, 0);
/*
 *	Bounce a cache query up to mrouted. We could use netlink for this but mrouted
 *	expects the following bizarre scheme.
 *
 *	Called under mrt_lock.
 */

static int ipmr_cache_report(struct mr_table *mrt,
			     struct sk_buff *pkt, vifi_t vifi, int assert)
	const int ihl = ip_hdrlen(pkt);
	struct igmphdr *igmp;

#ifdef CONFIG_IP_PIMSM
	if (assert == IGMPMSG_WHOLEPKT)
		skb = skb_realloc_headroom(pkt, sizeof(struct iphdr));
	else
#endif
		skb = alloc_skb(128, GFP_ATOMIC);

#ifdef CONFIG_IP_PIMSM
	if (assert == IGMPMSG_WHOLEPKT) {
		/* Ugly, but we have no choice with this interface.
		 * Duplicate old header, fix ihl, length etc.
		 * And all this only to mangle msg->im_msgtype and
		 * to set msg->im_mbz to "mbz" :-)
		 */
		skb_push(skb, sizeof(struct iphdr));
		skb_reset_network_header(skb);
		skb_reset_transport_header(skb);
		msg = (struct igmpmsg *)skb_network_header(skb);
		memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr));
		msg->im_msgtype = IGMPMSG_WHOLEPKT;
		msg->im_vif = mrt->mroute_reg_vif_num;
		ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2;
		ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) +
					     sizeof(struct iphdr));

	skb->network_header = skb->tail;
	skb_copy_to_linear_data(skb, pkt->data, ihl);
	ip_hdr(skb)->protocol = 0;	/* Flag to the kernel this is a route add */
	msg = (struct igmpmsg *)skb_network_header(skb);
	skb_dst_set(skb, dst_clone(skb_dst(pkt)));

	igmp = (struct igmphdr *)skb_put(skb, sizeof(struct igmphdr));
	msg->im_msgtype = assert;
	ip_hdr(skb)->tot_len = htons(skb->len);		/* Fix the length */
	skb->transport_header = skb->network_header;

	if (mrt->mroute_sk == NULL) {

	ret = sock_queue_rcv_skb(mrt->mroute_sk, skb);
		printk(KERN_WARNING "mroute: pending queue full, dropping entries.\n");
/*
 *	Queue a packet for resolution. It gets locked cache entry!
 */

static int
ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi, struct sk_buff *skb)
	const struct iphdr *iph = ip_hdr(skb);

	spin_lock_bh(&mfc_unres_lock);
	list_for_each_entry(c, &mrt->mfc_unres_queue, list) {
		if (c->mfc_mcastgrp == iph->daddr &&
		    c->mfc_origin == iph->saddr) {

	/*
	 *	Create a new entry if allowable
	 */
	if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 ||
	    (c = ipmr_cache_alloc_unres()) == NULL) {
		spin_unlock_bh(&mfc_unres_lock);

	/*
	 *	Fill in the new cache entry
	 */
	c->mfc_origin = iph->saddr;
	c->mfc_mcastgrp = iph->daddr;

	/*
	 *	Reflect first query at mrouted.
	 */
	err = ipmr_cache_report(mrt, skb, vifi, IGMPMSG_NOCACHE);

	/* If the report failed throw the cache entry out. */
		spin_unlock_bh(&mfc_unres_lock);

	atomic_inc(&mrt->cache_resolve_queue_len);
	list_add(&c->list, &mrt->mfc_unres_queue);

	if (atomic_read(&mrt->cache_resolve_queue_len) == 1)
		mod_timer(&mrt->ipmr_expire_timer, c->mfc_un.unres.expires);

	/*
	 *	See if we can append the packet
	 */
	if (c->mfc_un.unres.unresolved.qlen > 3) {

	skb_queue_tail(&c->mfc_un.unres.unresolved, skb);

	spin_unlock_bh(&mfc_unres_lock);
/*
 *	MFC cache manipulation by user space mroute daemon
 */

static int ipmr_mfc_delete(struct mr_table *mrt, struct mfcctl *mfc)
	struct mfc_cache *c, *next;

	line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);

	list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[line], list) {
		if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
		    c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) {
			write_lock_bh(&mrt_lock);
			write_unlock_bh(&mrt_lock);

static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
			struct mfcctl *mfc, int mrtsock)
	struct mfc_cache *uc, *c;

	if (mfc->mfcc_parent >= MAXVIFS)

	line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);

	list_for_each_entry(c, &mrt->mfc_cache_array[line], list) {
		if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
		    c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) {
			write_lock_bh(&mrt_lock);
			c->mfc_parent = mfc->mfcc_parent;
			ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls);
				c->mfc_flags |= MFC_STATIC;
			write_unlock_bh(&mrt_lock);

	if (!ipv4_is_multicast(mfc->mfcc_mcastgrp.s_addr))

	c = ipmr_cache_alloc();

	c->mfc_origin = mfc->mfcc_origin.s_addr;
	c->mfc_mcastgrp = mfc->mfcc_mcastgrp.s_addr;
	c->mfc_parent = mfc->mfcc_parent;
	ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls);
		c->mfc_flags |= MFC_STATIC;

	write_lock_bh(&mrt_lock);
	list_add(&c->list, &mrt->mfc_cache_array[line]);
	write_unlock_bh(&mrt_lock);

	/*
	 *	Check to see if we resolved a queued list. If so we
	 *	need to send on the frames and tidy up.
	 */
	spin_lock_bh(&mfc_unres_lock);
	list_for_each_entry(uc, &mrt->mfc_unres_queue, list) {
		if (uc->mfc_origin == c->mfc_origin &&
		    uc->mfc_mcastgrp == c->mfc_mcastgrp) {
			list_del(&uc->list);
			atomic_dec(&mrt->cache_resolve_queue_len);

	if (list_empty(&mrt->mfc_unres_queue))
		del_timer(&mrt->ipmr_expire_timer);
	spin_unlock_bh(&mfc_unres_lock);

		ipmr_cache_resolve(net, mrt, uc, c);
		ipmr_cache_free(uc);
/*
 *	Close the multicast socket, and clear the vif tables etc
 */

static void mroute_clean_tables(struct mr_table *mrt)
	struct mfc_cache *c, *next;

	/*
	 *	Shut down all active vif entries
	 */
	for (i = 0; i < mrt->maxvif; i++) {
		if (!(mrt->vif_table[i].flags & VIFF_STATIC))
			vif_delete(mrt, i, 0, &list);
	unregister_netdevice_many(&list);

	for (i = 0; i < MFC_LINES; i++) {
		list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[i], list) {
			if (c->mfc_flags & MFC_STATIC)
			write_lock_bh(&mrt_lock);
			write_unlock_bh(&mrt_lock);

	if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
		spin_lock_bh(&mfc_unres_lock);
		list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
			ipmr_destroy_unres(mrt, c);
		spin_unlock_bh(&mfc_unres_lock);
static void mrtsock_destruct(struct sock *sk)
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	ipmr_for_each_table(mrt, net) {
		if (sk == mrt->mroute_sk) {
			IPV4_DEVCONF_ALL(net, MC_FORWARDING)--;

			write_lock_bh(&mrt_lock);
			mrt->mroute_sk = NULL;
			write_unlock_bh(&mrt_lock);

			mroute_clean_tables(mrt);
/*
 *	Socket options and virtual interface manipulation. The whole
 *	virtual interface system is a complete heap, but unfortunately
 *	that's how BSD mrouted happens to think. Maybe one day with a proper
 *	MOSPF/PIM router set up we can clean this up.
 */

int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen)
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);

	if (optname != MRT_INIT) {
		if (sk != mrt->mroute_sk && !capable(CAP_NET_ADMIN))

	if (sk->sk_type != SOCK_RAW ||
	    inet_sk(sk)->inet_num != IPPROTO_IGMP)
	if (optlen != sizeof(int))
		return -ENOPROTOOPT;

	if (mrt->mroute_sk) {

	ret = ip_ra_control(sk, 1, mrtsock_destruct);
		write_lock_bh(&mrt_lock);
		mrt->mroute_sk = sk;
		write_unlock_bh(&mrt_lock);

		IPV4_DEVCONF_ALL(net, MC_FORWARDING)++;

	if (sk != mrt->mroute_sk)
	return ip_ra_control(sk, 0, NULL);

	if (optlen != sizeof(vif))
	if (copy_from_user(&vif, optval, sizeof(vif)))
	if (vif.vifc_vifi >= MAXVIFS)
	if (optname == MRT_ADD_VIF) {
		ret = vif_add(net, mrt, &vif, sk == mrt->mroute_sk);
		ret = vif_delete(mrt, vif.vifc_vifi, 0, NULL);

	/*
	 *	Manipulate the forwarding caches. These live
	 *	in a sort of kernel/user symbiosis.
	 */
	if (optlen != sizeof(mfc))
	if (copy_from_user(&mfc, optval, sizeof(mfc)))
	if (optname == MRT_DEL_MFC)
		ret = ipmr_mfc_delete(mrt, &mfc);
		ret = ipmr_mfc_add(net, mrt, &mfc, sk == mrt->mroute_sk);

	/*
	 *	Control PIM assert.
	 */
	if (get_user(v, (int __user *)optval))
	mrt->mroute_do_assert = (v) ? 1 : 0;

#ifdef CONFIG_IP_PIMSM
	if (get_user(v, (int __user *)optval))
	if (v != mrt->mroute_do_pim) {
		mrt->mroute_do_pim = v;
		mrt->mroute_do_assert = v;

#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
	if (optlen != sizeof(u32))
	if (get_user(v, (u32 __user *)optval))
	if (sk == mrt->mroute_sk)
	if (!ipmr_new_table(net, v))
	raw_sk(sk)->ipmr_table = v;

	/*
	 *	Spurious command, or MRT_VERSION which you cannot set.
	 */
	return -ENOPROTOOPT;
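
/*
 * For orientation, a routing daemon drives the options handled above roughly
 * as in the sketch below (userspace illustration only; the addresses, vif
 * number and TTLs are made up and error handling is omitted).
 */
#if 0
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <linux/mroute.h>
#include <string.h>

static int mroute_setup(void)
{
	int one = 1;
	struct vifctl vif;
	struct mfcctl mfc;
	/* The mroute socket must be a raw IGMP socket (checked at MRT_INIT). */
	int fd = socket(AF_INET, SOCK_RAW, IPPROTO_IGMP);

	/* Become the multicast routing daemon for the default table. */
	setsockopt(fd, IPPROTO_IP, MRT_INIT, &one, sizeof(one));

	/* Register a virtual interface; vifc_vifi indexes vif_table[]. */
	memset(&vif, 0, sizeof(vif));
	vif.vifc_vifi = 0;
	vif.vifc_threshold = 1;
	vif.vifc_lcl_addr.s_addr = inet_addr("192.0.2.1");	/* example address */
	setsockopt(fd, IPPROTO_IP, MRT_ADD_VIF, &vif, sizeof(vif));

	/* Install an (S,G) entry, typically in response to IGMPMSG_NOCACHE. */
	memset(&mfc, 0, sizeof(mfc));
	mfc.mfcc_origin.s_addr   = inet_addr("198.51.100.7");	/* example source */
	mfc.mfcc_mcastgrp.s_addr = inet_addr("232.1.1.1");	/* example group */
	mfc.mfcc_parent = 0;		/* incoming vif */
	mfc.mfcc_ttls[0] = 0;		/* 0 or 255: do not forward on this vif */
	setsockopt(fd, IPPROTO_IP, MRT_ADD_MFC, &mfc, sizeof(mfc));

	/* MRT_DONE (or closing the socket) tears all of this down again. */
	return fd;
}
#endif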
/*
 *	Getsockopt support for the multicast routing system.
 */

int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int __user *optlen)
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);

	if (optname != MRT_VERSION &&
#ifdef CONFIG_IP_PIMSM
	    optname != MRT_ASSERT)
		return -ENOPROTOOPT;

	if (get_user(olr, optlen))

	olr = min_t(unsigned int, olr, sizeof(int));

	if (put_user(olr, optlen))
	if (optname == MRT_VERSION)
#ifdef CONFIG_IP_PIMSM
	else if (optname == MRT_PIM)
		val = mrt->mroute_do_pim;
		val = mrt->mroute_do_assert;
	if (copy_to_user(optval, &val, olr))
/*
 *	The IP multicast ioctl support routines.
 */

int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
	struct sioc_sg_req sr;
	struct sioc_vif_req vr;
	struct vif_device *vif;
	struct mfc_cache *c;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);

		if (copy_from_user(&vr, arg, sizeof(vr)))
		if (vr.vifi >= mrt->maxvif)
		read_lock(&mrt_lock);
		vif = &mrt->vif_table[vr.vifi];
		if (VIF_EXISTS(mrt, vr.vifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;

		if (copy_from_user(&sr, arg, sizeof(sr)))

		read_lock(&mrt_lock);
		c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
			sr.pktcnt = c->mfc_un.res.pkt;
			sr.bytecnt = c->mfc_un.res.bytes;
			sr.wrong_if = c->mfc_un.res.wrong_if;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &sr, sizeof(sr)))
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;

		return -ENOIOCTLCMD;
static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr)
	struct net_device *dev = ptr;
	struct net *net = dev_net(dev);
	struct mr_table *mrt;
	struct vif_device *v;

	if (event != NETDEV_UNREGISTER)

	ipmr_for_each_table(mrt, net) {
		v = &mrt->vif_table[0];
		for (ct = 0; ct < mrt->maxvif; ct++, v++) {
				vif_delete(mrt, ct, 1, &list);
	unregister_netdevice_many(&list);

static struct notifier_block ip_mr_notifier = {
	.notifier_call = ipmr_device_event,
};
/*
 *	Encapsulate a packet by attaching a valid IPIP header to it.
 *	This avoids tunnel drivers and other mess and gives us the speed so
 *	important for multicast video.
 */

static void ip_encap(struct sk_buff *skb, __be32 saddr, __be32 daddr)
	struct iphdr *old_iph = ip_hdr(skb);

	skb_push(skb, sizeof(struct iphdr));
	skb->transport_header = skb->network_header;
	skb_reset_network_header(skb);

	iph->tos = old_iph->tos;
	iph->ttl = old_iph->ttl;
	iph->protocol = IPPROTO_IPIP;
	iph->tot_len = htons(skb->len);
	ip_select_ident(iph, skb_dst(skb), NULL);

	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
static inline int ipmr_forward_finish(struct sk_buff *skb)
	struct ip_options *opt = &(IPCB(skb)->opt);

	IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS);

	if (unlikely(opt->optlen))
		ip_forward_options(skb);

	return dst_output(skb);
/*
 *	Processing handlers for ipmr_forward
 */

static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
			    struct sk_buff *skb, struct mfc_cache *c, int vifi)
	const struct iphdr *iph = ip_hdr(skb);
	struct vif_device *vif = &mrt->vif_table[vifi];
	struct net_device *dev;

	if (vif->dev == NULL)

#ifdef CONFIG_IP_PIMSM
	if (vif->flags & VIFF_REGISTER) {
		vif->bytes_out += skb->len;
		vif->dev->stats.tx_bytes += skb->len;
		vif->dev->stats.tx_packets++;
		ipmr_cache_report(mrt, skb, vifi, IGMPMSG_WHOLEPKT);

	if (vif->flags & VIFF_TUNNEL) {
		struct flowi fl = { .oif = vif->link,
				    .nl_u = { .ip4_u =
					      { .daddr = vif->remote,
						.saddr = vif->local,
						.tos = RT_TOS(iph->tos) } },
				    .proto = IPPROTO_IPIP };
		if (ip_route_output_key(net, &rt, &fl))
		encap = sizeof(struct iphdr);
	} else {
		struct flowi fl = { .oif = vif->link,
				    .nl_u = { .ip4_u =
					      { .daddr = iph->daddr,
						.tos = RT_TOS(iph->tos) } },
				    .proto = IPPROTO_IPIP };
		if (ip_route_output_key(net, &rt, &fl))
	}

	dev = rt->u.dst.dev;

	if (skb->len + encap > dst_mtu(&rt->u.dst) && (ntohs(iph->frag_off) & IP_DF)) {
		/* Do not fragment multicasts. Alas, IPv4 does not
		 * allow to send ICMP, so that packets will disappear.
		 */
		IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_FRAGFAILS);

	encap += LL_RESERVED_SPACE(dev) + rt->u.dst.header_len;

	if (skb_cow(skb, encap)) {

	vif->bytes_out += skb->len;

	skb_dst_set(skb, &rt->u.dst);
	ip_decrease_ttl(ip_hdr(skb));

	/* FIXME: forward and output firewalls used to be called here.
	 * What do we do with netfilter? -- RR
	 */
	if (vif->flags & VIFF_TUNNEL) {
		ip_encap(skb, vif->local, vif->remote);
		/* FIXME: extra output firewall step used to be here. --RR */
		vif->dev->stats.tx_packets++;
		vif->dev->stats.tx_bytes += skb->len;

	IPCB(skb)->flags |= IPSKB_FORWARDED;

	/*
	 * RFC1584 teaches that a DVMRP/PIM router must deliver packets locally
	 * not only before forwarding, but after forwarding on all output
	 * interfaces. It is clear that if an mrouter runs a multicasting
	 * program, that program should receive packets regardless of the
	 * interface it is joined on.
	 * If we do not make it so, the program would have to join on all
	 * interfaces. On the other hand, a multihoming host (or router, but
	 * not mrouter) cannot join on more than one interface - it would
	 * result in receiving multiple packets.
	 */
	NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD, skb, skb->dev, dev,
		ipmr_forward_finish);
static int ipmr_find_vif(struct mr_table *mrt, struct net_device *dev)
	for (ct = mrt->maxvif - 1; ct >= 0; ct--) {
		if (mrt->vif_table[ct].dev == dev)
/* "local" means that we should preserve one skb (for local delivery) */

static int ip_mr_forward(struct net *net, struct mr_table *mrt,
			 struct sk_buff *skb, struct mfc_cache *cache,
			 int local)
	vif = cache->mfc_parent;
	cache->mfc_un.res.pkt++;
	cache->mfc_un.res.bytes += skb->len;

	/*
	 * Wrong interface: drop packet and (maybe) send PIM assert.
	 */
	if (mrt->vif_table[vif].dev != skb->dev) {
		if (skb_rtable(skb)->fl.iif == 0) {
			/* It is our own packet, looped back.
			 * Very complicated situation...
			 *
			 * The best workaround until routing daemons are
			 * fixed is not to redistribute a packet if it was
			 * sent through the wrong interface. It means that
			 * multicast applications WILL NOT work for (S,G)
			 * entries whose default multicast route points to
			 * the wrong oif. In any case, it is not a good idea
			 * to run multicasting applications on a router.
			 */

		cache->mfc_un.res.wrong_if++;
		true_vifi = ipmr_find_vif(mrt, skb->dev);

		if (true_vifi >= 0 && mrt->mroute_do_assert &&
		    /* pimsm uses asserts, when switching from RPT to SPT,
		     * so that we cannot check that packet arrived on an oif.
		     * It is bad, but otherwise we would need to move pretty
		     * large chunk of pimd to kernel. Ough... --ANK
		     */
		    (mrt->mroute_do_pim ||
		     cache->mfc_un.res.ttls[true_vifi] < 255) &&
		    time_after(jiffies,
			       cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
			cache->mfc_un.res.last_assert = jiffies;
			ipmr_cache_report(mrt, skb, true_vifi, IGMPMSG_WRONGVIF);

	mrt->vif_table[vif].pkt_in++;
	mrt->vif_table[vif].bytes_in += skb->len;

	for (ct = cache->mfc_un.res.maxvif - 1; ct >= cache->mfc_un.res.minvif; ct--) {
		if (ip_hdr(skb)->ttl > cache->mfc_un.res.ttls[ct]) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);

				ipmr_queue_xmit(net, mrt, skb2, cache,
						psend);

		struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);

			ipmr_queue_xmit(net, mrt, skb2, cache, psend);

		ipmr_queue_xmit(net, mrt, skb, cache, psend);
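
/*
 * Forwarding fans the packet out by cloning: every matching oif except the
 * last gets a clone, and the last one either consumes the original skb or,
 * when "local" asked us to preserve it for local delivery, another clone,
 * with psend tracking the previously selected vif.
 */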
/*
 *	Multicast packets for forwarding arrive here
 */

int ip_mr_input(struct sk_buff *skb)
	struct mfc_cache *cache;
	struct net *net = dev_net(skb->dev);
	int local = skb_rtable(skb)->rt_flags & RTCF_LOCAL;
	struct mr_table *mrt;

	/* Packet is looped back after forward, it should not be
	 * forwarded a second time, but it still can be delivered locally.
	 */
	if (IPCB(skb)->flags & IPSKB_FORWARDED)

	err = ipmr_fib_lookup(net, &skb_rtable(skb)->fl, &mrt);

	if (IPCB(skb)->opt.router_alert) {
		if (ip_call_ra_chain(skb))
	} else if (ip_hdr(skb)->protocol == IPPROTO_IGMP) {
		/* IGMPv1 (and broken IGMPv2 implementations, sort of
		 * Cisco IOS <= 11.2(8)) do not put the router alert
		 * option into IGMP packets destined to routable
		 * groups. It is very bad, because it means
		 * that we can forward NO IGMP messages.
		 */
		read_lock(&mrt_lock);
		if (mrt->mroute_sk) {
			raw_rcv(mrt->mroute_sk, skb);
			read_unlock(&mrt_lock);
		read_unlock(&mrt_lock);

	read_lock(&mrt_lock);
	cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);

	/*
	 *	No usable cache entry
	 */
	if (cache == NULL) {
		struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
		ip_local_deliver(skb);
			read_unlock(&mrt_lock);

		vif = ipmr_find_vif(mrt, skb->dev);
			int err2 = ipmr_cache_unresolved(mrt, vif, skb);
			read_unlock(&mrt_lock);
		read_unlock(&mrt_lock);

	ip_mr_forward(net, mrt, skb, cache, local);

	read_unlock(&mrt_lock);
		return ip_local_deliver(skb);
	return ip_local_deliver(skb);
#ifdef CONFIG_IP_PIMSM
static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb,
		     unsigned int pimlen)
	struct net_device *reg_dev = NULL;
	struct iphdr *encap;

	encap = (struct iphdr *)(skb_transport_header(skb) + pimlen);

	/*
	 * Check that:
	 * a. packet is really destined to a multicast group
	 * b. packet is not a NULL-REGISTER
	 * c. packet is not truncated
	 */
	if (!ipv4_is_multicast(encap->daddr) ||
	    encap->tot_len == 0 ||
	    ntohs(encap->tot_len) + pimlen > skb->len)

	read_lock(&mrt_lock);
	if (mrt->mroute_reg_vif_num >= 0)
		reg_dev = mrt->vif_table[mrt->mroute_reg_vif_num].dev;
	read_unlock(&mrt_lock);

	if (reg_dev == NULL)

	skb->mac_header = skb->network_header;
	skb_pull(skb, (u8 *)encap - skb->data);
	skb_reset_network_header(skb);
	skb->protocol = htons(ETH_P_IP);
	skb->pkt_type = PACKET_HOST;

	skb_tunnel_rx(skb, reg_dev);
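
/*
 * After the header surgery above, the inner multicast packet is (roughly
 * speaking) handed back to the receive path as though it had arrived on the
 * pimreg device - skb_tunnel_rx() updates the device and rx statistics - so
 * it can then be forwarded along the normal (S,G) state like any other
 * multicast packet.
 */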
#ifdef CONFIG_IP_PIMSM_V1
/*
 *	Handle IGMP messages of PIMv1
 */

int pim_rcv_v1(struct sk_buff *skb)
	struct igmphdr *pim;
	struct net *net = dev_net(skb->dev);
	struct mr_table *mrt;

	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))

	pim = igmp_hdr(skb);

	if (ipmr_fib_lookup(net, &skb_rtable(skb)->fl, &mrt) < 0)

	if (!mrt->mroute_do_pim ||
	    pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER)

	if (__pim_rcv(mrt, skb, sizeof(*pim))) {
#ifdef CONFIG_IP_PIMSM_V2
static int pim_rcv(struct sk_buff *skb)
	struct pimreghdr *pim;
	struct net *net = dev_net(skb->dev);
	struct mr_table *mrt;

	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))

	pim = (struct pimreghdr *)skb_transport_header(skb);
	if (pim->type != ((PIM_VERSION << 4) | (PIM_REGISTER)) ||
	    (pim->flags & PIM_NULL_REGISTER) ||
	    (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 &&
	     csum_fold(skb_checksum(skb, 0, skb->len, 0))))

	if (ipmr_fib_lookup(net, &skb_rtable(skb)->fl, &mrt) < 0)

	if (__pim_rcv(mrt, skb, sizeof(*pim))) {
static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
			      struct mfc_cache *c, struct rtmsg *rtm)
	struct rtnexthop *nhp;
	u8 *b = skb_tail_pointer(skb);
	struct rtattr *mp_head;

	/* If cache is unresolved, don't try to parse IIF and OIF */
	if (c->mfc_parent >= MAXVIFS)

	if (VIF_EXISTS(mrt, c->mfc_parent))
		RTA_PUT(skb, RTA_IIF, 4, &mrt->vif_table[c->mfc_parent].dev->ifindex);

	mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0));

	for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
		if (VIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
			if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4))
				goto rtattr_failure;
			nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp)));
			nhp->rtnh_flags = 0;
			nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
			nhp->rtnh_ifindex = mrt->vif_table[ct].dev->ifindex;
			nhp->rtnh_len = sizeof(*nhp);

	mp_head->rta_type = RTA_MULTIPATH;
	mp_head->rta_len = skb_tail_pointer(skb) - (u8 *)mp_head;
	rtm->rtm_type = RTN_MULTICAST;
int ipmr_get_route(struct net *net,
		   struct sk_buff *skb, struct rtmsg *rtm, int nowait)
	struct mr_table *mrt;
	struct mfc_cache *cache;
	struct rtable *rt = skb_rtable(skb);

	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);

	read_lock(&mrt_lock);
	cache = ipmr_cache_find(mrt, rt->rt_src, rt->rt_dst);

	if (cache == NULL) {
		struct sk_buff *skb2;
		struct net_device *dev;

			read_unlock(&mrt_lock);

		if (dev == NULL || (vif = ipmr_find_vif(mrt, dev)) < 0) {
			read_unlock(&mrt_lock);
		skb2 = skb_clone(skb, GFP_ATOMIC);
			read_unlock(&mrt_lock);

		skb_push(skb2, sizeof(struct iphdr));
		skb_reset_network_header(skb2);
		iph->ihl = sizeof(struct iphdr) >> 2;
		iph->saddr = rt->rt_src;
		iph->daddr = rt->rt_dst;
		err = ipmr_cache_unresolved(mrt, vif, skb2);
		read_unlock(&mrt_lock);

	if (!nowait && (rtm->rtm_flags & RTM_F_NOTIFY))
		cache->mfc_flags |= MFC_NOTIFY;
	err = __ipmr_fill_mroute(mrt, skb, cache, rtm);
	read_unlock(&mrt_lock);
static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
			    u32 pid, u32 seq, struct mfc_cache *c)
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, pid, seq, RTM_NEWROUTE, sizeof(*rtm), NLM_F_MULTI);

	rtm = nlmsg_data(nlh);
	rtm->rtm_family   = RTNL_FAMILY_IPMR;
	rtm->rtm_dst_len  = 32;
	rtm->rtm_src_len  = 32;
	rtm->rtm_table    = mrt->id;
	NLA_PUT_U32(skb, RTA_TABLE, mrt->id);
	rtm->rtm_type     = RTN_MULTICAST;
	rtm->rtm_scope    = RT_SCOPE_UNIVERSE;
	rtm->rtm_protocol = RTPROT_UNSPEC;

	NLA_PUT_BE32(skb, RTA_SRC, c->mfc_origin);
	NLA_PUT_BE32(skb, RTA_DST, c->mfc_mcastgrp);

	if (__ipmr_fill_mroute(mrt, skb, c, rtm) < 0)
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
	struct net *net = sock_net(skb->sk);
	struct mr_table *mrt;
	struct mfc_cache *mfc;
	unsigned int t = 0, s_t;
	unsigned int h = 0, s_h;
	unsigned int e = 0, s_e;

	read_lock(&mrt_lock);
	ipmr_for_each_table(mrt, net) {
		for (h = s_h; h < MFC_LINES; h++) {
			list_for_each_entry(mfc, &mrt->mfc_cache_array[h], list) {
				if (ipmr_fill_mroute(mrt, skb,
						     NETLINK_CB(cb->skb).pid,
	read_unlock(&mrt_lock);
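
/*
 * This is the RTM_GETROUTE dump handler registered for RTNL_FAMILY_IPMR in
 * ip_mr_init() below; it lets userspace (for example a sufficiently recent
 * "ip mroute show") enumerate the MFC without parsing /proc/net/ip_mr_cache.
 */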
#ifdef CONFIG_PROC_FS
/*
 *	The /proc interfaces to multicast routing:
 *	/proc/net/ip_mr_cache and /proc/net/ip_mr_vif
 */
struct ipmr_vif_iter {
	struct seq_net_private	p;
	struct mr_table		*mrt;
};

static struct vif_device *ipmr_vif_seq_idx(struct net *net,
					    struct ipmr_vif_iter *iter,
	struct mr_table *mrt = iter->mrt;

	for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) {
		if (!VIF_EXISTS(mrt, iter->ct))
		return &mrt->vif_table[iter->ct];

static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(mrt_lock)
	struct ipmr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
		return ERR_PTR(-ENOENT);

	read_lock(&mrt_lock);
	return *pos ? ipmr_vif_seq_idx(net, seq->private, *pos - 1)

static void *ipmr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
	struct ipmr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt = iter->mrt;

	if (v == SEQ_START_TOKEN)
		return ipmr_vif_seq_idx(net, iter, 0);

	while (++iter->ct < mrt->maxvif) {
		if (!VIF_EXISTS(mrt, iter->ct))
		return &mrt->vif_table[iter->ct];

static void ipmr_vif_seq_stop(struct seq_file *seq, void *v)
	__releases(mrt_lock)
	read_unlock(&mrt_lock);

static int ipmr_vif_seq_show(struct seq_file *seq, void *v)
	struct ipmr_vif_iter *iter = seq->private;
	struct mr_table *mrt = iter->mrt;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Interface BytesIn PktsIn BytesOut PktsOut Flags Local Remote\n");

	const struct vif_device *vif = v;
	const char *name = vif->dev ? vif->dev->name : "none";

	seq_printf(seq,
		   "%2Zd %-10s %8ld %7ld %8ld %7ld %05X %08X %08X\n",
		   vif - mrt->vif_table,
		   name, vif->bytes_in, vif->pkt_in,
		   vif->bytes_out, vif->pkt_out,
		   vif->flags, vif->local, vif->remote);

static const struct seq_operations ipmr_vif_seq_ops = {
	.start = ipmr_vif_seq_start,
	.next  = ipmr_vif_seq_next,
	.stop  = ipmr_vif_seq_stop,
	.show  = ipmr_vif_seq_show,
};

static int ipmr_vif_open(struct inode *inode, struct file *file)
	return seq_open_net(inode, file, &ipmr_vif_seq_ops,
			    sizeof(struct ipmr_vif_iter));

static const struct file_operations ipmr_vif_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ipmr_vif_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};
struct ipmr_mfc_iter {
	struct seq_net_private	p;
	struct mr_table		*mrt;
	struct list_head	*cache;
};

static struct mfc_cache *ipmr_mfc_seq_idx(struct net *net,
					   struct ipmr_mfc_iter *it, loff_t pos)
	struct mr_table *mrt = it->mrt;
	struct mfc_cache *mfc;

	read_lock(&mrt_lock);
	for (it->ct = 0; it->ct < MFC_LINES; it->ct++) {
		it->cache = &mrt->mfc_cache_array[it->ct];
		list_for_each_entry(mfc, it->cache, list)
	read_unlock(&mrt_lock);

	spin_lock_bh(&mfc_unres_lock);
	it->cache = &mrt->mfc_unres_queue;
	list_for_each_entry(mfc, it->cache, list)
	spin_unlock_bh(&mfc_unres_lock);

static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
	struct ipmr_mfc_iter *it = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
		return ERR_PTR(-ENOENT);

	return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1)

static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
	struct mfc_cache *mfc = v;
	struct ipmr_mfc_iter *it = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt = it->mrt;

	if (v == SEQ_START_TOKEN)
		return ipmr_mfc_seq_idx(net, seq->private, 0);

	if (mfc->list.next != it->cache)
		return list_entry(mfc->list.next, struct mfc_cache, list);

	if (it->cache == &mrt->mfc_unres_queue)

	BUG_ON(it->cache != &mrt->mfc_cache_array[it->ct]);

	while (++it->ct < MFC_LINES) {
		it->cache = &mrt->mfc_cache_array[it->ct];
		if (list_empty(it->cache))
		return list_first_entry(it->cache, struct mfc_cache, list);

	/* exhausted cache_array, show unresolved */
	read_unlock(&mrt_lock);
	it->cache = &mrt->mfc_unres_queue;

	spin_lock_bh(&mfc_unres_lock);
	if (!list_empty(it->cache))
		return list_first_entry(it->cache, struct mfc_cache, list);

	spin_unlock_bh(&mfc_unres_lock);

static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
	struct ipmr_mfc_iter *it = seq->private;
	struct mr_table *mrt = it->mrt;

	if (it->cache == &mrt->mfc_unres_queue)
		spin_unlock_bh(&mfc_unres_lock);
	else if (it->cache == &mrt->mfc_cache_array[it->ct])
		read_unlock(&mrt_lock);

static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Group Origin Iif Pkts Bytes Wrong Oifs\n");

	const struct mfc_cache *mfc = v;
	const struct ipmr_mfc_iter *it = seq->private;
	const struct mr_table *mrt = it->mrt;

	seq_printf(seq, "%08X %08X %-3hd",
		   (__force u32) mfc->mfc_mcastgrp,
		   (__force u32) mfc->mfc_origin,

	if (it->cache != &mrt->mfc_unres_queue) {
		seq_printf(seq, " %8lu %8lu %8lu",
			   mfc->mfc_un.res.pkt,
			   mfc->mfc_un.res.bytes,
			   mfc->mfc_un.res.wrong_if);
		for (n = mfc->mfc_un.res.minvif;
		     n < mfc->mfc_un.res.maxvif; n++) {
			if (VIF_EXISTS(mrt, n) &&
			    mfc->mfc_un.res.ttls[n] < 255)
				   n, mfc->mfc_un.res.ttls[n]);
	} else {
		/* unresolved mfc_caches don't contain
		 * pkt, bytes and wrong_if values
		 */
		seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
	seq_putc(seq, '\n');

static const struct seq_operations ipmr_mfc_seq_ops = {
	.start = ipmr_mfc_seq_start,
	.next  = ipmr_mfc_seq_next,
	.stop  = ipmr_mfc_seq_stop,
	.show  = ipmr_mfc_seq_show,
};

static int ipmr_mfc_open(struct inode *inode, struct file *file)
	return seq_open_net(inode, file, &ipmr_mfc_seq_ops,
			    sizeof(struct ipmr_mfc_iter));

static const struct file_operations ipmr_mfc_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ipmr_mfc_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};
#ifdef CONFIG_IP_PIMSM_V2
static const struct net_protocol pim_protocol = {
	.handler = pim_rcv,
};
#endif

/*
 *	Setup for IP multicast routing
 */
static int __net_init ipmr_net_init(struct net *net)
	err = ipmr_rules_init(net);

#ifdef CONFIG_PROC_FS
	if (!proc_net_fops_create(net, "ip_mr_vif", 0, &ipmr_vif_fops))
	if (!proc_net_fops_create(net, "ip_mr_cache", 0, &ipmr_mfc_fops))
		goto proc_cache_fail;
#endif

#ifdef CONFIG_PROC_FS
proc_cache_fail:
	proc_net_remove(net, "ip_mr_vif");
#endif
	ipmr_rules_exit(net);

static void __net_exit ipmr_net_exit(struct net *net)
#ifdef CONFIG_PROC_FS
	proc_net_remove(net, "ip_mr_cache");
	proc_net_remove(net, "ip_mr_vif");
#endif
	ipmr_rules_exit(net);

static struct pernet_operations ipmr_net_ops = {
	.init = ipmr_net_init,
	.exit = ipmr_net_exit,
};
int __init ip_mr_init(void)
	mrt_cachep = kmem_cache_create("ip_mrt_cache",
				       sizeof(struct mfc_cache),
				       0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
				       NULL);

	err = register_pernet_subsys(&ipmr_net_ops);
		goto reg_pernet_fail;

	err = register_netdevice_notifier(&ip_mr_notifier);
		goto reg_notif_fail;
#ifdef CONFIG_IP_PIMSM_V2
	if (inet_add_protocol(&pim_protocol, IPPROTO_PIM) < 0) {
		printk(KERN_ERR "ip_mr_init: can't add PIM protocol\n");
		goto add_proto_fail;
#endif

	rtnl_register(RTNL_FAMILY_IPMR, RTM_GETROUTE, NULL, ipmr_rtm_dumproute);

#ifdef CONFIG_IP_PIMSM_V2
add_proto_fail:
	unregister_netdevice_notifier(&ip_mr_notifier);
#endif
reg_notif_fail:
	unregister_pernet_subsys(&ipmr_net_ops);
reg_pernet_fail:
	kmem_cache_destroy(mrt_cachep);