// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Linux IPv6 multicast routing support for BSD pim6sd
 *	Based on net/ipv4/ipmr.c.
 *
 *	(c) 2004 Mickael Hoerdt, <hoerdt@clarinet.u-strasbg.fr>
 *		LSIIT Laboratory, Strasbourg, France
 *	(c) 2004 Jean-Philippe Andriot, <jean-philippe.andriot@6WIND.com>
 *		6WIND, Paris, France
 *	Copyright (C)2007,2008 USAGI/WIDE Project
 *		YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
 */

#include <linux/uaccess.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/rhashtable.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/raw.h>
#include <linux/notifier.h>
#include <linux/if_arp.h>
#include <net/checksum.h>
#include <net/netlink.h>
#include <net/fib_rules.h>

#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <linux/mroute6.h>
#include <linux/pim.h>
#include <net/addrconf.h>
#include <linux/netfilter_ipv6.h>
#include <linux/export.h>
#include <net/ip6_checksum.h>
#include <linux/netconf.h>
#include <net/ip_tunnels.h>

#include <linux/nospec.h>
struct ip6mr_rule {
	struct fib_rule		common;
};

struct ip6mr_result {
	struct mr_table	*mrt;
};
/* Big lock, protecting vif table, mrt cache and mroute socket state.
   Note that the changes are semaphored via rtnl_lock.
 */

static DEFINE_RWLOCK(mrt_lock);

/* Multicast router control variables */

/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);

/* We return to original Alan's scheme. Hash table of resolved
   entries is changed only in process context and protected
   with weak lock mrt_lock. Queue of unresolved entries is protected
   with strong spinlock mfc_unres_lock.

   In this case data path is free of exclusive locks at all.
 */

static struct kmem_cache *mrt_cachep __read_mostly;
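/* Illustration of the scheme described above (descriptive only): data-path
 * readers take read_lock(&mrt_lock) while walking the vif table or the
 * resolved cache, control paths (setsockopt, netdev notifiers) take
 * write_lock_bh(&mrt_lock), and only the unresolved queue ever needs the
 * stronger spin_lock_bh(&mfc_unres_lock).
 */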
static struct mr_table *ip6mr_new_table(struct net *net, u32 id);
static void ip6mr_free_table(struct mr_table *mrt);

static void ip6_mr_forward(struct net *net, struct mr_table *mrt,
			   struct net_device *dev, struct sk_buff *skb,
			   struct mfc6_cache *cache);
static int ip6mr_cache_report(struct mr_table *mrt, struct sk_buff *pkt,
			      mifi_t mifi, int assert);
static void mr6_netlink_event(struct mr_table *mrt, struct mfc6_cache *mfc,
			      int cmd);
static void mrt6msg_netlink_event(struct mr_table *mrt, struct sk_buff *pkt);
static int ip6mr_rtm_dumproute(struct sk_buff *skb,
			       struct netlink_callback *cb);
static void mroute_clean_tables(struct mr_table *mrt, int flags);
static void ipmr_expire_process(struct timer_list *t);

#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
#define ip6mr_for_each_table(mrt, net) \
	list_for_each_entry_rcu(mrt, &net->ipv6.mr6_tables, list)

static struct mr_table *ip6mr_mr_table_iter(struct net *net,
					    struct mr_table *mrt)
{
	struct mr_table *ret;

	if (!mrt)
		ret = list_entry_rcu(net->ipv6.mr6_tables.next,
				     struct mr_table, list);
	else
		ret = list_entry_rcu(mrt->list.next,
				     struct mr_table, list);

	if (&ret->list == &net->ipv6.mr6_tables)
		return NULL;
	return ret;
}

static struct mr_table *ip6mr_get_table(struct net *net, u32 id)
{
	struct mr_table *mrt;

	ip6mr_for_each_table(mrt, net) {
		if (mrt->id == id)
			return mrt;
	}
	return NULL;
}
static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
			    struct mr_table **mrt)
{
	int err;
	struct ip6mr_result res;
	struct fib_lookup_arg arg = {
		.result = &res,
		.flags = FIB_LOOKUP_NOREF,
	};

	/* update flow if oif or iif point to device enslaved to l3mdev */
	l3mdev_update_flow(net, flowi6_to_flowi(flp6));

	err = fib_rules_lookup(net->ipv6.mr6_rules_ops,
			       flowi6_to_flowi(flp6), 0, &arg);
	if (err < 0)
		return err;
	*mrt = res.mrt;
	return 0;
}
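/* Note: both the PIM receive path and the forwarding path resolve their
 * mr_table through ip6mr_fib_lookup() with a flowi6 keyed on the arrival
 * device (flowi6_iif) and skb->mark, so policy rules can steer multicast
 * flows to per-table daemons.
 */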
static int ip6mr_rule_action(struct fib_rule *rule, struct flowi *flp,
			     int flags, struct fib_lookup_arg *arg)
{
	struct ip6mr_result *res = arg->result;
	struct mr_table *mrt;

	switch (rule->action) {
	case FR_ACT_TO_TBL:
		break;
	case FR_ACT_UNREACHABLE:
		return -ENETUNREACH;
	case FR_ACT_PROHIBIT:
		return -EACCES;
	case FR_ACT_BLACKHOLE:
	default:
		return -EINVAL;
	}

	arg->table = fib_rule_get_table(rule, arg);

	mrt = ip6mr_get_table(rule->fr_net, arg->table);
	if (!mrt)
		return -EAGAIN;
	res->mrt = mrt;
	return 0;
}

static int ip6mr_rule_match(struct fib_rule *rule, struct flowi *flp, int flags)
{
	return 1;
}

static const struct nla_policy ip6mr_rule_policy[FRA_MAX + 1] = {
	FRA_GENERIC_POLICY,
};

static int ip6mr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
				struct fib_rule_hdr *frh, struct nlattr **tb,
				struct netlink_ext_ack *extack)
{
	return 0;
}

static int ip6mr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
			      struct nlattr **tb)
{
	return 1;
}

static int ip6mr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
			   struct fib_rule_hdr *frh)
{
	frh->dst_len = 0;
	frh->src_len = 0;
	frh->tos     = 0;
	return 0;
}

static const struct fib_rules_ops __net_initconst ip6mr_rules_ops_template = {
	.family		= RTNL_FAMILY_IP6MR,
	.rule_size	= sizeof(struct ip6mr_rule),
	.addr_size	= sizeof(struct in6_addr),
	.action		= ip6mr_rule_action,
	.match		= ip6mr_rule_match,
	.configure	= ip6mr_rule_configure,
	.compare	= ip6mr_rule_compare,
	.fill		= ip6mr_rule_fill,
	.nlgroup	= RTNLGRP_IPV6_RULE,
	.policy		= ip6mr_rule_policy,
	.owner		= THIS_MODULE,
};
static int __net_init ip6mr_rules_init(struct net *net)
{
	struct fib_rules_ops *ops;
	struct mr_table *mrt;
	int err;

	ops = fib_rules_register(&ip6mr_rules_ops_template, net);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	INIT_LIST_HEAD(&net->ipv6.mr6_tables);

	mrt = ip6mr_new_table(net, RT6_TABLE_DFLT);
	if (IS_ERR(mrt)) {
		err = PTR_ERR(mrt);
		goto err1;
	}

	err = fib_default_rule_add(ops, 0x7fff, RT6_TABLE_DFLT, 0);
	if (err < 0)
		goto err2;

	net->ipv6.mr6_rules_ops = ops;
	return 0;

err2:
	ip6mr_free_table(mrt);
err1:
	fib_rules_unregister(ops);
	return err;
}

static void __net_exit ip6mr_rules_exit(struct net *net)
{
	struct mr_table *mrt, *next;

	rtnl_lock();
	list_for_each_entry_safe(mrt, next, &net->ipv6.mr6_tables, list) {
		list_del(&mrt->list);
		ip6mr_free_table(mrt);
	}
	fib_rules_unregister(net->ipv6.mr6_rules_ops);
	rtnl_unlock();
}

static int ip6mr_rules_dump(struct net *net, struct notifier_block *nb,
			    struct netlink_ext_ack *extack)
{
	return fib_rules_dump(net, nb, RTNL_FAMILY_IP6MR, extack);
}

static unsigned int ip6mr_rules_seq_read(struct net *net)
{
	return fib_rules_seq_read(net, RTNL_FAMILY_IP6MR);
}
bool ip6mr_rule_default(const struct fib_rule *rule)
{
	return fib_rule_matchall(rule) && rule->action == FR_ACT_TO_TBL &&
	       rule->table == RT6_TABLE_DFLT && !rule->l3mdev;
}
EXPORT_SYMBOL(ip6mr_rule_default);
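/* A "default" rule in the sense checked above is the catch-all rule that
 * ip6mr_rules_init() installs via fib_default_rule_add(ops, 0x7fff,
 * RT6_TABLE_DFLT, 0): it matches everything, targets the default table
 * and is not bound to an l3mdev.
 */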
#else
#define ip6mr_for_each_table(mrt, net) \
	for (mrt = net->ipv6.mrt6; mrt; mrt = NULL)

static struct mr_table *ip6mr_mr_table_iter(struct net *net,
					    struct mr_table *mrt)
{
	if (!mrt)
		return net->ipv6.mrt6;
	return NULL;
}

static struct mr_table *ip6mr_get_table(struct net *net, u32 id)
{
	return net->ipv6.mrt6;
}

static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
			    struct mr_table **mrt)
{
	*mrt = net->ipv6.mrt6;
	return 0;
}

static int __net_init ip6mr_rules_init(struct net *net)
{
	struct mr_table *mrt;

	mrt = ip6mr_new_table(net, RT6_TABLE_DFLT);
	if (IS_ERR(mrt))
		return PTR_ERR(mrt);
	net->ipv6.mrt6 = mrt;
	return 0;
}

static void __net_exit ip6mr_rules_exit(struct net *net)
{
	rtnl_lock();
	ip6mr_free_table(net->ipv6.mrt6);
	net->ipv6.mrt6 = NULL;
	rtnl_unlock();
}

static int ip6mr_rules_dump(struct net *net, struct notifier_block *nb,
			    struct netlink_ext_ack *extack)
{
	return 0;
}

static unsigned int ip6mr_rules_seq_read(struct net *net)
{
	return 0;
}
#endif
static int ip6mr_hash_cmp(struct rhashtable_compare_arg *arg,
			  const void *ptr)
{
	const struct mfc6_cache_cmp_arg *cmparg = arg->key;
	struct mfc6_cache *c = (struct mfc6_cache *)ptr;

	return !ipv6_addr_equal(&c->mf6c_mcastgrp, &cmparg->mf6c_mcastgrp) ||
	       !ipv6_addr_equal(&c->mf6c_origin, &cmparg->mf6c_origin);
}

static const struct rhashtable_params ip6mr_rht_params = {
	.head_offset = offsetof(struct mr_mfc, mnode),
	.key_offset = offsetof(struct mfc6_cache, cmparg),
	.key_len = sizeof(struct mfc6_cache_cmp_arg),
	.nelem_hint = 3,
	.obj_cmpfn = ip6mr_hash_cmp,
	.automatic_shrinking = true,
};
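/* The rhashtable is keyed on the (origin, group) pair embedded in each
 * mfc6_cache, so an (S,G) lookup is a single hash probe; ip6mr_hash_cmp()
 * above deliberately returns 0 only when both addresses match.
 */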
static void ip6mr_new_table_set(struct mr_table *mrt,
				struct net *net)
{
#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
	list_add_tail_rcu(&mrt->list, &net->ipv6.mr6_tables);
#endif
}

static struct mfc6_cache_cmp_arg ip6mr_mr_table_ops_cmparg_any = {
	.mf6c_origin = IN6ADDR_ANY_INIT,
	.mf6c_mcastgrp = IN6ADDR_ANY_INIT,
};

static struct mr_table_ops ip6mr_mr_table_ops = {
	.rht_params = &ip6mr_rht_params,
	.cmparg_any = &ip6mr_mr_table_ops_cmparg_any,
};

static struct mr_table *ip6mr_new_table(struct net *net, u32 id)
{
	struct mr_table *mrt;

	mrt = ip6mr_get_table(net, id);
	if (mrt)
		return mrt;

	return mr_table_alloc(net, id, &ip6mr_mr_table_ops,
			      ipmr_expire_process, ip6mr_new_table_set);
}

static void ip6mr_free_table(struct mr_table *mrt)
{
	del_timer_sync(&mrt->ipmr_expire_timer);
	mroute_clean_tables(mrt, MRT6_FLUSH_MIFS | MRT6_FLUSH_MIFS_STATIC |
				 MRT6_FLUSH_MFC | MRT6_FLUSH_MFC_STATIC);
	rhltable_destroy(&mrt->mfc_hash);
	kfree(mrt);
}
#ifdef CONFIG_PROC_FS
/* The /proc interfaces to multicast routing
 * /proc/ip6_mr_cache /proc/ip6_mr_vif
 */

static void *ip6mr_vif_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(mrt_lock)
{
	struct mr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt;

	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
	if (!mrt)
		return ERR_PTR(-ENOENT);

	iter->mrt = mrt;

	read_lock(&mrt_lock);
	return mr_vif_seq_start(seq, pos);
}

static void ip6mr_vif_seq_stop(struct seq_file *seq, void *v)
	__releases(mrt_lock)
{
	read_unlock(&mrt_lock);
}

static int ip6mr_vif_seq_show(struct seq_file *seq, void *v)
{
	struct mr_vif_iter *iter = seq->private;
	struct mr_table *mrt = iter->mrt;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Interface      BytesIn  PktsIn  BytesOut PktsOut Flags\n");
	} else {
		const struct vif_device *vif = v;
		const char *name = vif->dev ? vif->dev->name : "none";

		seq_printf(seq,
			   "%2td %-10s %8ld %7ld  %8ld %7ld %05X\n",
			   vif - mrt->vif_table,
			   name, vif->bytes_in, vif->pkt_in,
			   vif->bytes_out, vif->pkt_out,
			   vif->flags);
	}
	return 0;
}

static const struct seq_operations ip6mr_vif_seq_ops = {
	.start = ip6mr_vif_seq_start,
	.next  = mr_vif_seq_next,
	.stop  = ip6mr_vif_seq_stop,
	.show  = ip6mr_vif_seq_show,
};
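/* Example /proc/net/ip6_mr_vif output produced by the show routine above
 * (values illustrative):
 *
 *	Interface      BytesIn  PktsIn  BytesOut PktsOut Flags
 *	 0 eth0            1500      10      3000      20 00000
 */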
static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt;

	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
	if (!mrt)
		return ERR_PTR(-ENOENT);

	return mr_mfc_seq_start(seq, pos, mrt, &mfc_unres_lock);
}

static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
{
	int n;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Group                            "
			 "Origin                           "
			 "Iif      Pkts  Bytes     Wrong  Oifs\n");
	} else {
		const struct mfc6_cache *mfc = v;
		const struct mr_mfc_iter *it = seq->private;
		struct mr_table *mrt = it->mrt;

		seq_printf(seq, "%pI6 %pI6 %-3hd",
			   &mfc->mf6c_mcastgrp, &mfc->mf6c_origin,
			   mfc->_c.mfc_parent);

		if (it->cache != &mrt->mfc_unres_queue) {
			seq_printf(seq, " %8lu %8lu %8lu",
				   mfc->_c.mfc_un.res.pkt,
				   mfc->_c.mfc_un.res.bytes,
				   mfc->_c.mfc_un.res.wrong_if);
			for (n = mfc->_c.mfc_un.res.minvif;
			     n < mfc->_c.mfc_un.res.maxvif; n++) {
				if (VIF_EXISTS(mrt, n) &&
				    mfc->_c.mfc_un.res.ttls[n] < 255)
					seq_printf(seq,
						   " %2d:%-3d", n,
						   mfc->_c.mfc_un.res.ttls[n]);
			}
		} else {
			/* unresolved mfc_caches don't contain
			 * pkt, bytes and wrong_if values
			 */
			seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
		}
		seq_putc(seq, '\n');
	}
	return 0;
}

static const struct seq_operations ipmr_mfc_seq_ops = {
	.start = ipmr_mfc_seq_start,
	.next  = mr_mfc_seq_next,
	.stop  = mr_mfc_seq_stop,
	.show  = ipmr_mfc_seq_show,
};
#endif
#ifdef CONFIG_IPV6_PIMSM_V2

static int pim6_rcv(struct sk_buff *skb)
{
	struct pimreghdr *pim;
	struct ipv6hdr *encap;
	struct net_device *reg_dev = NULL;
	struct net *net = dev_net(skb->dev);
	struct mr_table *mrt;
	struct flowi6 fl6 = {
		.flowi6_iif	= skb->dev->ifindex,
		.flowi6_mark	= skb->mark,
	};
	int reg_vif_num;

	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
		goto drop;

	pim = (struct pimreghdr *)skb_transport_header(skb);
	if (pim->type != ((PIM_VERSION << 4) | PIM_TYPE_REGISTER) ||
	    (pim->flags & PIM_NULL_REGISTER) ||
	    (csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
			     sizeof(*pim), IPPROTO_PIM,
			     csum_partial((void *)pim, sizeof(*pim), 0)) &&
	     csum_fold(skb_checksum(skb, 0, skb->len, 0))))
		goto drop;

	/* check if the inner packet is destined to mcast group */
	encap = (struct ipv6hdr *)(skb_transport_header(skb) +
				   sizeof(*pim));

	if (!ipv6_addr_is_multicast(&encap->daddr) ||
	    encap->payload_len == 0 ||
	    ntohs(encap->payload_len) + sizeof(*pim) > skb->len)
		goto drop;

	if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
		goto drop;
	reg_vif_num = mrt->mroute_reg_vif_num;

	read_lock(&mrt_lock);
	if (reg_vif_num >= 0)
		reg_dev = mrt->vif_table[reg_vif_num].dev;
	if (reg_dev)
		dev_hold(reg_dev);
	read_unlock(&mrt_lock);

	if (!reg_dev)
		goto drop;

	skb->mac_header = skb->network_header;
	skb_pull(skb, (u8 *)encap - skb->data);
	skb_reset_network_header(skb);
	skb->protocol = htons(ETH_P_IPV6);
	skb->ip_summed = CHECKSUM_NONE;

	skb_tunnel_rx(skb, reg_dev, dev_net(reg_dev));

	netif_rx(skb);

	dev_put(reg_dev);
	return 0;
 drop:
	kfree_skb(skb);
	return 0;
}

static const struct inet6_protocol pim6_protocol = {
	.handler	=	pim6_rcv,
};
/* Service routines creating virtual interfaces: PIMREG */

static netdev_tx_t reg_vif_xmit(struct sk_buff *skb,
				struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct mr_table *mrt;
	struct flowi6 fl6 = {
		.flowi6_oif	= dev->ifindex,
		.flowi6_iif	= skb->skb_iif ? : LOOPBACK_IFINDEX,
		.flowi6_mark	= skb->mark,
	};

	if (!pskb_inet_may_pull(skb))
		goto tx_err;

	if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
		goto tx_err;

	read_lock(&mrt_lock);
	dev->stats.tx_bytes += skb->len;
	dev->stats.tx_packets++;
	ip6mr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, MRT6MSG_WHOLEPKT);
	read_unlock(&mrt_lock);
	kfree_skb(skb);
	return NETDEV_TX_OK;

tx_err:
	dev->stats.tx_errors++;
	kfree_skb(skb);
	return NETDEV_TX_OK;
}

static int reg_vif_get_iflink(const struct net_device *dev)
{
	return 0;
}

static const struct net_device_ops reg_vif_netdev_ops = {
	.ndo_start_xmit	= reg_vif_xmit,
	.ndo_get_iflink = reg_vif_get_iflink,
};

static void reg_vif_setup(struct net_device *dev)
{
	dev->type		= ARPHRD_PIMREG;
	dev->mtu		= 1500 - sizeof(struct ipv6hdr) - 8;
	dev->flags		= IFF_NOARP;
	dev->netdev_ops		= &reg_vif_netdev_ops;
	dev->needs_free_netdev	= true;
	dev->features		|= NETIF_F_NETNS_LOCAL;
}
static struct net_device *ip6mr_reg_vif(struct net *net, struct mr_table *mrt)
{
	struct net_device *dev;
	char name[IFNAMSIZ];

	if (mrt->id == RT6_TABLE_DFLT)
		sprintf(name, "pim6reg");
	else
		sprintf(name, "pim6reg%u", mrt->id);

	dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, reg_vif_setup);
	if (!dev)
		return NULL;

	dev_net_set(dev, net);

	if (register_netdevice(dev)) {
		free_netdev(dev);
		return NULL;
	}

	if (dev_open(dev, NULL))
		goto failure;

	dev_hold(dev);
	return dev;

failure:
	unregister_netdevice(dev);
	return NULL;
}
#endif
static int call_ip6mr_vif_entry_notifiers(struct net *net,
					  enum fib_event_type event_type,
					  struct vif_device *vif,
					  mifi_t vif_index, u32 tb_id)
{
	return mr_call_vif_notifiers(net, RTNL_FAMILY_IP6MR, event_type,
				     vif, vif_index, tb_id,
				     &net->ipv6.ipmr_seq);
}

static int call_ip6mr_mfc_entry_notifiers(struct net *net,
					  enum fib_event_type event_type,
					  struct mfc6_cache *mfc, u32 tb_id)
{
	return mr_call_mfc_notifiers(net, RTNL_FAMILY_IP6MR, event_type,
				     &mfc->_c, tb_id, &net->ipv6.ipmr_seq);
}

/* Delete a VIF entry */
static int mif6_delete(struct mr_table *mrt, int vifi, int notify,
		       struct list_head *head)
{
	struct vif_device *v;
	struct net_device *dev;
	struct inet6_dev *in6_dev;

	if (vifi < 0 || vifi >= mrt->maxvif)
		return -EADDRNOTAVAIL;

	v = &mrt->vif_table[vifi];

	if (VIF_EXISTS(mrt, vifi))
		call_ip6mr_vif_entry_notifiers(read_pnet(&mrt->net),
					       FIB_EVENT_VIF_DEL, v, vifi,
					       mrt->id);

	write_lock_bh(&mrt_lock);
	dev = v->dev;
	v->dev = NULL;

	if (!dev) {
		write_unlock_bh(&mrt_lock);
		return -EADDRNOTAVAIL;
	}

#ifdef CONFIG_IPV6_PIMSM_V2
	if (vifi == mrt->mroute_reg_vif_num)
		mrt->mroute_reg_vif_num = -1;
#endif

	if (vifi + 1 == mrt->maxvif) {
		int tmp;
		for (tmp = vifi - 1; tmp >= 0; tmp--) {
			if (VIF_EXISTS(mrt, tmp))
				break;
		}
		mrt->maxvif = tmp + 1;
	}

	write_unlock_bh(&mrt_lock);

	dev_set_allmulti(dev, -1);

	in6_dev = __in6_dev_get(dev);
	if (in6_dev) {
		in6_dev->cnf.mc_forwarding--;
		inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
					     NETCONFA_MC_FORWARDING,
					     dev->ifindex, &in6_dev->cnf);
	}

	if ((v->flags & MIFF_REGISTER) && !notify)
		unregister_netdevice_queue(dev, head);

	dev_put(dev);
	return 0;
}
static inline void ip6mr_cache_free_rcu(struct rcu_head *head)
{
	struct mr_mfc *c = container_of(head, struct mr_mfc, rcu);

	kmem_cache_free(mrt_cachep, (struct mfc6_cache *)c);
}

static inline void ip6mr_cache_free(struct mfc6_cache *c)
{
	call_rcu(&c->_c.rcu, ip6mr_cache_free_rcu);
}

/* Destroy an unresolved cache entry, killing queued skbs
   and reporting error to netlink readers.
 */

static void ip6mr_destroy_unres(struct mr_table *mrt, struct mfc6_cache *c)
{
	struct net *net = read_pnet(&mrt->net);
	struct sk_buff *skb;

	atomic_dec(&mrt->cache_resolve_queue_len);

	while ((skb = skb_dequeue(&c->_c.mfc_un.unres.unresolved)) != NULL) {
		if (ipv6_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = skb_pull(skb,
							sizeof(struct ipv6hdr));
			nlh->nlmsg_type = NLMSG_ERROR;
			nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
			skb_trim(skb, nlh->nlmsg_len);
			((struct nlmsgerr *)nlmsg_data(nlh))->error = -ETIMEDOUT;
			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
		} else
			kfree_skb(skb);
	}

	ip6mr_cache_free(c);
}
/* Timer process for all the unresolved queue. */

static void ipmr_do_expire_process(struct mr_table *mrt)
{
	unsigned long now = jiffies;
	unsigned long expires = 10 * HZ;
	struct mr_mfc *c, *next;

	list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
		if (time_after(c->mfc_un.unres.expires, now)) {
			/* not yet... */
			unsigned long interval = c->mfc_un.unres.expires - now;
			if (interval < expires)
				expires = interval;
			continue;
		}

		list_del(&c->list);
		mr6_netlink_event(mrt, (struct mfc6_cache *)c, RTM_DELROUTE);
		ip6mr_destroy_unres(mrt, (struct mfc6_cache *)c);
	}

	if (!list_empty(&mrt->mfc_unres_queue))
		mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);
}

static void ipmr_expire_process(struct timer_list *t)
{
	struct mr_table *mrt = from_timer(mrt, t, ipmr_expire_timer);

	if (!spin_trylock(&mfc_unres_lock)) {
		mod_timer(&mrt->ipmr_expire_timer, jiffies + 1);
		return;
	}

	if (!list_empty(&mrt->mfc_unres_queue))
		ipmr_do_expire_process(mrt);

	spin_unlock(&mfc_unres_lock);
}
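/* Unresolved entries therefore live for at most 10 * HZ jiffies (ten
 * seconds); the timer re-arms itself with the soonest remaining expiry
 * rather than a fixed period.
 */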
/* Fill oifs list. It is called under write locked mrt_lock. */

static void ip6mr_update_thresholds(struct mr_table *mrt,
				    struct mr_mfc *cache,
				    unsigned char *ttls)
{
	int vifi;

	cache->mfc_un.res.minvif = MAXMIFS;
	cache->mfc_un.res.maxvif = 0;
	memset(cache->mfc_un.res.ttls, 255, MAXMIFS);

	for (vifi = 0; vifi < mrt->maxvif; vifi++) {
		if (VIF_EXISTS(mrt, vifi) &&
		    ttls[vifi] && ttls[vifi] < 255) {
			cache->mfc_un.res.ttls[vifi] = ttls[vifi];
			if (cache->mfc_un.res.minvif > vifi)
				cache->mfc_un.res.minvif = vifi;
			if (cache->mfc_un.res.maxvif <= vifi)
				cache->mfc_un.res.maxvif = vifi + 1;
		}
	}
	cache->mfc_un.res.lastuse = jiffies;
}
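/* TTL semantics: a threshold of 0 or 255 in the user-supplied array marks
 * the MIF as excluded; anything in between becomes the minimum hop limit a
 * packet needs before it is forwarded out that interface (compare the
 * hop_limit checks in ip6_mr_forward()).
 */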
static int mif6_add(struct net *net, struct mr_table *mrt,
		    struct mif6ctl *vifc, int mrtsock)
{
	int vifi = vifc->mif6c_mifi;
	struct vif_device *v = &mrt->vif_table[vifi];
	struct net_device *dev;
	struct inet6_dev *in6_dev;
	int err;

	/* Is vif busy ? */
	if (VIF_EXISTS(mrt, vifi))
		return -EADDRINUSE;

	switch (vifc->mif6c_flags) {
#ifdef CONFIG_IPV6_PIMSM_V2
	case MIFF_REGISTER:
		/*
		 * Special Purpose VIF in PIM
		 * All the packets will be sent to the daemon
		 */
		if (mrt->mroute_reg_vif_num >= 0)
			return -EADDRINUSE;
		dev = ip6mr_reg_vif(net, mrt);
		if (!dev)
			return -ENOBUFS;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			unregister_netdevice(dev);
			dev_put(dev);
			return err;
		}
		break;
#endif
	case 0:
		dev = dev_get_by_index(net, vifc->mif6c_pifi);
		if (!dev)
			return -EADDRNOTAVAIL;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			dev_put(dev);
			return err;
		}
		break;
	default:
		return -EINVAL;
	}

	in6_dev = __in6_dev_get(dev);
	if (in6_dev) {
		in6_dev->cnf.mc_forwarding++;
		inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
					     NETCONFA_MC_FORWARDING,
					     dev->ifindex, &in6_dev->cnf);
	}

	/* Fill in the VIF structures */
	vif_device_init(v, dev, vifc->vifc_rate_limit, vifc->vifc_threshold,
			vifc->mif6c_flags | (!mrtsock ? VIFF_STATIC : 0),
			MIFF_REGISTER);

	/* And finish update writing critical data */
	write_lock_bh(&mrt_lock);
	v->dev = dev;
#ifdef CONFIG_IPV6_PIMSM_V2
	if (v->flags & MIFF_REGISTER)
		mrt->mroute_reg_vif_num = vifi;
#endif
	if (vifi + 1 > mrt->maxvif)
		mrt->maxvif = vifi + 1;
	write_unlock_bh(&mrt_lock);
	call_ip6mr_vif_entry_notifiers(net, FIB_EVENT_VIF_ADD,
				       v, vifi, mrt->id);
	return 0;
}
static struct mfc6_cache *ip6mr_cache_find(struct mr_table *mrt,
					   const struct in6_addr *origin,
					   const struct in6_addr *mcastgrp)
{
	struct mfc6_cache_cmp_arg arg = {
		.mf6c_origin = *origin,
		.mf6c_mcastgrp = *mcastgrp,
	};

	return mr_mfc_find(mrt, &arg);
}

/* Look for a (*,G) entry */
static struct mfc6_cache *ip6mr_cache_find_any(struct mr_table *mrt,
					       struct in6_addr *mcastgrp,
					       mifi_t mifi)
{
	struct mfc6_cache_cmp_arg arg = {
		.mf6c_origin = in6addr_any,
		.mf6c_mcastgrp = *mcastgrp,
	};

	if (ipv6_addr_any(mcastgrp))
		return mr_mfc_find_any_parent(mrt, mifi);
	return mr_mfc_find_any(mrt, mifi, &arg);
}

/* Look for a (S,G,iif) entry if parent != -1 */
static struct mfc6_cache *
ip6mr_cache_find_parent(struct mr_table *mrt,
			const struct in6_addr *origin,
			const struct in6_addr *mcastgrp,
			int parent)
{
	struct mfc6_cache_cmp_arg arg = {
		.mf6c_origin = *origin,
		.mf6c_mcastgrp = *mcastgrp,
	};

	return mr_mfc_find_parent(mrt, &arg, parent);
}
/* Allocate a multicast cache entry */
static struct mfc6_cache *ip6mr_cache_alloc(void)
{
	struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);

	if (!c)
		return NULL;
	c->_c.mfc_un.res.last_assert = jiffies - MFC_ASSERT_THRESH - 1;
	c->_c.mfc_un.res.minvif = MAXMIFS;
	c->_c.free = ip6mr_cache_free_rcu;
	refcount_set(&c->_c.mfc_un.res.refcount, 1);
	return c;
}

static struct mfc6_cache *ip6mr_cache_alloc_unres(void)
{
	struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);

	if (!c)
		return NULL;
	skb_queue_head_init(&c->_c.mfc_un.unres.unresolved);
	c->_c.mfc_un.unres.expires = jiffies + 10 * HZ;
	return c;
}

/*
 *	A cache entry has gone into a resolved state from queued
 */

static void ip6mr_cache_resolve(struct net *net, struct mr_table *mrt,
				struct mfc6_cache *uc, struct mfc6_cache *c)
{
	struct sk_buff *skb;

	/*
	 *	Play the pending entries through our router
	 */

	while ((skb = __skb_dequeue(&uc->_c.mfc_un.unres.unresolved))) {
		if (ipv6_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = skb_pull(skb,
							sizeof(struct ipv6hdr));

			if (mr_fill_mroute(mrt, skb, &c->_c,
					   nlmsg_data(nlh)) > 0) {
				nlh->nlmsg_len = skb_tail_pointer(skb) - (u8 *)nlh;
			} else {
				nlh->nlmsg_type = NLMSG_ERROR;
				nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
				skb_trim(skb, nlh->nlmsg_len);
				((struct nlmsgerr *)nlmsg_data(nlh))->error = -EMSGSIZE;
			}
			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
		} else
			ip6_mr_forward(net, mrt, skb->dev, skb, c);
	}
}
/*
 *	Bounce a cache query up to pim6sd and netlink.
 *
 *	Called under mrt_lock.
 */

static int ip6mr_cache_report(struct mr_table *mrt, struct sk_buff *pkt,
			      mifi_t mifi, int assert)
{
	struct sock *mroute6_sk;
	struct sk_buff *skb;
	struct mrt6msg *msg;
	int ret;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (assert == MRT6MSG_WHOLEPKT)
		skb = skb_realloc_headroom(pkt, -skb_network_offset(pkt)
						+sizeof(*msg));
	else
#endif
		skb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(*msg), GFP_ATOMIC);

	if (!skb)
		return -ENOBUFS;

	/* I suppose that internal messages
	 * do not require checksums */

	skb->ip_summed = CHECKSUM_UNNECESSARY;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (assert == MRT6MSG_WHOLEPKT) {
		/* Ugly, but we have no choice with this interface.
		   Duplicate old header, fix length etc.
		   And all this only to mangle msg->im6_msgtype and
		   to set msg->im6_mbz to "mbz" :-)
		 */
		skb_push(skb, -skb_network_offset(pkt));

		skb_push(skb, sizeof(*msg));
		skb_reset_transport_header(skb);
		msg = (struct mrt6msg *)skb_transport_header(skb);
		msg->im6_mbz = 0;
		msg->im6_msgtype = MRT6MSG_WHOLEPKT;
		msg->im6_mif = mrt->mroute_reg_vif_num;
		msg->im6_pad = 0;
		msg->im6_src = ipv6_hdr(pkt)->saddr;
		msg->im6_dst = ipv6_hdr(pkt)->daddr;

		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else
#endif
	{
	/*
	 *	Copy the IP header
	 */

	skb_put(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	skb_copy_to_linear_data(skb, ipv6_hdr(pkt), sizeof(struct ipv6hdr));

	/*
	 *	Add our header
	 */
	skb_put(skb, sizeof(*msg));
	skb_reset_transport_header(skb);
	msg = (struct mrt6msg *)skb_transport_header(skb);

	msg->im6_mbz = 0;
	msg->im6_msgtype = assert;
	msg->im6_mif = mifi;
	msg->im6_pad = 0;
	msg->im6_src = ipv6_hdr(pkt)->saddr;
	msg->im6_dst = ipv6_hdr(pkt)->daddr;

	skb_dst_set(skb, dst_clone(skb_dst(pkt)));
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	rcu_read_lock();
	mroute6_sk = rcu_dereference(mrt->mroute_sk);
	if (!mroute6_sk) {
		rcu_read_unlock();
		kfree_skb(skb);
		return -EINVAL;
	}

	mrt6msg_netlink_event(mrt, skb);

	/* Deliver to user space multicast routing algorithms */
	ret = sock_queue_rcv_skb(mroute6_sk, skb);
	rcu_read_unlock();
	if (ret < 0) {
		net_warn_ratelimited("mroute6: pending queue full, dropping entries\n");
		kfree_skb(skb);
	}

	return ret;
}
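/* Note that every report is delivered twice: as an MRT6MSG on the mroute
 * socket for the routing daemon, and as an RTM_NEWCACHEREPORT netlink
 * notification via mrt6msg_netlink_event() for passive listeners.
 */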
/* Queue a packet for resolution. It gets locked cache entry! */
static int ip6mr_cache_unresolved(struct mr_table *mrt, mifi_t mifi,
				  struct sk_buff *skb, struct net_device *dev)
{
	struct mfc6_cache *c;
	bool found = false;
	int err;

	spin_lock_bh(&mfc_unres_lock);
	list_for_each_entry(c, &mrt->mfc_unres_queue, _c.list) {
		if (ipv6_addr_equal(&c->mf6c_mcastgrp, &ipv6_hdr(skb)->daddr) &&
		    ipv6_addr_equal(&c->mf6c_origin, &ipv6_hdr(skb)->saddr)) {
			found = true;
			break;
		}
	}

	if (!found) {
		/*
		 *	Create a new entry if allowable
		 */

		c = ip6mr_cache_alloc_unres();
		if (!c) {
			spin_unlock_bh(&mfc_unres_lock);

			kfree_skb(skb);
			return -ENOBUFS;
		}

		/* Fill in the new cache entry */
		c->_c.mfc_parent = -1;
		c->mf6c_origin = ipv6_hdr(skb)->saddr;
		c->mf6c_mcastgrp = ipv6_hdr(skb)->daddr;

		/*
		 *	Reflect first query at pim6sd
		 */
		err = ip6mr_cache_report(mrt, skb, mifi, MRT6MSG_NOCACHE);
		if (err < 0) {
			/* If the report failed throw the cache entry
			   out - Brad Parker
			 */
			spin_unlock_bh(&mfc_unres_lock);

			ip6mr_cache_free(c);
			kfree_skb(skb);
			return err;
		}

		atomic_inc(&mrt->cache_resolve_queue_len);
		list_add(&c->_c.list, &mrt->mfc_unres_queue);
		mr6_netlink_event(mrt, c, RTM_NEWROUTE);

		ipmr_do_expire_process(mrt);
	}

	/* See if we can append the packet */
	if (c->_c.mfc_un.unres.unresolved.qlen > 3) {
		kfree_skb(skb);
		err = -ENOBUFS;
	} else {
		if (dev) {
			skb->dev = dev;
			skb->skb_iif = dev->ifindex;
		}
		skb_queue_tail(&c->_c.mfc_un.unres.unresolved, skb);
		err = 0;
	}

	spin_unlock_bh(&mfc_unres_lock);
	return err;
}
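/* At most four packets (the qlen > 3 check above) are parked per unresolved
 * entry while the daemon is asked to resolve it; anything beyond that is
 * dropped with -ENOBUFS.
 */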
/*
 *	MFC6 cache manipulation by user space
 */

static int ip6mr_mfc_delete(struct mr_table *mrt, struct mf6cctl *mfc,
			    int parent)
{
	struct mfc6_cache *c;

	/* The entries are added/deleted only under RTNL */
	rcu_read_lock();
	c = ip6mr_cache_find_parent(mrt, &mfc->mf6cc_origin.sin6_addr,
				    &mfc->mf6cc_mcastgrp.sin6_addr, parent);
	rcu_read_unlock();
	if (!c)
		return -ENOENT;
	rhltable_remove(&mrt->mfc_hash, &c->_c.mnode, ip6mr_rht_params);
	list_del_rcu(&c->_c.list);

	call_ip6mr_mfc_entry_notifiers(read_pnet(&mrt->net),
				       FIB_EVENT_ENTRY_DEL, c, mrt->id);
	mr6_netlink_event(mrt, c, RTM_DELROUTE);
	mr_cache_put(&c->_c);
	return 0;
}

static int ip6mr_device_event(struct notifier_block *this,
			      unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct mr_table *mrt;
	struct vif_device *v;
	int ct;

	if (event != NETDEV_UNREGISTER)
		return NOTIFY_DONE;

	ip6mr_for_each_table(mrt, net) {
		v = &mrt->vif_table[0];
		for (ct = 0; ct < mrt->maxvif; ct++, v++) {
			if (v->dev == dev)
				mif6_delete(mrt, ct, 1, NULL);
		}
	}

	return NOTIFY_DONE;
}

static unsigned int ip6mr_seq_read(struct net *net)
{
	ASSERT_RTNL();

	return net->ipv6.ipmr_seq + ip6mr_rules_seq_read(net);
}

static int ip6mr_dump(struct net *net, struct notifier_block *nb,
		      struct netlink_ext_ack *extack)
{
	return mr_dump(net, nb, RTNL_FAMILY_IP6MR, ip6mr_rules_dump,
		       ip6mr_mr_table_iter, &mrt_lock, extack);
}

static struct notifier_block ip6_mr_notifier = {
	.notifier_call = ip6mr_device_event
};

static const struct fib_notifier_ops ip6mr_notifier_ops_template = {
	.family		= RTNL_FAMILY_IP6MR,
	.fib_seq_read	= ip6mr_seq_read,
	.fib_dump	= ip6mr_dump,
	.owner		= THIS_MODULE,
};

static int __net_init ip6mr_notifier_init(struct net *net)
{
	struct fib_notifier_ops *ops;

	net->ipv6.ipmr_seq = 0;

	ops = fib_notifier_ops_register(&ip6mr_notifier_ops_template, net);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	net->ipv6.ip6mr_notifier_ops = ops;

	return 0;
}

static void __net_exit ip6mr_notifier_exit(struct net *net)
{
	fib_notifier_ops_unregister(net->ipv6.ip6mr_notifier_ops);
	net->ipv6.ip6mr_notifier_ops = NULL;
}
/* Setup for IP multicast routing */
static int __net_init ip6mr_net_init(struct net *net)
{
	int err;

	err = ip6mr_notifier_init(net);
	if (err)
		return err;

	err = ip6mr_rules_init(net);
	if (err < 0)
		goto ip6mr_rules_fail;

#ifdef CONFIG_PROC_FS
	err = -ENOMEM;
	if (!proc_create_net("ip6_mr_vif", 0, net->proc_net, &ip6mr_vif_seq_ops,
			sizeof(struct mr_vif_iter)))
		goto proc_vif_fail;
	if (!proc_create_net("ip6_mr_cache", 0, net->proc_net, &ipmr_mfc_seq_ops,
			sizeof(struct mr_mfc_iter)))
		goto proc_cache_fail;
#endif

	return 0;

#ifdef CONFIG_PROC_FS
proc_cache_fail:
	remove_proc_entry("ip6_mr_vif", net->proc_net);
proc_vif_fail:
	ip6mr_rules_exit(net);
#endif
ip6mr_rules_fail:
	ip6mr_notifier_exit(net);
	return err;
}

static void __net_exit ip6mr_net_exit(struct net *net)
{
#ifdef CONFIG_PROC_FS
	remove_proc_entry("ip6_mr_cache", net->proc_net);
	remove_proc_entry("ip6_mr_vif", net->proc_net);
#endif
	ip6mr_rules_exit(net);
	ip6mr_notifier_exit(net);
}

static struct pernet_operations ip6mr_net_ops = {
	.init = ip6mr_net_init,
	.exit = ip6mr_net_exit,
};

int __init ip6_mr_init(void)
{
	int err;

	mrt_cachep = kmem_cache_create("ip6_mrt_cache",
				       sizeof(struct mfc6_cache),
				       0, SLAB_HWCACHE_ALIGN,
				       NULL);
	if (!mrt_cachep)
		return -ENOMEM;

	err = register_pernet_subsys(&ip6mr_net_ops);
	if (err)
		goto reg_pernet_fail;

	err = register_netdevice_notifier(&ip6_mr_notifier);
	if (err)
		goto reg_notif_fail;
#ifdef CONFIG_IPV6_PIMSM_V2
	if (inet6_add_protocol(&pim6_protocol, IPPROTO_PIM) < 0) {
		pr_err("%s: can't add PIM protocol\n", __func__);
		err = -EAGAIN;
		goto add_proto_fail;
	}
#endif
	err = rtnl_register_module(THIS_MODULE, RTNL_FAMILY_IP6MR, RTM_GETROUTE,
				   NULL, ip6mr_rtm_dumproute, 0);
	if (err == 0)
		return 0;

#ifdef CONFIG_IPV6_PIMSM_V2
	inet6_del_protocol(&pim6_protocol, IPPROTO_PIM);
add_proto_fail:
	unregister_netdevice_notifier(&ip6_mr_notifier);
#endif
reg_notif_fail:
	unregister_pernet_subsys(&ip6mr_net_ops);
reg_pernet_fail:
	kmem_cache_destroy(mrt_cachep);
	return err;
}

void ip6_mr_cleanup(void)
{
	rtnl_unregister(RTNL_FAMILY_IP6MR, RTM_GETROUTE);
#ifdef CONFIG_IPV6_PIMSM_V2
	inet6_del_protocol(&pim6_protocol, IPPROTO_PIM);
#endif
	unregister_netdevice_notifier(&ip6_mr_notifier);
	unregister_pernet_subsys(&ip6mr_net_ops);
	kmem_cache_destroy(mrt_cachep);
}
static int ip6mr_mfc_add(struct net *net, struct mr_table *mrt,
			 struct mf6cctl *mfc, int mrtsock, int parent)
{
	unsigned char ttls[MAXMIFS];
	struct mfc6_cache *uc, *c;
	struct mr_mfc *_uc;
	bool found;
	int i, err;

	if (mfc->mf6cc_parent >= MAXMIFS)
		return -ENFILE;

	memset(ttls, 255, MAXMIFS);
	for (i = 0; i < MAXMIFS; i++) {
		if (IF_ISSET(i, &mfc->mf6cc_ifset))
			ttls[i] = 1;
	}

	/* The entries are added/deleted only under RTNL */
	rcu_read_lock();
	c = ip6mr_cache_find_parent(mrt, &mfc->mf6cc_origin.sin6_addr,
				    &mfc->mf6cc_mcastgrp.sin6_addr, parent);
	rcu_read_unlock();
	if (c) {
		write_lock_bh(&mrt_lock);
		c->_c.mfc_parent = mfc->mf6cc_parent;
		ip6mr_update_thresholds(mrt, &c->_c, ttls);
		if (!mrtsock)
			c->_c.mfc_flags |= MFC_STATIC;
		write_unlock_bh(&mrt_lock);
		call_ip6mr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_REPLACE,
					       c, mrt->id);
		mr6_netlink_event(mrt, c, RTM_NEWROUTE);
		return 0;
	}

	if (!ipv6_addr_any(&mfc->mf6cc_mcastgrp.sin6_addr) &&
	    !ipv6_addr_is_multicast(&mfc->mf6cc_mcastgrp.sin6_addr))
		return -EINVAL;

	c = ip6mr_cache_alloc();
	if (!c)
		return -ENOMEM;

	c->mf6c_origin = mfc->mf6cc_origin.sin6_addr;
	c->mf6c_mcastgrp = mfc->mf6cc_mcastgrp.sin6_addr;
	c->_c.mfc_parent = mfc->mf6cc_parent;
	ip6mr_update_thresholds(mrt, &c->_c, ttls);
	if (!mrtsock)
		c->_c.mfc_flags |= MFC_STATIC;

	err = rhltable_insert_key(&mrt->mfc_hash, &c->cmparg, &c->_c.mnode,
				  ip6mr_rht_params);
	if (err) {
		pr_err("ip6mr: rhtable insert error %d\n", err);
		ip6mr_cache_free(c);
		return err;
	}
	list_add_tail_rcu(&c->_c.list, &mrt->mfc_cache_list);

	/* Check to see if we resolved a queued list. If so we
	 * need to send on the frames and tidy up.
	 */
	found = false;
	spin_lock_bh(&mfc_unres_lock);
	list_for_each_entry(_uc, &mrt->mfc_unres_queue, list) {
		uc = (struct mfc6_cache *)_uc;
		if (ipv6_addr_equal(&uc->mf6c_origin, &c->mf6c_origin) &&
		    ipv6_addr_equal(&uc->mf6c_mcastgrp, &c->mf6c_mcastgrp)) {
			list_del(&_uc->list);
			atomic_dec(&mrt->cache_resolve_queue_len);
			found = true;
			break;
		}
	}
	if (list_empty(&mrt->mfc_unres_queue))
		del_timer(&mrt->ipmr_expire_timer);
	spin_unlock_bh(&mfc_unres_lock);

	if (found) {
		ip6mr_cache_resolve(net, mrt, uc, c);
		ip6mr_cache_free(uc);
	}
	call_ip6mr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_ADD,
				       c, mrt->id);
	mr6_netlink_event(mrt, c, RTM_NEWROUTE);
	return 0;
}
/*
 *	Close the multicast socket, and clear the vif tables etc
 */

static void mroute_clean_tables(struct mr_table *mrt, int flags)
{
	struct mr_mfc *c, *tmp;
	LIST_HEAD(list);
	int i;

	/* Shut down all active vif entries */
	if (flags & (MRT6_FLUSH_MIFS | MRT6_FLUSH_MIFS_STATIC)) {
		for (i = 0; i < mrt->maxvif; i++) {
			if (((mrt->vif_table[i].flags & VIFF_STATIC) &&
			     !(flags & MRT6_FLUSH_MIFS_STATIC)) ||
			    (!(mrt->vif_table[i].flags & VIFF_STATIC) && !(flags & MRT6_FLUSH_MIFS)))
				continue;
			mif6_delete(mrt, i, 0, &list);
		}
		unregister_netdevice_many(&list);
	}

	/* Wipe the cache */
	if (flags & (MRT6_FLUSH_MFC | MRT6_FLUSH_MFC_STATIC)) {
		list_for_each_entry_safe(c, tmp, &mrt->mfc_cache_list, list) {
			if (((c->mfc_flags & MFC_STATIC) && !(flags & MRT6_FLUSH_MFC_STATIC)) ||
			    (!(c->mfc_flags & MFC_STATIC) && !(flags & MRT6_FLUSH_MFC)))
				continue;
			rhltable_remove(&mrt->mfc_hash, &c->mnode, ip6mr_rht_params);
			list_del_rcu(&c->list);
			call_ip6mr_mfc_entry_notifiers(read_pnet(&mrt->net),
						       FIB_EVENT_ENTRY_DEL,
						       (struct mfc6_cache *)c, mrt->id);
			mr6_netlink_event(mrt, (struct mfc6_cache *)c, RTM_DELROUTE);
			mr_cache_put(c);
		}
	}

	if (flags & MRT6_FLUSH_MFC) {
		if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
			spin_lock_bh(&mfc_unres_lock);
			list_for_each_entry_safe(c, tmp, &mrt->mfc_unres_queue, list) {
				list_del(&c->list);
				mr6_netlink_event(mrt, (struct mfc6_cache *)c,
						  RTM_DELROUTE);
				ip6mr_destroy_unres(mrt, (struct mfc6_cache *)c);
			}
			spin_unlock_bh(&mfc_unres_lock);
		}
	}
}
static int ip6mr_sk_init(struct mr_table *mrt, struct sock *sk)
{
	int err = 0;
	struct net *net = sock_net(sk);

	rtnl_lock();
	write_lock_bh(&mrt_lock);
	if (rtnl_dereference(mrt->mroute_sk)) {
		err = -EADDRINUSE;
	} else {
		rcu_assign_pointer(mrt->mroute_sk, sk);
		sock_set_flag(sk, SOCK_RCU_FREE);
		net->ipv6.devconf_all->mc_forwarding++;
	}
	write_unlock_bh(&mrt_lock);

	if (!err)
		inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
					     NETCONFA_MC_FORWARDING,
					     NETCONFA_IFINDEX_ALL,
					     net->ipv6.devconf_all);
	rtnl_unlock();

	return err;
}

int ip6mr_sk_done(struct sock *sk)
{
	int err = -EACCES;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	if (sk->sk_type != SOCK_RAW ||
	    inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
		return err;

	rtnl_lock();
	ip6mr_for_each_table(mrt, net) {
		if (sk == rtnl_dereference(mrt->mroute_sk)) {
			write_lock_bh(&mrt_lock);
			RCU_INIT_POINTER(mrt->mroute_sk, NULL);
			/* Note that mroute_sk had SOCK_RCU_FREE set,
			 * so the RCU grace period before sk freeing
			 * is guaranteed by sk_destruct()
			 */
			net->ipv6.devconf_all->mc_forwarding--;
			write_unlock_bh(&mrt_lock);
			inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
						     NETCONFA_MC_FORWARDING,
						     NETCONFA_IFINDEX_ALL,
						     net->ipv6.devconf_all);

			mroute_clean_tables(mrt, MRT6_FLUSH_MIFS | MRT6_FLUSH_MFC);
			err = 0;
			break;
		}
	}
	rtnl_unlock();

	return err;
}

bool mroute6_is_socket(struct net *net, struct sk_buff *skb)
{
	struct mr_table *mrt;
	struct flowi6 fl6 = {
		.flowi6_iif	= skb->skb_iif ? : LOOPBACK_IFINDEX,
		.flowi6_oif	= skb->dev->ifindex,
		.flowi6_mark	= skb->mark,
	};

	if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
		return false;

	return rcu_access_pointer(mrt->mroute_sk);
}
EXPORT_SYMBOL(mroute6_is_socket);
/*
 *	Socket options and virtual interface manipulation. The whole
 *	virtual interface system is a complete heap, but unfortunately
 *	that's how BSD mrouted happens to think. Maybe one day with a proper
 *	MOSPF/PIM router set up we can clean this up.
 */

int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen)
{
	int ret, parent = 0;
	struct mif6ctl vif;
	struct mf6cctl mfc;
	mifi_t mifi;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	if (sk->sk_type != SOCK_RAW ||
	    inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
		return -EOPNOTSUPP;

	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
	if (!mrt)
		return -ENOENT;

	if (optname != MRT6_INIT) {
		if (sk != rcu_access_pointer(mrt->mroute_sk) &&
		    !ns_capable(net->user_ns, CAP_NET_ADMIN))
			return -EACCES;
	}

	switch (optname) {
	case MRT6_INIT:
		if (optlen < sizeof(int))
			return -EINVAL;

		return ip6mr_sk_init(mrt, sk);

	case MRT6_DONE:
		return ip6mr_sk_done(sk);

	case MRT6_ADD_MIF:
		if (optlen < sizeof(vif))
			return -EINVAL;
		if (copy_from_user(&vif, optval, sizeof(vif)))
			return -EFAULT;
		if (vif.mif6c_mifi >= MAXMIFS)
			return -ENFILE;
		rtnl_lock();
		ret = mif6_add(net, mrt, &vif,
			       sk == rtnl_dereference(mrt->mroute_sk));
		rtnl_unlock();
		return ret;

	case MRT6_DEL_MIF:
		if (optlen < sizeof(mifi_t))
			return -EINVAL;
		if (copy_from_user(&mifi, optval, sizeof(mifi_t)))
			return -EFAULT;
		rtnl_lock();
		ret = mif6_delete(mrt, mifi, 0, NULL);
		rtnl_unlock();
		return ret;

	/*
	 *	Manipulate the forwarding caches. These live
	 *	in a sort of kernel/user symbiosis.
	 */
	case MRT6_ADD_MFC:
	case MRT6_DEL_MFC:
		parent = -1;
		/* fall through */
	case MRT6_ADD_MFC_PROXY:
	case MRT6_DEL_MFC_PROXY:
		if (optlen < sizeof(mfc))
			return -EINVAL;
		if (copy_from_user(&mfc, optval, sizeof(mfc)))
			return -EFAULT;
		if (parent == 0)
			parent = mfc.mf6cc_parent;
		rtnl_lock();
		if (optname == MRT6_DEL_MFC || optname == MRT6_DEL_MFC_PROXY)
			ret = ip6mr_mfc_delete(mrt, &mfc, parent);
		else
			ret = ip6mr_mfc_add(net, mrt, &mfc,
					    sk ==
					    rtnl_dereference(mrt->mroute_sk),
					    parent);
		rtnl_unlock();
		return ret;

	case MRT6_FLUSH:
	{
		int flags;

		if (optlen != sizeof(flags))
			return -EINVAL;
		if (get_user(flags, (int __user *)optval))
			return -EFAULT;
		rtnl_lock();
		mroute_clean_tables(mrt, flags);
		rtnl_unlock();
		return 0;
	}

	/*
	 *	Control PIM assert (to activate pim will activate assert)
	 */
	case MRT6_ASSERT:
	{
		int v;

		if (optlen != sizeof(v))
			return -EINVAL;
		if (get_user(v, (int __user *)optval))
			return -EFAULT;
		mrt->mroute_do_assert = v;
		return 0;
	}

#ifdef CONFIG_IPV6_PIMSM_V2
	case MRT6_PIM:
	{
		int v;

		if (optlen != sizeof(v))
			return -EINVAL;
		if (get_user(v, (int __user *)optval))
			return -EFAULT;
		v = !!v;
		ret = 0;
		if (v != mrt->mroute_do_pim) {
			mrt->mroute_do_pim = v;
			mrt->mroute_do_assert = v;
		}
		return ret;
	}
#endif
#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
	case MRT6_TABLE:
	{
		u32 v;

		if (optlen != sizeof(u32))
			return -EINVAL;
		if (get_user(v, (u32 __user *)optval))
			return -EFAULT;
		/* "pim6reg%u" should not exceed 16 bytes (IFNAMSIZ) */
		if (v != RT_TABLE_DEFAULT && v >= 100000000)
			return -EINVAL;
		if (sk == rcu_access_pointer(mrt->mroute_sk))
			return -EBUSY;

		rtnl_lock();
		ret = 0;
		mrt = ip6mr_new_table(net, v);
		if (IS_ERR(mrt))
			ret = PTR_ERR(mrt);
		else
			raw6_sk(sk)->ip6mr_table = v;
		rtnl_unlock();
		return ret;
	}
#endif
	/*
	 *	Spurious command, or MRT6_VERSION which you cannot
	 *	set.
	 */
	default:
		return -ENOPROTOOPT;
	}
}
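/* Userspace sketch (illustrative, not kernel code): a routing daemon such
 * as pim6sd typically becomes the mroute socket and registers interfaces
 * roughly like this, assuming a raw ICMPv6 socket and the IPPROTO_IPV6
 * option level; names and values are examples only:
 *
 *	int s = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);
 *	int on = 1;
 *	struct mif6ctl mc = { .mif6c_mifi = 0,
 *			      .mif6c_pifi = if_nametoindex("eth0") };
 *
 *	setsockopt(s, IPPROTO_IPV6, MRT6_INIT, &on, sizeof(on));
 *	setsockopt(s, IPPROTO_IPV6, MRT6_ADD_MIF, &mc, sizeof(mc));
 */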
/*
 *	Getsock opt support for the multicast routing system.
 */

int ip6_mroute_getsockopt(struct sock *sk, int optname, char __user *optval,
			  int __user *optlen)
{
	int olr;
	int val;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	if (sk->sk_type != SOCK_RAW ||
	    inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
		return -EOPNOTSUPP;

	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
	if (!mrt)
		return -ENOENT;

	switch (optname) {
	case MRT6_VERSION:
		val = 0x0305;
		break;
#ifdef CONFIG_IPV6_PIMSM_V2
	case MRT6_PIM:
		val = mrt->mroute_do_pim;
		break;
#endif
	case MRT6_ASSERT:
		val = mrt->mroute_do_assert;
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (get_user(olr, optlen))
		return -EFAULT;

	olr = min_t(int, olr, sizeof(int));
	if (olr < 0)
		return -EINVAL;

	if (put_user(olr, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, olr))
		return -EFAULT;
	return 0;
}

/*
 *	The IP multicast ioctl support routines.
 */

int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg)
{
	struct sioc_sg_req6 sr;
	struct sioc_mif_req6 vr;
	struct vif_device *vif;
	struct mfc6_cache *c;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
	if (!mrt)
		return -ENOENT;

	switch (cmd) {
	case SIOCGETMIFCNT_IN6:
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.mifi >= mrt->maxvif)
			return -EINVAL;
		vr.mifi = array_index_nospec(vr.mifi, mrt->maxvif);
		read_lock(&mrt_lock);
		vif = &mrt->vif_table[vr.mifi];
		if (VIF_EXISTS(mrt, vr.mifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT_IN6:
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		rcu_read_lock();
		c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
		if (c) {
			sr.pktcnt = c->_c.mfc_un.res.pkt;
			sr.bytecnt = c->_c.mfc_un.res.bytes;
			sr.wrong_if = c->_c.mfc_un.res.wrong_if;
			rcu_read_unlock();

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		rcu_read_unlock();
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}
#ifdef CONFIG_COMPAT
struct compat_sioc_sg_req6 {
	struct sockaddr_in6 src;
	struct sockaddr_in6 grp;
	compat_ulong_t pktcnt;
	compat_ulong_t bytecnt;
	compat_ulong_t wrong_if;
};

struct compat_sioc_mif_req6 {
	mifi_t	mifi;
	compat_ulong_t icount;
	compat_ulong_t ocount;
	compat_ulong_t ibytes;
	compat_ulong_t obytes;
};

int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
{
	struct compat_sioc_sg_req6 sr;
	struct compat_sioc_mif_req6 vr;
	struct vif_device *vif;
	struct mfc6_cache *c;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
	if (!mrt)
		return -ENOENT;

	switch (cmd) {
	case SIOCGETMIFCNT_IN6:
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.mifi >= mrt->maxvif)
			return -EINVAL;
		vr.mifi = array_index_nospec(vr.mifi, mrt->maxvif);
		read_lock(&mrt_lock);
		vif = &mrt->vif_table[vr.mifi];
		if (VIF_EXISTS(mrt, vr.mifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT_IN6:
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		rcu_read_lock();
		c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
		if (c) {
			sr.pktcnt = c->_c.mfc_un.res.pkt;
			sr.bytecnt = c->_c.mfc_un.res.bytes;
			sr.wrong_if = c->_c.mfc_un.res.wrong_if;
			rcu_read_unlock();

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		rcu_read_unlock();
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}
#endif
static inline int ip6mr_forward2_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
		      IPSTATS_MIB_OUTFORWDATAGRAMS);
	IP6_ADD_STATS(net, ip6_dst_idev(skb_dst(skb)),
		      IPSTATS_MIB_OUTOCTETS, skb->len);
	return dst_output(net, sk, skb);
}

/*
 *	Processing handlers for ip6mr_forward
 */

static int ip6mr_forward2(struct net *net, struct mr_table *mrt,
			  struct sk_buff *skb, int vifi)
{
	struct ipv6hdr *ipv6h;
	struct vif_device *vif = &mrt->vif_table[vifi];
	struct net_device *dev;
	struct dst_entry *dst;
	struct flowi6 fl6;

	if (!vif->dev)
		goto out_free;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (vif->flags & MIFF_REGISTER) {
		vif->pkt_out++;
		vif->bytes_out += skb->len;
		vif->dev->stats.tx_bytes += skb->len;
		vif->dev->stats.tx_packets++;
		ip6mr_cache_report(mrt, skb, vifi, MRT6MSG_WHOLEPKT);
		goto out_free;
	}
#endif

	ipv6h = ipv6_hdr(skb);

	fl6 = (struct flowi6) {
		.flowi6_oif = vif->link,
		.daddr = ipv6h->daddr,
	};

	dst = ip6_route_output(net, NULL, &fl6);
	if (dst->error) {
		dst_release(dst);
		goto out_free;
	}

	skb_dst_drop(skb);
	skb_dst_set(skb, dst);

	/*
	 * RFC1584 teaches, that DVMRP/PIM router must deliver packets locally
	 * not only before forwarding, but after forwarding on all output
	 * interfaces. It is clear, if mrouter runs a multicasting
	 * program, it should receive packets not depending to what interface
	 * program is joined.
	 * If we will not make it, the program will have to join on all
	 * interfaces. On the other hand, multihoming host (or router, but
	 * not mrouter) cannot join to more than one interface - it will
	 * result in receiving multiple packets.
	 */
	dev = vif->dev;
	skb->dev = dev;
	vif->pkt_out++;
	vif->bytes_out += skb->len;

	/* We are about to write */
	/* XXX: extension headers? */
	if (skb_cow(skb, sizeof(*ipv6h) + LL_RESERVED_SPACE(dev)))
		goto out_free;

	ipv6h = ipv6_hdr(skb);
	ipv6h->hop_limit--;

	IP6CB(skb)->flags |= IP6SKB_FORWARDED;

	return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD,
		       net, NULL, skb, skb->dev, dev,
		       ip6mr_forward2_finish);

out_free:
	kfree_skb(skb);
	return 0;
}

static int ip6mr_find_vif(struct mr_table *mrt, struct net_device *dev)
{
	int ct;

	for (ct = mrt->maxvif - 1; ct >= 0; ct--) {
		if (mrt->vif_table[ct].dev == dev)
			break;
	}
	return ct;
}
static void ip6_mr_forward(struct net *net, struct mr_table *mrt,
			   struct net_device *dev, struct sk_buff *skb,
			   struct mfc6_cache *c)
{
	int psend = -1;
	int vif, ct;
	int true_vifi = ip6mr_find_vif(mrt, dev);

	vif = c->_c.mfc_parent;
	c->_c.mfc_un.res.pkt++;
	c->_c.mfc_un.res.bytes += skb->len;
	c->_c.mfc_un.res.lastuse = jiffies;

	if (ipv6_addr_any(&c->mf6c_origin) && true_vifi >= 0) {
		struct mfc6_cache *cache_proxy;

		/* For an (*,G) entry, we only check that the incoming
		 * interface is part of the static tree.
		 */
		rcu_read_lock();
		cache_proxy = mr_mfc_find_any_parent(mrt, vif);
		if (cache_proxy &&
		    cache_proxy->_c.mfc_un.res.ttls[true_vifi] < 255) {
			rcu_read_unlock();
			goto forward;
		}
		rcu_read_unlock();
	}

	/*
	 * Wrong interface: drop packet and (maybe) send PIM assert.
	 */
	if (mrt->vif_table[vif].dev != dev) {
		c->_c.mfc_un.res.wrong_if++;

		if (true_vifi >= 0 && mrt->mroute_do_assert &&
		    /* pimsm uses asserts, when switching from RPT to SPT,
		       so that we cannot check that packet arrived on an oif.
		       It is bad, but otherwise we would need to move pretty
		       large chunk of pimd to kernel. Ough... --ANK
		     */
		    (mrt->mroute_do_pim ||
		     c->_c.mfc_un.res.ttls[true_vifi] < 255) &&
		    time_after(jiffies,
			       c->_c.mfc_un.res.last_assert +
			       MFC_ASSERT_THRESH)) {
			c->_c.mfc_un.res.last_assert = jiffies;
			ip6mr_cache_report(mrt, skb, true_vifi, MRT6MSG_WRONGMIF);
		}
		goto dont_forward;
	}

forward:
	mrt->vif_table[vif].pkt_in++;
	mrt->vif_table[vif].bytes_in += skb->len;

	/*
	 *	Forward the frame
	 */
	if (ipv6_addr_any(&c->mf6c_origin) &&
	    ipv6_addr_any(&c->mf6c_mcastgrp)) {
		if (true_vifi >= 0 &&
		    true_vifi != c->_c.mfc_parent &&
		    ipv6_hdr(skb)->hop_limit >
				c->_c.mfc_un.res.ttls[c->_c.mfc_parent]) {
			/* It's an (*,*) entry and the packet is not coming from
			 * the upstream: forward the packet to the upstream
			 * only.
			 */
			psend = c->_c.mfc_parent;
			goto last_forward;
		}
		goto dont_forward;
	}
	for (ct = c->_c.mfc_un.res.maxvif - 1;
	     ct >= c->_c.mfc_un.res.minvif; ct--) {
		/* For (*,G) entry, don't forward to the incoming interface */
		if ((!ipv6_addr_any(&c->mf6c_origin) || ct != true_vifi) &&
		    ipv6_hdr(skb)->hop_limit > c->_c.mfc_un.res.ttls[ct]) {
			if (psend != -1) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2)
					ip6mr_forward2(net, mrt, skb2, psend);
			}
			psend = ct;
		}
	}
last_forward:
	if (psend != -1) {
		ip6mr_forward2(net, mrt, skb, psend);
		return;
	}

dont_forward:
	kfree_skb(skb);
}
/*
 *	Multicast packets for forwarding arrive here
 */

int ip6_mr_input(struct sk_buff *skb)
{
	struct mfc6_cache *cache;
	struct net *net = dev_net(skb->dev);
	struct mr_table *mrt;
	struct flowi6 fl6 = {
		.flowi6_iif	= skb->dev->ifindex,
		.flowi6_mark	= skb->mark,
	};
	int err;
	struct net_device *dev;

	/* skb->dev passed in is the master dev for vrfs.
	 * Get the proper interface that does have a vif associated with it.
	 */
	dev = skb->dev;
	if (netif_is_l3_master(skb->dev)) {
		dev = dev_get_by_index_rcu(net, IPCB(skb)->iif);
		if (!dev) {
			kfree_skb(skb);
			return -ENODEV;
		}
	}

	err = ip6mr_fib_lookup(net, &fl6, &mrt);
	if (err < 0) {
		kfree_skb(skb);
		return err;
	}

	read_lock(&mrt_lock);
	cache = ip6mr_cache_find(mrt,
				 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr);
	if (!cache) {
		int vif = ip6mr_find_vif(mrt, dev);

		if (vif >= 0)
			cache = ip6mr_cache_find_any(mrt,
						     &ipv6_hdr(skb)->daddr,
						     vif);
	}

	/*
	 *	No usable cache entry
	 */
	if (!cache) {
		int vif;

		vif = ip6mr_find_vif(mrt, dev);
		if (vif >= 0) {
			int err = ip6mr_cache_unresolved(mrt, vif, skb, dev);
			read_unlock(&mrt_lock);

			return err;
		}
		read_unlock(&mrt_lock);
		kfree_skb(skb);
		return -ENODEV;
	}

	ip6_mr_forward(net, mrt, dev, skb, cache);

	read_unlock(&mrt_lock);

	return 0;
}
int ip6mr_get_route(struct net *net, struct sk_buff *skb, struct rtmsg *rtm,
		    u32 portid)
{
	int err;
	struct mr_table *mrt;
	struct mfc6_cache *cache;
	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);

	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
	if (!mrt)
		return -ENOENT;

	read_lock(&mrt_lock);
	cache = ip6mr_cache_find(mrt, &rt->rt6i_src.addr, &rt->rt6i_dst.addr);
	if (!cache && skb->dev) {
		int vif = ip6mr_find_vif(mrt, skb->dev);

		if (vif >= 0)
			cache = ip6mr_cache_find_any(mrt, &rt->rt6i_dst.addr,
						     vif);
	}

	if (!cache) {
		struct sk_buff *skb2;
		struct ipv6hdr *iph;
		struct net_device *dev;
		int vif;

		dev = skb->dev;
		if (!dev || (vif = ip6mr_find_vif(mrt, dev)) < 0) {
			read_unlock(&mrt_lock);
			return -ENODEV;
		}

		/* really correct? */
		skb2 = alloc_skb(sizeof(struct ipv6hdr), GFP_ATOMIC);
		if (!skb2) {
			read_unlock(&mrt_lock);
			return -ENOMEM;
		}

		NETLINK_CB(skb2).portid = portid;
		skb_reset_transport_header(skb2);

		skb_put(skb2, sizeof(struct ipv6hdr));
		skb_reset_network_header(skb2);

		iph = ipv6_hdr(skb2);
		iph->version = 0;
		iph->priority = 0;
		iph->flow_lbl[0] = 0;
		iph->flow_lbl[1] = 0;
		iph->flow_lbl[2] = 0;
		iph->payload_len = 0;
		iph->nexthdr = IPPROTO_NONE;
		iph->hop_limit = 0;
		iph->saddr = rt->rt6i_src.addr;
		iph->daddr = rt->rt6i_dst.addr;

		err = ip6mr_cache_unresolved(mrt, vif, skb2, dev);
		read_unlock(&mrt_lock);

		return err;
	}

	err = mr_fill_mroute(mrt, skb, &cache->_c, rtm);
	read_unlock(&mrt_lock);
	return err;
}
static int ip6mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
			     u32 portid, u32 seq, struct mfc6_cache *c, int cmd,
			     int flags)
{
	struct nlmsghdr *nlh;
	struct rtmsg *rtm;
	int err;

	nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);
	if (!nlh)
		return -EMSGSIZE;

	rtm = nlmsg_data(nlh);
	rtm->rtm_family   = RTNL_FAMILY_IP6MR;
	rtm->rtm_dst_len  = 128;
	rtm->rtm_src_len  = 128;
	rtm->rtm_tos      = 0;
	rtm->rtm_table    = mrt->id;
	if (nla_put_u32(skb, RTA_TABLE, mrt->id))
		goto nla_put_failure;
	rtm->rtm_type = RTN_MULTICAST;
	rtm->rtm_scope    = RT_SCOPE_UNIVERSE;
	if (c->_c.mfc_flags & MFC_STATIC)
		rtm->rtm_protocol = RTPROT_STATIC;
	else
		rtm->rtm_protocol = RTPROT_MROUTED;
	rtm->rtm_flags    = 0;

	if (nla_put_in6_addr(skb, RTA_SRC, &c->mf6c_origin) ||
	    nla_put_in6_addr(skb, RTA_DST, &c->mf6c_mcastgrp))
		goto nla_put_failure;
	err = mr_fill_mroute(mrt, skb, &c->_c, rtm);
	/* do not break the dump if cache is unresolved */
	if (err < 0 && err != -ENOENT)
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int _ip6mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
			      u32 portid, u32 seq, struct mr_mfc *c,
			      int cmd, int flags)
{
	return ip6mr_fill_mroute(mrt, skb, portid, seq, (struct mfc6_cache *)c,
				 cmd, flags);
}

static int mr6_msgsize(bool unresolved, int maxvif)
{
	size_t len =
		NLMSG_ALIGN(sizeof(struct rtmsg))
		+ nla_total_size(4)	/* RTA_TABLE */
		+ nla_total_size(sizeof(struct in6_addr))	/* RTA_SRC */
		+ nla_total_size(sizeof(struct in6_addr))	/* RTA_DST */
		;

	if (!unresolved)
		len = len
		      + nla_total_size(4)	/* RTA_IIF */
		      + nla_total_size(0)	/* RTA_MULTIPATH */
		      + maxvif * NLA_ALIGN(sizeof(struct rtnexthop))
						/* RTA_MFC_STATS */
		      + nla_total_size_64bit(sizeof(struct rta_mfc_stats))
		;

	return len;
}
static void mr6_netlink_event(struct mr_table *mrt, struct mfc6_cache *mfc,
			      int cmd)
{
	struct net *net = read_pnet(&mrt->net);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(mr6_msgsize(mfc->_c.mfc_parent >= MAXMIFS, mrt->maxvif),
			GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = ip6mr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
	if (err < 0)
		goto errout;

	rtnl_notify(skb, net, 0, RTNLGRP_IPV6_MROUTE, NULL, GFP_ATOMIC);
	return;

errout:
	kfree_skb(skb);
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_IPV6_MROUTE, err);
}

static size_t mrt6msg_netlink_msgsize(size_t payloadlen)
{
	size_t len =
		NLMSG_ALIGN(sizeof(struct rtgenmsg))
		+ nla_total_size(1)	/* IP6MRA_CREPORT_MSGTYPE */
		+ nla_total_size(4)	/* IP6MRA_CREPORT_MIF_ID */
					/* IP6MRA_CREPORT_SRC_ADDR */
		+ nla_total_size(sizeof(struct in6_addr))
					/* IP6MRA_CREPORT_DST_ADDR */
		+ nla_total_size(sizeof(struct in6_addr))
					/* IP6MRA_CREPORT_PKT */
		+ nla_total_size(payloadlen)
		;

	return len;
}

static void mrt6msg_netlink_event(struct mr_table *mrt, struct sk_buff *pkt)
{
	struct net *net = read_pnet(&mrt->net);
	struct nlmsghdr *nlh;
	struct rtgenmsg *rtgenm;
	struct mrt6msg *msg;
	struct sk_buff *skb;
	struct nlattr *nla;
	int payloadlen;

	payloadlen = pkt->len - sizeof(struct mrt6msg);
	msg = (struct mrt6msg *)skb_transport_header(pkt);

	skb = nlmsg_new(mrt6msg_netlink_msgsize(payloadlen), GFP_ATOMIC);
	if (!skb)
		goto errout;

	nlh = nlmsg_put(skb, 0, 0, RTM_NEWCACHEREPORT,
			sizeof(struct rtgenmsg), 0);
	if (!nlh)
		goto errout;
	rtgenm = nlmsg_data(nlh);
	rtgenm->rtgen_family = RTNL_FAMILY_IP6MR;
	if (nla_put_u8(skb, IP6MRA_CREPORT_MSGTYPE, msg->im6_msgtype) ||
	    nla_put_u32(skb, IP6MRA_CREPORT_MIF_ID, msg->im6_mif) ||
	    nla_put_in6_addr(skb, IP6MRA_CREPORT_SRC_ADDR,
			     &msg->im6_src) ||
	    nla_put_in6_addr(skb, IP6MRA_CREPORT_DST_ADDR,
			     &msg->im6_dst))
		goto nla_put_failure;

	nla = nla_reserve(skb, IP6MRA_CREPORT_PKT, payloadlen);
	if (!nla || skb_copy_bits(pkt, sizeof(struct mrt6msg),
				  nla_data(nla), payloadlen))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);

	rtnl_notify(skb, net, 0, RTNLGRP_IPV6_MROUTE_R, NULL, GFP_ATOMIC);
	return;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
errout:
	kfree_skb(skb);
	rtnl_set_sk_err(net, RTNLGRP_IPV6_MROUTE_R, -ENOBUFS);
}

static int ip6mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
{
	const struct nlmsghdr *nlh = cb->nlh;
	struct fib_dump_filter filter = {};
	int err;

	if (cb->strict_check) {
		err = ip_valid_fib_dump_req(sock_net(skb->sk), nlh,
					    &filter, cb);
		if (err < 0)
			return err;
	}

	if (filter.table_id) {
		struct mr_table *mrt;

		mrt = ip6mr_get_table(sock_net(skb->sk), filter.table_id);
		if (!mrt) {
			if (filter.dump_all_families)
				return skb->len;

			NL_SET_ERR_MSG_MOD(cb->extack, "MR table does not exist");
			return -ENOENT;
		}
		err = mr_table_dump(mrt, skb, cb, _ip6mr_fill_mroute,
				    &mfc_unres_lock, &filter);
		return skb->len ? : err;
	}

	return mr_rtm_dumproute(skb, cb, ip6mr_mr_table_iter,
				_ip6mr_fill_mroute, &mfc_unres_lock, &filter);
}