/* Linux multicast routing support
 * Common logic shared by IPv4 [ipmr] and IPv6 [ip6mr] implementation
 */

#include <linux/rhashtable.h>
#include <linux/mroute_base.h>
/* Sets everything common except 'dev', since that is done under locking */
void vif_device_init(struct vif_device *v,
		     struct net_device *dev,
		     unsigned long rate_limit,
		     unsigned char threshold,
		     unsigned short flags,
		     unsigned short get_iflink_mask)
{
	v->dev = NULL;
	v->bytes_in = 0;
	v->bytes_out = 0;
	v->pkt_in = 0;
	v->pkt_out = 0;
	v->rate_limit = rate_limit;
	v->flags = flags;
	v->threshold = threshold;
	if (v->flags & get_iflink_mask)
		v->link = dev_get_iflink(dev);
	else
		v->link = dev->ifindex;
}
EXPORT_SYMBOL(vif_device_init);
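
/* Usage sketch (illustrative, not part of this file): the IPv4 side calls
 * this when MRT_ADD_VIF installs a new VIF.  The vifc_* fields come from
 * struct vifctl; treat the exact flag arithmetic as an approximation of
 * the ipmr caller, not a definition.
 *
 *	vif_device_init(v, dev, vifc->vifc_rate_limit,
 *			vifc->vifc_threshold,
 *			vifc->vifc_flags | (!mrtsock ? VIFF_STATIC : 0),
 *			(VIFF_TUNNEL | VIFF_REGISTER));
 */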

struct mr_table *
mr_table_alloc(struct net *net, u32 id,
	       struct mr_table_ops *ops,
	       void (*expire_func)(struct timer_list *t),
	       void (*table_set)(struct mr_table *mrt,
				 struct net *net))
{
	struct mr_table *mrt;
	int err;

	mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
	if (!mrt)
		return ERR_PTR(-ENOMEM);
	mrt->id = id;
	write_pnet(&mrt->net, net);

	/* The MFC hash is keyed by the protocol's own compare argument */
	mrt->ops = *ops;
	err = rhltable_init(&mrt->mfc_hash, mrt->ops.rht_params);
	if (err) {
		kfree(mrt);
		return ERR_PTR(err);
	}
	INIT_LIST_HEAD(&mrt->mfc_cache_list);
	INIT_LIST_HEAD(&mrt->mfc_unres_queue);

	timer_setup(&mrt->ipmr_expire_timer, expire_func, 0);

	mrt->mroute_reg_vif_num = -1;
	table_set(mrt, net);
	return mrt;
}
EXPORT_SYMBOL(mr_table_alloc);
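
/* Allocation sketch (illustrative): each protocol passes its own rhashtable
 * ops, expire handler and table-registration callback.  The names below
 * mirror the ipmr caller and are an approximation, not a definition.
 *
 *	mrt = mr_table_alloc(net, id, &ipmr_mr_table_ops,
 *			     ipmr_expire_process, ipmr_new_table_set);
 *	if (IS_ERR(mrt))
 *		return mrt;
 */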

void *mr_mfc_find_parent(struct mr_table *mrt, void *hasharg, int parent)
{
	struct rhlist_head *tmp, *list;
	struct mr_mfc *c;

	list = rhltable_lookup(&mrt->mfc_hash, hasharg, *mrt->ops.rht_params);
	rhl_for_each_entry_rcu(c, tmp, list, mnode)
		if (parent == -1 || parent == c->mfc_parent)
			return c;

	return NULL;
}
EXPORT_SYMBOL(mr_mfc_find_parent);
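
/* Lookup sketch (illustrative): 'hasharg' is the protocol-specific
 * rhashtable compare key.  On the IPv4 side it is roughly the
 * (origin, group) pair below; the struct name is ipmr's, shown here
 * only as an assumption about the caller.
 *
 *	struct ipmr_mfc_cache_cmp_arg arg = {
 *		.mfc_mcastgrp = mcastgrp,
 *		.mfc_origin = origin,
 *	};
 *	c = mr_mfc_find_parent(mrt, &arg, parent);
 */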

void *mr_mfc_find_any_parent(struct mr_table *mrt, int vifi)
{
	struct rhlist_head *tmp, *list;
	struct mr_mfc *c;

	list = rhltable_lookup(&mrt->mfc_hash, mrt->ops.cmparg_any,
			       *mrt->ops.rht_params);
	rhl_for_each_entry_rcu(c, tmp, list, mnode)
		if (c->mfc_un.res.ttls[vifi] < 255)
			return c;

	return NULL;
}
EXPORT_SYMBOL(mr_mfc_find_any_parent);

void *mr_mfc_find_any(struct mr_table *mrt, int vifi, void *hasharg)
{
	struct rhlist_head *tmp, *list;
	struct mr_mfc *c, *proxy;

	list = rhltable_lookup(&mrt->mfc_hash, hasharg, *mrt->ops.rht_params);
	rhl_for_each_entry_rcu(c, tmp, list, mnode) {
		if (c->mfc_un.res.ttls[vifi] < 255)
			return c;

		/* It's ok if the vifi is part of the static tree */
		proxy = mr_mfc_find_any_parent(mrt, c->mfc_parent);
		if (proxy && proxy->mfc_un.res.ttls[vifi] < 255)
			return c;
	}

	return mr_mfc_find_any_parent(mrt, vifi);
}
EXPORT_SYMBOL(mr_mfc_find_any);
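
/* (*,G) lookup sketch (illustrative): for "any source" entries the caller
 * hashes with a wildcard origin.  The IPv4 flavor looks roughly like this;
 * treat the struct and the htonl(INADDR_ANY) key as assumptions about ipmr.
 *
 *	struct ipmr_mfc_cache_cmp_arg arg = {
 *		.mfc_mcastgrp = mcastgrp,
 *		.mfc_origin = htonl(INADDR_ANY),
 *	};
 *	c = mr_mfc_find_any(mrt, vifi, &arg);
 */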

#ifdef CONFIG_PROC_FS
void *mr_vif_seq_idx(struct net *net, struct mr_vif_iter *iter, loff_t pos)
{
	struct mr_table *mrt = iter->mrt;

	for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) {
		if (!VIF_EXISTS(mrt, iter->ct))
			continue;
		if (pos-- == 0)
			return &mrt->vif_table[iter->ct];
	}
	return NULL;
}
EXPORT_SYMBOL(mr_vif_seq_idx);

void *mr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct mr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt = iter->mrt;

	++*pos;
	if (v == SEQ_START_TOKEN)
		return mr_vif_seq_idx(net, iter, 0);

	while (++iter->ct < mrt->maxvif) {
		if (!VIF_EXISTS(mrt, iter->ct))
			continue;
		return &mrt->vif_table[iter->ct];
	}
	return NULL;
}
EXPORT_SYMBOL(mr_vif_seq_next);
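
/* Wiring sketch (illustrative): these iterators slot into seq_operations,
 * with the protocol supplying start/stop/show.  Names below follow the
 * ipmr /proc code and are an approximation.
 *
 *	static const struct seq_operations ipmr_vif_seq_ops = {
 *		.start = ipmr_vif_seq_start,
 *		.next  = mr_vif_seq_next,
 *		.stop  = ipmr_vif_seq_stop,
 *		.show  = ipmr_vif_seq_show,
 *	};
 */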

void *mr_mfc_seq_idx(struct net *net,
		     struct mr_mfc_iter *it, loff_t pos)
{
	struct mr_table *mrt = it->mrt;
	struct mr_mfc *mfc;

	/* The RCU read side (or 'it->lock' below) is intentionally left
	 * held on return; the seq_file ->stop() hook drops it.
	 */
	rcu_read_lock();
	it->cache = &mrt->mfc_cache_list;
	list_for_each_entry_rcu(mfc, &mrt->mfc_cache_list, list)
		if (pos-- == 0)
			return mfc;
	rcu_read_unlock();

	spin_lock_bh(it->lock);
	it->cache = &mrt->mfc_unres_queue;
	list_for_each_entry(mfc, it->cache, list)
		if (pos-- == 0)
			return mfc;
	spin_unlock_bh(it->lock);

	it->cache = NULL;
	return NULL;
}
EXPORT_SYMBOL(mr_mfc_seq_idx);

void *mr_mfc_seq_next(struct seq_file *seq, void *v,
		      loff_t *pos)
{
	struct mr_mfc_iter *it = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt = it->mrt;
	struct mr_mfc *c = v;

	++*pos;

	if (v == SEQ_START_TOKEN)
		return mr_mfc_seq_idx(net, seq->private, 0);

	if (c->list.next != it->cache)
		return list_entry(c->list.next, struct mr_mfc, list);

	if (it->cache == &mrt->mfc_unres_queue)
		goto end_of_list;

	/* exhausted cache_array, show unresolved */
	rcu_read_unlock();
	it->cache = &mrt->mfc_unres_queue;

	spin_lock_bh(it->lock);
	if (!list_empty(it->cache))
		return list_first_entry(it->cache, struct mr_mfc, list);

end_of_list:
	spin_unlock_bh(it->lock);
	it->cache = NULL;

	return NULL;
}
EXPORT_SYMBOL(mr_mfc_seq_next);
#endif

int mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
		   struct mr_mfc *c, struct rtmsg *rtm)
{
	struct rta_mfc_stats mfcs;
	struct nlattr *mp_attr;
	struct rtnexthop *nhp;
	unsigned long lastuse;
	int ct;

	/* If cache is unresolved, don't try to parse IIF and OIF */
	if (c->mfc_parent >= MAXVIFS) {
		rtm->rtm_flags |= RTNH_F_UNRESOLVED;
		return -ENOENT;
	}

	if (VIF_EXISTS(mrt, c->mfc_parent) &&
	    nla_put_u32(skb, RTA_IIF,
			mrt->vif_table[c->mfc_parent].dev->ifindex) < 0)
		return -EMSGSIZE;

	if (c->mfc_flags & MFC_OFFLOAD)
		rtm->rtm_flags |= RTNH_F_OFFLOAD;

	mp_attr = nla_nest_start_noflag(skb, RTA_MULTIPATH);
	if (!mp_attr)
		return -EMSGSIZE;

	/* One rtnexthop per output VIF, with the TTL threshold in rtnh_hops */
	for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
		if (VIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
			struct vif_device *vif;

			nhp = nla_reserve_nohdr(skb, sizeof(*nhp));
			if (!nhp) {
				nla_nest_cancel(skb, mp_attr);
				return -EMSGSIZE;
			}

			nhp->rtnh_flags = 0;
			nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
			vif = &mrt->vif_table[ct];
			nhp->rtnh_ifindex = vif->dev->ifindex;
			nhp->rtnh_len = sizeof(*nhp);
		}
	}

	nla_nest_end(skb, mp_attr);

	lastuse = READ_ONCE(c->mfc_un.res.lastuse);
	lastuse = time_after_eq(jiffies, lastuse) ? jiffies - lastuse : 0;

	mfcs.mfcs_packets = c->mfc_un.res.pkt;
	mfcs.mfcs_bytes = c->mfc_un.res.bytes;
	mfcs.mfcs_wrong_if = c->mfc_un.res.wrong_if;
	if (nla_put_64bit(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs, RTA_PAD) ||
	    nla_put_u64_64bit(skb, RTA_EXPIRES, jiffies_to_clock_t(lastuse),
			      RTA_PAD))
		return -EMSGSIZE;

	rtm->rtm_type = RTN_MULTICAST;
	return 1;
}
EXPORT_SYMBOL(mr_fill_mroute);
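
/* Resulting netlink layout (summary of the code above): a resolved entry
 * carries RTA_IIF (input ifindex), an RTA_MULTIPATH nest of rtnexthop
 * records (one per output VIF, TTL threshold in rtnh_hops), RTA_MFC_STATS
 * and RTA_EXPIRES; an unresolved entry only gets RTNH_F_UNRESOLVED set in
 * rtm_flags.
 */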

static bool mr_mfc_uses_dev(const struct mr_table *mrt,
			    const struct mr_mfc *c,
			    const struct net_device *dev)
{
	int ct;

	for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
		if (VIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
			const struct vif_device *vif;

			vif = &mrt->vif_table[ct];
			if (vif->dev == dev)
				return true;
		}
	}
	return false;
}

int mr_table_dump(struct mr_table *mrt, struct sk_buff *skb,
		  struct netlink_callback *cb,
		  int (*fill)(struct mr_table *mrt, struct sk_buff *skb,
			      u32 portid, u32 seq, struct mr_mfc *c,
			      int cmd, int flags),
		  spinlock_t *lock, struct fib_dump_filter *filter)
{
	unsigned int e = 0, s_e = cb->args[1];
	unsigned int flags = NLM_F_MULTI;
	struct mr_mfc *mfc;
	int err;

	if (filter->filter_set)
		flags |= NLM_F_DUMP_FILTERED;

	list_for_each_entry_rcu(mfc, &mrt->mfc_cache_list, list) {
		if (e < s_e)
			goto next_entry;
		if (filter->dev &&
		    !mr_mfc_uses_dev(mrt, mfc, filter->dev))
			goto next_entry;

		err = fill(mrt, skb, NETLINK_CB(cb->skb).portid,
			   cb->nlh->nlmsg_seq, mfc, RTM_NEWROUTE, flags);
		if (err < 0)
			goto out;
next_entry:
		e++;
	}

	spin_lock_bh(lock);
	list_for_each_entry(mfc, &mrt->mfc_unres_queue, list) {
		if (e < s_e)
			goto next_entry2;
		if (filter->dev &&
		    !mr_mfc_uses_dev(mrt, mfc, filter->dev))
			goto next_entry2;

		err = fill(mrt, skb, NETLINK_CB(cb->skb).portid,
			   cb->nlh->nlmsg_seq, mfc, RTM_NEWROUTE, flags);
		if (err < 0) {
			spin_unlock_bh(lock);
			goto out;
		}
next_entry2:
		e++;
	}
	spin_unlock_bh(lock);

	err = 0;
	e = 0;

out:
	cb->args[1] = e;
	return err;
}
EXPORT_SYMBOL(mr_table_dump);

int mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb,
		     struct mr_table *(*iter)(struct net *net,
					      struct mr_table *mrt),
		     int (*fill)(struct mr_table *mrt,
				 struct sk_buff *skb,
				 u32 portid, u32 seq, struct mr_mfc *c,
				 int cmd, int flags),
		     spinlock_t *lock, struct fib_dump_filter *filter)
{
	unsigned int t = 0, s_t = cb->args[0];
	struct net *net = sock_net(skb->sk);
	struct mr_table *mrt;
	int err;

	/* multicast does not track protocol or have route type other
	 * than RTN_MULTICAST
	 */
	if (filter->filter_set) {
		if (filter->protocol || filter->flags ||
		    (filter->rt_type && filter->rt_type != RTN_MULTICAST))
			return skb->len;
	}

	rcu_read_lock();
	for (mrt = iter(net, NULL); mrt; mrt = iter(net, mrt)) {
		if (t < s_t)
			goto next_table;

		err = mr_table_dump(mrt, skb, cb, fill, lock, filter);
		if (err < 0)
			break;
		cb->args[1] = 0;
next_table:
		t++;
	}
	rcu_read_unlock();

	cb->args[0] = t;

	return skb->len;
}
EXPORT_SYMBOL(mr_rtm_dumproute);
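
/* Caller sketch (illustrative): ipmr's RTM_GETROUTE dump handler delegates
 * here after parsing its filter; the names below are an approximation of
 * that caller, not a definition.
 *
 *	return mr_rtm_dumproute(skb, cb, ipmr_mr_table_iter,
 *				_ipmr_fill_mroute, &mfc_unres_lock, &filter);
 */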

int mr_dump(struct net *net, struct notifier_block *nb, unsigned short family,
	    int (*rules_dump)(struct net *net,
			      struct notifier_block *nb),
	    struct mr_table *(*mr_iter)(struct net *net,
					struct mr_table *mrt),
	    rwlock_t *mrt_lock)
{
	struct mr_table *mrt;
	int err;

	err = rules_dump(net, nb);
	if (err)
		return err;

	for (mrt = mr_iter(net, NULL); mrt; mrt = mr_iter(net, mrt)) {
		struct vif_device *v = &mrt->vif_table[0];
		struct mr_mfc *mfc;
		int vifi;

		/* Notify on table VIF entries */
		read_lock(mrt_lock);
		for (vifi = 0; vifi < mrt->maxvif; vifi++, v++) {
			if (!v->dev)
				continue;

			mr_call_vif_notifier(nb, net, family,
					     FIB_EVENT_VIF_ADD,
					     v, vifi, mrt->id);
		}
		read_unlock(mrt_lock);

		/* Notify on table MFC entries */
		list_for_each_entry_rcu(mfc, &mrt->mfc_cache_list, list)
			mr_call_mfc_notifier(nb, net, family,
					     FIB_EVENT_ENTRY_ADD,
					     mfc, mrt->id);
	}

	return 0;
}
EXPORT_SYMBOL(mr_dump);
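
/* Caller sketch (illustrative): the IPv4 FIB notifier dump is a thin
 * wrapper around this helper; the names below are an approximation of
 * the ipmr caller.
 *
 *	static int ipmr_dump(struct net *net, struct notifier_block *nb)
 *	{
 *		return mr_dump(net, nb, RTNL_FAMILY_IPMR, ipmr_rules_dump,
 *			       ipmr_mr_table_iter, &mrt_lock);
 *	}
 */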