/* Linux multicast routing support
 * Common logic shared by IPv4 [ipmr] and IPv6 [ip6mr] implementation
 */

#include <linux/rhashtable.h>
#include <linux/mroute_base.h>

/* Sets everything common except 'dev', since that is done under locking */
void vif_device_init(struct vif_device *v,
		     struct net_device *dev,
		     unsigned long rate_limit,
		     unsigned char threshold,
		     unsigned short flags,
		     unsigned short get_iflink_mask)
{
	v->dev = NULL;
	v->bytes_in = 0;
	v->bytes_out = 0;
	v->pkt_in = 0;
	v->pkt_out = 0;
	v->rate_limit = rate_limit;
	v->flags = flags;
	v->threshold = threshold;
	if (v->flags & get_iflink_mask)
		v->link = dev_get_iflink(dev);
	else
		v->link = dev->ifindex;
}
EXPORT_SYMBOL(vif_device_init);
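
/* Allocate a multicast routing table and set up its MFC hash table,
 * resolved/unresolved cache lists and expiry timer. The caller supplies
 * the family's rhashtable params via 'ops', the expiry callback and a
 * table_set() hook that registers the table with the family (e.g. links
 * it into the pernet table list).
 */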
struct mr_table *
mr_table_alloc(struct net *net, u32 id,
	       struct mr_table_ops *ops,
	       void (*expire_func)(struct timer_list *t),
	       void (*table_set)(struct mr_table *mrt,
				 struct net *net))
{
	struct mr_table *mrt;
	int err;

	mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
	if (!mrt)
		return ERR_PTR(-ENOMEM);
	mrt->id = id;
	write_pnet(&mrt->net, net);

	mrt->ops = *ops;
	err = rhltable_init(&mrt->mfc_hash, mrt->ops.rht_params);
	if (err) {
		kfree(mrt);
		return ERR_PTR(err);
	}
	INIT_LIST_HEAD(&mrt->mfc_cache_list);
	INIT_LIST_HEAD(&mrt->mfc_unres_queue);

	timer_setup(&mrt->ipmr_expire_timer, expire_func, 0);

	mrt->mroute_reg_vif_num = -1;
	table_set(mrt, net);
	return mrt;
}
EXPORT_SYMBOL(mr_table_alloc);
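
/* Look up an MFC entry by the family-specific hash argument, optionally
 * restricted to a given parent VIF; parent == -1 matches any parent.
 * Callers must hold the RCU read lock.
 */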
void *mr_mfc_find_parent(struct mr_table *mrt, void *hasharg, int parent)
{
	struct rhlist_head *tmp, *list;
	struct mr_mfc *c;

	list = rhltable_lookup(&mrt->mfc_hash, hasharg, *mrt->ops.rht_params);
	rhl_for_each_entry_rcu(c, tmp, list, mnode)
		if (parent == -1 || parent == c->mfc_parent)
			return c;

	return NULL;
}
EXPORT_SYMBOL(mr_mfc_find_parent);
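
/* Find a (*,*) wildcard entry that forwards to the given VIF, i.e. one
 * whose TTL for 'vifi' marks it as a valid output interface.
 */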
void *mr_mfc_find_any_parent(struct mr_table *mrt, int vifi)
{
	struct rhlist_head *tmp, *list;
	struct mr_mfc *c;

	list = rhltable_lookup(&mrt->mfc_hash, mrt->ops.cmparg_any,
			       *mrt->ops.rht_params);
	rhl_for_each_entry_rcu(c, tmp, list, mnode)
		if (c->mfc_un.res.ttls[vifi] < 255)
			return c;

	return NULL;
}
EXPORT_SYMBOL(mr_mfc_find_any_parent);
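
/* Find a (*,G) entry for the given hash argument that forwards to
 * 'vifi', either directly or through a (*,*) proxy entry; fall back to
 * a plain (*,*) lookup if no (*,G) entry matches.
 */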
void *mr_mfc_find_any(struct mr_table *mrt, int vifi, void *hasharg)
{
	struct rhlist_head *tmp, *list;
	struct mr_mfc *c, *proxy;

	list = rhltable_lookup(&mrt->mfc_hash, hasharg, *mrt->ops.rht_params);
	rhl_for_each_entry_rcu(c, tmp, list, mnode) {
		if (c->mfc_un.res.ttls[vifi] < 255)
			return c;

		/* It's ok if the vifi is part of the static tree */
		proxy = mr_mfc_find_any_parent(mrt, c->mfc_parent);
		if (proxy && proxy->mfc_un.res.ttls[vifi] < 255)
			return c;
	}

	return mr_mfc_find_any_parent(mrt, vifi);
}
EXPORT_SYMBOL(mr_mfc_find_any);

#ifdef CONFIG_PROC_FS
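/* seq_file iterators backing the per-family /proc output: the *_seq_idx()
 * helpers position an iterator at 'pos', the *_seq_next() helpers advance
 * it one entry at a time.
 */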
void *mr_vif_seq_idx(struct net *net, struct mr_vif_iter *iter, loff_t pos)
{
	struct mr_table *mrt = iter->mrt;

	for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) {
		if (!VIF_EXISTS(mrt, iter->ct))
			continue;
		if (pos-- == 0)
			return &mrt->vif_table[iter->ct];
	}
	return NULL;
}
EXPORT_SYMBOL(mr_vif_seq_idx);

void *mr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct mr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt = iter->mrt;

	++*pos;
	if (v == SEQ_START_TOKEN)
		return mr_vif_seq_idx(net, iter, 0);

	while (++iter->ct < mrt->maxvif) {
		if (!VIF_EXISTS(mrt, iter->ct))
			continue;
		return &mrt->vif_table[iter->ct];
	}
	return NULL;
}
EXPORT_SYMBOL(mr_vif_seq_next);
void *mr_mfc_seq_idx(struct net *net,
		     struct mr_mfc_iter *it, loff_t pos)
{
	struct mr_table *mrt = it->mrt;
	struct mr_mfc *mfc;

	rcu_read_lock();
	it->cache = &mrt->mfc_cache_list;
	list_for_each_entry_rcu(mfc, &mrt->mfc_cache_list, list)
		if (pos-- == 0)
			return mfc;
	rcu_read_unlock();

	spin_lock_bh(it->lock);
	it->cache = &mrt->mfc_unres_queue;
	list_for_each_entry(mfc, it->cache, list)
		if (pos-- == 0)
			return mfc;
	spin_unlock_bh(it->lock);

	it->cache = NULL;
	return NULL;
}
EXPORT_SYMBOL(mr_mfc_seq_idx);

void *mr_mfc_seq_next(struct seq_file *seq, void *v,
		      loff_t *pos)
{
	struct mr_mfc_iter *it = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt = it->mrt;
	struct mr_mfc *c = v;

	++*pos;

	if (v == SEQ_START_TOKEN)
		return mr_mfc_seq_idx(net, seq->private, 0);

	if (c->list.next != it->cache)
		return list_entry(c->list.next, struct mr_mfc, list);

	if (it->cache == &mrt->mfc_unres_queue)
		goto end_of_list;

	/* exhausted cache_array, show unresolved */
	rcu_read_unlock();
	it->cache = &mrt->mfc_unres_queue;

	spin_lock_bh(it->lock);
	if (!list_empty(it->cache))
		return list_first_entry(it->cache, struct mr_mfc, list);

end_of_list:
	spin_unlock_bh(it->lock);
	it->cache = NULL;

	return NULL;
}
EXPORT_SYMBOL(mr_mfc_seq_next);
#endif
int mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
		   struct mr_mfc *c, struct rtmsg *rtm)
{
	struct rta_mfc_stats mfcs;
	struct nlattr *mp_attr;
	struct rtnexthop *nhp;
	unsigned long lastuse;
	int ct;

	/* If cache is unresolved, don't try to parse IIF and OIF */
	if (c->mfc_parent >= MAXVIFS) {
		rtm->rtm_flags |= RTNH_F_UNRESOLVED;
		return -ENOENT;
	}

	if (VIF_EXISTS(mrt, c->mfc_parent) &&
	    nla_put_u32(skb, RTA_IIF,
			mrt->vif_table[c->mfc_parent].dev->ifindex) < 0)
		return -EMSGSIZE;

	if (c->mfc_flags & MFC_OFFLOAD)
		rtm->rtm_flags |= RTNH_F_OFFLOAD;

	mp_attr = nla_nest_start(skb, RTA_MULTIPATH);
	if (!mp_attr)
		return -EMSGSIZE;

	for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
		if (VIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
			struct vif_device *vif;

			nhp = nla_reserve_nohdr(skb, sizeof(*nhp));
			if (!nhp) {
				nla_nest_cancel(skb, mp_attr);
				return -EMSGSIZE;
			}

			nhp->rtnh_flags = 0;
			nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
			vif = &mrt->vif_table[ct];
			nhp->rtnh_ifindex = vif->dev->ifindex;
			nhp->rtnh_len = sizeof(*nhp);
		}
	}

	nla_nest_end(skb, mp_attr);

	lastuse = READ_ONCE(c->mfc_un.res.lastuse);
	lastuse = time_after_eq(jiffies, lastuse) ? jiffies - lastuse : 0;

	mfcs.mfcs_packets = c->mfc_un.res.pkt;
	mfcs.mfcs_bytes = c->mfc_un.res.bytes;
	mfcs.mfcs_wrong_if = c->mfc_un.res.wrong_if;
	if (nla_put_64bit(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs, RTA_PAD) ||
	    nla_put_u64_64bit(skb, RTA_EXPIRES, jiffies_to_clock_t(lastuse),
			      RTA_PAD))
		return -EMSGSIZE;

	rtm->rtm_type = RTN_MULTICAST;
	return 1;
}
EXPORT_SYMBOL(mr_fill_mroute);
int mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb,
		     struct mr_table *(*iter)(struct net *net,
					      struct mr_table *mrt),
		     int (*fill)(struct mr_table *mrt,
				 struct sk_buff *skb,
				 u32 portid, u32 seq, struct mr_mfc *c,
				 int cmd, int flags),
		     spinlock_t *lock)
{
	unsigned int t = 0, e = 0, s_t = cb->args[0], s_e = cb->args[1];
	struct net *net = sock_net(skb->sk);
	struct mr_table *mrt;
	struct mr_mfc *mfc;

	rcu_read_lock();
	for (mrt = iter(net, NULL); mrt; mrt = iter(net, mrt)) {
		if (t < s_t)
			goto next_table;
		list_for_each_entry_rcu(mfc, &mrt->mfc_cache_list, list) {
			if (e < s_e)
				goto next_entry;
			if (fill(mrt, skb, NETLINK_CB(cb->skb).portid,
				 cb->nlh->nlmsg_seq, mfc,
				 RTM_NEWROUTE, NLM_F_MULTI) < 0)
				goto done;
next_entry:
			e++;
		}

		spin_lock_bh(lock);
		list_for_each_entry(mfc, &mrt->mfc_unres_queue, list) {
			if (e < s_e)
				goto next_entry2;
			if (fill(mrt, skb, NETLINK_CB(cb->skb).portid,
				 cb->nlh->nlmsg_seq, mfc,
				 RTM_NEWROUTE, NLM_F_MULTI) < 0) {
				spin_unlock_bh(lock);
				goto done;
			}
next_entry2:
			e++;
		}
		spin_unlock_bh(lock);
		e = 0;
		s_e = 0;
next_table:
		t++;
	}
done:
	rcu_read_unlock();

	cb->args[1] = e;
	cb->args[0] = t;

	return skb->len;
}
EXPORT_SYMBOL(mr_rtm_dumproute);
int mr_dump(struct net *net, struct notifier_block *nb, unsigned short family,
	    int (*rules_dump)(struct net *net,
			      struct notifier_block *nb),
	    struct mr_table *(*mr_iter)(struct net *net,
					struct mr_table *mrt),
	    rwlock_t *mrt_lock)
{
	struct mr_table *mrt;
	int err;

	err = rules_dump(net, nb);
	if (err)
		return err;

	for (mrt = mr_iter(net, NULL); mrt; mrt = mr_iter(net, mrt)) {
		struct vif_device *v = &mrt->vif_table[0];
		struct mr_mfc *mfc;
		int vifi;

		/* Notify on table VIF entries */
		read_lock(mrt_lock);
		for (vifi = 0; vifi < mrt->maxvif; vifi++, v++) {
			if (!v->dev)
				continue;

			mr_call_vif_notifier(nb, net, family,
					     FIB_EVENT_VIF_ADD,
					     v, vifi, mrt->id);
		}
		read_unlock(mrt_lock);

		/* Notify on table MFC entries */
		list_for_each_entry_rcu(mfc, &mrt->mfc_cache_list, list)
			mr_call_mfc_notifier(nb, net, family,
					     FIB_EVENT_ENTRY_ADD,
					     mfc, mrt->id);
	}

	return 0;
}
EXPORT_SYMBOL(mr_dump);