Linux 4.18.10
net/ipv4/ipmr_base.c
/* Linux multicast routing support
 * Common logic shared by IPv4 [ipmr] and IPv6 [ip6mr] implementation
 */

#include <linux/mroute_base.h>

/* Sets everything common except 'dev', since that is done under locking */
void vif_device_init(struct vif_device *v,
                     struct net_device *dev,
                     unsigned long rate_limit,
                     unsigned char threshold,
                     unsigned short flags,
                     unsigned short get_iflink_mask)
{
        v->dev = NULL;
        v->bytes_in = 0;
        v->bytes_out = 0;
        v->pkt_in = 0;
        v->pkt_out = 0;
        v->rate_limit = rate_limit;
        v->flags = flags;
        v->threshold = threshold;
        if (v->flags & get_iflink_mask)
                v->link = dev_get_iflink(dev);
        else
                v->link = dev->ifindex;
}
EXPORT_SYMBOL(vif_device_init);
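
/* Allocate and initialise a multicast routing table.  The MFC entries live
 * in an rhltable keyed by the per-family ops->rht_params; the caller (ipmr
 * or ip6mr) supplies the unresolved-queue expiry handler and a table_set()
 * callback used to register the new table in its per-netns state.
 */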
struct mr_table *
mr_table_alloc(struct net *net, u32 id,
               struct mr_table_ops *ops,
               void (*expire_func)(struct timer_list *t),
               void (*table_set)(struct mr_table *mrt,
                                 struct net *net))
{
        struct mr_table *mrt;
        int err;

        mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
        if (!mrt)
                return ERR_PTR(-ENOMEM);
        mrt->id = id;
        write_pnet(&mrt->net, net);

        mrt->ops = *ops;
        err = rhltable_init(&mrt->mfc_hash, mrt->ops.rht_params);
        if (err) {
                kfree(mrt);
                return ERR_PTR(err);
        }
        INIT_LIST_HEAD(&mrt->mfc_cache_list);
        INIT_LIST_HEAD(&mrt->mfc_unres_queue);

        timer_setup(&mrt->ipmr_expire_timer, expire_func, 0);

        mrt->mroute_reg_vif_num = -1;
        table_set(mrt, net);
        return mrt;
}
EXPORT_SYMBOL(mr_table_alloc);
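
/* Hash-table lookup of an MFC cache entry.  'hasharg' is the per-family
 * key (e.g. origin/group for IPv4); a 'parent' of -1 matches any incoming
 * VIF.  The walk uses the RCU list iterator, so callers are expected to
 * hold the RCU read lock.
 */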
void *mr_mfc_find_parent(struct mr_table *mrt, void *hasharg, int parent)
{
        struct rhlist_head *tmp, *list;
        struct mr_mfc *c;

        list = rhltable_lookup(&mrt->mfc_hash, hasharg, *mrt->ops.rht_params);
        rhl_for_each_entry_rcu(c, tmp, list, mnode)
                if (parent == -1 || parent == c->mfc_parent)
                        return c;

        return NULL;
}
EXPORT_SYMBOL(mr_mfc_find_parent);

void *mr_mfc_find_any_parent(struct mr_table *mrt, int vifi)
{
        struct rhlist_head *tmp, *list;
        struct mr_mfc *c;

        list = rhltable_lookup(&mrt->mfc_hash, mrt->ops.cmparg_any,
                               *mrt->ops.rht_params);
        rhl_for_each_entry_rcu(c, tmp, list, mnode)
                if (c->mfc_un.res.ttls[vifi] < 255)
                        return c;

        return NULL;
}
EXPORT_SYMBOL(mr_mfc_find_any_parent);
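
/* Wildcard lookup used for (*,G)/(*,*) routing: return an entry matching
 * 'hasharg' that forwards out 'vifi', either directly or via a (*,*) proxy
 * entry, falling back to a plain (*,*) entry that forwards out 'vifi'.
 */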
void *mr_mfc_find_any(struct mr_table *mrt, int vifi, void *hasharg)
{
        struct rhlist_head *tmp, *list;
        struct mr_mfc *c, *proxy;

        list = rhltable_lookup(&mrt->mfc_hash, hasharg, *mrt->ops.rht_params);
        rhl_for_each_entry_rcu(c, tmp, list, mnode) {
                if (c->mfc_un.res.ttls[vifi] < 255)
                        return c;

                /* It's ok if the vifi is part of the static tree */
                proxy = mr_mfc_find_any_parent(mrt, c->mfc_parent);
                if (proxy && proxy->mfc_un.res.ttls[vifi] < 255)
                        return c;
        }

        return mr_mfc_find_any_parent(mrt, vifi);
}
EXPORT_SYMBOL(mr_mfc_find_any);
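
/* seq_file iterators shared by the ipmr and ip6mr /proc/net show routines:
 * mr_vif_seq_* walk the per-table VIF array, mr_mfc_seq_* walk the resolved
 * MFC cache list and then the unresolved queue.
 */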
#ifdef CONFIG_PROC_FS
void *mr_vif_seq_idx(struct net *net, struct mr_vif_iter *iter, loff_t pos)
{
        struct mr_table *mrt = iter->mrt;

        for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) {
                if (!VIF_EXISTS(mrt, iter->ct))
                        continue;
                if (pos-- == 0)
                        return &mrt->vif_table[iter->ct];
        }
        return NULL;
}
EXPORT_SYMBOL(mr_vif_seq_idx);

void *mr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        struct mr_vif_iter *iter = seq->private;
        struct net *net = seq_file_net(seq);
        struct mr_table *mrt = iter->mrt;

        ++*pos;
        if (v == SEQ_START_TOKEN)
                return mr_vif_seq_idx(net, iter, 0);

        while (++iter->ct < mrt->maxvif) {
                if (!VIF_EXISTS(mrt, iter->ct))
                        continue;
                return &mrt->vif_table[iter->ct];
        }
        return NULL;
}
EXPORT_SYMBOL(mr_vif_seq_next);
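
/* Position the MFC iterator at 'pos'.  Entries on the resolved cache list
 * are returned with the RCU read lock held; entries on the unresolved queue
 * are returned with it->lock held.  Whichever lock is still held when the
 * walk ends is expected to be dropped by the caller's seq_stop() routine.
 */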
void *mr_mfc_seq_idx(struct net *net,
                     struct mr_mfc_iter *it, loff_t pos)
{
        struct mr_table *mrt = it->mrt;
        struct mr_mfc *mfc;

        rcu_read_lock();
        it->cache = &mrt->mfc_cache_list;
        list_for_each_entry_rcu(mfc, &mrt->mfc_cache_list, list)
                if (pos-- == 0)
                        return mfc;
        rcu_read_unlock();

        spin_lock_bh(it->lock);
        it->cache = &mrt->mfc_unres_queue;
        list_for_each_entry(mfc, it->cache, list)
                if (pos-- == 0)
                        return mfc;
        spin_unlock_bh(it->lock);

        it->cache = NULL;
        return NULL;
}
EXPORT_SYMBOL(mr_mfc_seq_idx);

void *mr_mfc_seq_next(struct seq_file *seq, void *v,
                      loff_t *pos)
{
        struct mr_mfc_iter *it = seq->private;
        struct net *net = seq_file_net(seq);
        struct mr_table *mrt = it->mrt;
        struct mr_mfc *c = v;

        ++*pos;

        if (v == SEQ_START_TOKEN)
                return mr_mfc_seq_idx(net, seq->private, 0);

        if (c->list.next != it->cache)
                return list_entry(c->list.next, struct mr_mfc, list);

        if (it->cache == &mrt->mfc_unres_queue)
                goto end_of_list;

        /* exhausted cache_array, show unresolved */
        rcu_read_unlock();
        it->cache = &mrt->mfc_unres_queue;

        spin_lock_bh(it->lock);
        if (!list_empty(it->cache))
                return list_first_entry(it->cache, struct mr_mfc, list);

end_of_list:
        spin_unlock_bh(it->lock);
        it->cache = NULL;

        return NULL;
}
EXPORT_SYMBOL(mr_mfc_seq_next);
#endif
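
/* Fill the RTA_IIF, RTA_MULTIPATH, RTA_MFC_STATS and RTA_EXPIRES attributes
 * of a routing netlink reply from an MFC cache entry.  Unresolved entries
 * only get RTNH_F_UNRESOLVED set and -ENOENT back; resolved entries report
 * one rtnexthop per outgoing VIF whose TTL threshold is below 255.
 */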
int mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
                   struct mr_mfc *c, struct rtmsg *rtm)
{
        struct rta_mfc_stats mfcs;
        struct nlattr *mp_attr;
        struct rtnexthop *nhp;
        unsigned long lastuse;
        int ct;

        /* If cache is unresolved, don't try to parse IIF and OIF */
        if (c->mfc_parent >= MAXVIFS) {
                rtm->rtm_flags |= RTNH_F_UNRESOLVED;
                return -ENOENT;
        }

        if (VIF_EXISTS(mrt, c->mfc_parent) &&
            nla_put_u32(skb, RTA_IIF,
                        mrt->vif_table[c->mfc_parent].dev->ifindex) < 0)
                return -EMSGSIZE;

        if (c->mfc_flags & MFC_OFFLOAD)
                rtm->rtm_flags |= RTNH_F_OFFLOAD;

        mp_attr = nla_nest_start(skb, RTA_MULTIPATH);
        if (!mp_attr)
                return -EMSGSIZE;

        for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
                if (VIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
                        struct vif_device *vif;

                        nhp = nla_reserve_nohdr(skb, sizeof(*nhp));
                        if (!nhp) {
                                nla_nest_cancel(skb, mp_attr);
                                return -EMSGSIZE;
                        }

                        nhp->rtnh_flags = 0;
                        nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
                        vif = &mrt->vif_table[ct];
                        nhp->rtnh_ifindex = vif->dev->ifindex;
                        nhp->rtnh_len = sizeof(*nhp);
                }
        }

        nla_nest_end(skb, mp_attr);

        lastuse = READ_ONCE(c->mfc_un.res.lastuse);
        lastuse = time_after_eq(jiffies, lastuse) ? jiffies - lastuse : 0;

        mfcs.mfcs_packets = c->mfc_un.res.pkt;
        mfcs.mfcs_bytes = c->mfc_un.res.bytes;
        mfcs.mfcs_wrong_if = c->mfc_un.res.wrong_if;
        if (nla_put_64bit(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs, RTA_PAD) ||
            nla_put_u64_64bit(skb, RTA_EXPIRES, jiffies_to_clock_t(lastuse),
                              RTA_PAD))
                return -EMSGSIZE;

        rtm->rtm_type = RTN_MULTICAST;
        return 1;
}
EXPORT_SYMBOL(mr_fill_mroute);
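
/* Common RTM_GETROUTE dump loop: walk every table via 'iter' and emit one
 * RTM_NEWROUTE message per MFC entry through 'fill', covering the resolved
 * cache list under RCU and the unresolved queue under 'lock'.  The table
 * and entry counters are saved in cb->args[] so an interrupted dump can
 * resume where it left off.
 */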
int mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb,
                     struct mr_table *(*iter)(struct net *net,
                                              struct mr_table *mrt),
                     int (*fill)(struct mr_table *mrt,
                                 struct sk_buff *skb,
                                 u32 portid, u32 seq, struct mr_mfc *c,
                                 int cmd, int flags),
                     spinlock_t *lock)
{
        unsigned int t = 0, e = 0, s_t = cb->args[0], s_e = cb->args[1];
        struct net *net = sock_net(skb->sk);
        struct mr_table *mrt;
        struct mr_mfc *mfc;

        rcu_read_lock();
        for (mrt = iter(net, NULL); mrt; mrt = iter(net, mrt)) {
                if (t < s_t)
                        goto next_table;
                list_for_each_entry_rcu(mfc, &mrt->mfc_cache_list, list) {
                        if (e < s_e)
                                goto next_entry;
                        if (fill(mrt, skb, NETLINK_CB(cb->skb).portid,
                                 cb->nlh->nlmsg_seq, mfc,
                                 RTM_NEWROUTE, NLM_F_MULTI) < 0)
                                goto done;
next_entry:
                        e++;
                }
                e = 0;
                s_e = 0;

                spin_lock_bh(lock);
                list_for_each_entry(mfc, &mrt->mfc_unres_queue, list) {
                        if (e < s_e)
                                goto next_entry2;
                        if (fill(mrt, skb, NETLINK_CB(cb->skb).portid,
                                 cb->nlh->nlmsg_seq, mfc,
                                 RTM_NEWROUTE, NLM_F_MULTI) < 0) {
                                spin_unlock_bh(lock);
                                goto done;
                        }
next_entry2:
                        e++;
                }
                spin_unlock_bh(lock);
                e = 0;
                s_e = 0;
next_table:
                t++;
        }
done:
        rcu_read_unlock();

        cb->args[1] = e;
        cb->args[0] = t;

        return skb->len;
}
EXPORT_SYMBOL(mr_rtm_dumproute);
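
/* Replay the current multicast routing state to a FIB notifier: first let
 * the per-family 'rules_dump' report its policy rules, then send a
 * FIB_EVENT_VIF_ADD for every active VIF and a FIB_EVENT_ENTRY_ADD for
 * every resolved MFC entry in each table.
 */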
int mr_dump(struct net *net, struct notifier_block *nb, unsigned short family,
            int (*rules_dump)(struct net *net,
                              struct notifier_block *nb),
            struct mr_table *(*mr_iter)(struct net *net,
                                        struct mr_table *mrt),
            rwlock_t *mrt_lock)
{
        struct mr_table *mrt;
        int err;

        err = rules_dump(net, nb);
        if (err)
                return err;

        for (mrt = mr_iter(net, NULL); mrt; mrt = mr_iter(net, mrt)) {
                struct vif_device *v = &mrt->vif_table[0];
                struct mr_mfc *mfc;
                int vifi;

                /* Notify on table VIF entries */
                read_lock(mrt_lock);
                for (vifi = 0; vifi < mrt->maxvif; vifi++, v++) {
                        if (!v->dev)
                                continue;

                        mr_call_vif_notifier(nb, net, family,
                                             FIB_EVENT_VIF_ADD,
                                             v, vifi, mrt->id);
                }
                read_unlock(mrt_lock);

                /* Notify on table MFC entries */
                list_for_each_entry_rcu(mfc, &mrt->mfc_cache_list, list)
                        mr_call_mfc_notifier(nb, net, family,
                                             FIB_EVENT_ENTRY_ADD,
                                             mfc, mrt->id);
        }

        return 0;
}
EXPORT_SYMBOL(mr_dump);