net/core/lwtunnel.c
/*
 * lwtunnel     Infrastructure for light weight tunnels like mpls
 *
 * Authors:     Roopa Prabhu, <roopa@cumulusnetworks.com>
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 */
#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/lwtunnel.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/err.h>

#include <net/lwtunnel.h>
#include <net/rtnetlink.h>
#include <net/ip6_fib.h>
#include <net/nexthop.h>
#ifdef CONFIG_MODULES

static const char *lwtunnel_encap_str(enum lwtunnel_encap_types encap_type)
{
        /* Only lwt encaps implemented without using an interface for
         * the encap need to return a string here.
         */
        switch (encap_type) {
        case LWTUNNEL_ENCAP_MPLS:
                return "MPLS";
        case LWTUNNEL_ENCAP_ILA:
                return "ILA";
        case LWTUNNEL_ENCAP_IP6:
        case LWTUNNEL_ENCAP_IP:
        case LWTUNNEL_ENCAP_NONE:
        case __LWTUNNEL_ENCAP_MAX:
                /* should not have got here */
                WARN_ON(1);
                break;
        }
        return NULL;
}

#endif /* CONFIG_MODULES */
struct lwtunnel_state *lwtunnel_state_alloc(int encap_len)
{
        struct lwtunnel_state *lws;

        lws = kzalloc(sizeof(*lws) + encap_len, GFP_ATOMIC);

        return lws;
}
EXPORT_SYMBOL(lwtunnel_state_alloc);
static const struct lwtunnel_encap_ops __rcu *
                lwtun_encaps[LWTUNNEL_ENCAP_MAX + 1] __read_mostly;

void lwtstate_free(struct lwtunnel_state *lws)
{
        const struct lwtunnel_encap_ops *ops = lwtun_encaps[lws->type];

        kfree(lws);
        module_put(ops->owner);
}
EXPORT_SYMBOL(lwtstate_free);
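/* The module_put() in lwtstate_free() releases the reference taken with
 * try_module_get() in lwtunnel_build_state() below, so an encap module
 * cannot be unloaded while lwtunnel state it created is still reachable
 * from a route.
 */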
int lwtunnel_encap_add_ops(const struct lwtunnel_encap_ops *ops,
                           unsigned int num)
{
        if (num > LWTUNNEL_ENCAP_MAX)
                return -ERANGE;

        return !cmpxchg((const struct lwtunnel_encap_ops **)
                        &lwtun_encaps[num],
                        NULL, ops) ? 0 : -1;
}
EXPORT_SYMBOL(lwtunnel_encap_add_ops);
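/* For illustration (a sketch, not code from this file): an encap
 * implementation fills in a lwtunnel_encap_ops and registers it under its
 * own LWTUNNEL_ENCAP_* type, typically from module init.  MPLS and ILA are
 * the in-tree examples; the "foo" callback names below are hypothetical
 * placeholders, and MPLS is used here only as an example type.
 *
 *      static const struct lwtunnel_encap_ops foo_encap_ops = {
 *              .build_state    = foo_build_state,
 *              .output         = foo_output,
 *              .fill_encap     = foo_fill_encap,
 *              .get_encap_size = foo_encap_nlsize,
 *              .cmp_encap      = foo_encap_cmp,
 *              .owner          = THIS_MODULE,
 *      };
 *
 *      err = lwtunnel_encap_add_ops(&foo_encap_ops, LWTUNNEL_ENCAP_MPLS);
 */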
int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *ops,
                           unsigned int encap_type)
{
        int ret;

        if (encap_type == LWTUNNEL_ENCAP_NONE ||
            encap_type > LWTUNNEL_ENCAP_MAX)
                return -ERANGE;

        ret = (cmpxchg((const struct lwtunnel_encap_ops **)
                       &lwtun_encaps[encap_type],
                       ops, NULL) == ops) ? 0 : -1;

        synchronize_net();

        return ret;
}
EXPORT_SYMBOL(lwtunnel_encap_del_ops);
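/* Unregistration clears the slot first and then calls synchronize_net(),
 * which waits for any CPU currently inside one of the rcu_read_lock()
 * sections below; after that, no new reader can pick the ops up from
 * lwtun_encaps[].
 */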
int lwtunnel_build_state(struct net_device *dev, u16 encap_type,
                         struct nlattr *encap, unsigned int family,
                         const void *cfg, struct lwtunnel_state **lws)
{
        const struct lwtunnel_encap_ops *ops;
        int ret = -EINVAL;

        if (encap_type == LWTUNNEL_ENCAP_NONE ||
            encap_type > LWTUNNEL_ENCAP_MAX)
                return ret;

        ret = -EOPNOTSUPP;
        rcu_read_lock();
        ops = rcu_dereference(lwtun_encaps[encap_type]);
        if (likely(ops && ops->build_state && try_module_get(ops->owner))) {
                ret = ops->build_state(dev, encap, family, cfg, lws);
                if (ret)
                        module_put(ops->owner);
        }
        rcu_read_unlock();

        return ret;
}
EXPORT_SYMBOL(lwtunnel_build_state);
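/* The FIB code (IPv4/IPv6 route insertion) is the usual caller here, when a
 * new route carries RTA_ENCAP/RTA_ENCAP_TYPE attributes.  On success the
 * module reference taken above stays held for the lifetime of the returned
 * lwtunnel_state and is released in lwtstate_free().
 */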
int lwtunnel_valid_encap_type(u16 encap_type)
{
        const struct lwtunnel_encap_ops *ops;
        int ret = -EINVAL;

        if (encap_type == LWTUNNEL_ENCAP_NONE ||
            encap_type > LWTUNNEL_ENCAP_MAX)
                return ret;

        rcu_read_lock();
        ops = rcu_dereference(lwtun_encaps[encap_type]);
        rcu_read_unlock();
#ifdef CONFIG_MODULES
        if (!ops) {
                const char *encap_type_str = lwtunnel_encap_str(encap_type);

                if (encap_type_str) {
                        __rtnl_unlock();
                        request_module("rtnl-lwt-%s", encap_type_str);
                        rtnl_lock();

                        rcu_read_lock();
                        ops = rcu_dereference(lwtun_encaps[encap_type]);
                        rcu_read_unlock();
                }
        }
#endif
        return ops ? 0 : -EOPNOTSUPP;
}
EXPORT_SYMBOL(lwtunnel_valid_encap_type);
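/* The request_module() above relies on encap modules declaring a
 * "rtnl-lwt-<NAME>" alias via MODULE_ALIAS_RTNL_LWT() from
 * include/net/lwtunnel.h.  For illustration, an MPLS encap module would
 * carry:
 *
 *      MODULE_ALIAS_RTNL_LWT(MPLS);
 *
 * which lets the encap type be validated (and the module autoloaded) before
 * the route using it is installed.
 */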
int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int remaining)
{
        struct rtnexthop *rtnh = (struct rtnexthop *)attr;
        struct nlattr *nla_entype;
        struct nlattr *attrs;
        struct nlattr *nla;
        u16 encap_type;
        int attrlen;

        while (rtnh_ok(rtnh, remaining)) {
                attrlen = rtnh_attrlen(rtnh);
                if (attrlen > 0) {
                        attrs = rtnh_attrs(rtnh);
                        nla = nla_find(attrs, attrlen, RTA_ENCAP);
                        nla_entype = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);

                        if (nla_entype) {
                                encap_type = nla_get_u16(nla_entype);

                                if (lwtunnel_valid_encap_type(encap_type) != 0)
                                        return -EOPNOTSUPP;
                        }
                }
                rtnh = rtnh_next(rtnh, &remaining);
        }

        return 0;
}
EXPORT_SYMBOL(lwtunnel_valid_encap_type_attr);
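/* This variant walks the rtnexthop entries of a multipath (RTA_MULTIPATH)
 * attribute so that every nexthop's RTA_ENCAP_TYPE is validated, and its
 * module loaded if needed, before the route is actually installed.
 */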
int lwtunnel_fill_encap(struct sk_buff *skb, struct lwtunnel_state *lwtstate)
{
        const struct lwtunnel_encap_ops *ops;
        struct nlattr *nest;
        int ret = -EINVAL;

        if (!lwtstate)
                return 0;

        if (lwtstate->type == LWTUNNEL_ENCAP_NONE ||
            lwtstate->type > LWTUNNEL_ENCAP_MAX)
                return 0;

        ret = -EOPNOTSUPP;
        nest = nla_nest_start(skb, RTA_ENCAP);
        rcu_read_lock();
        ops = rcu_dereference(lwtun_encaps[lwtstate->type]);
        if (likely(ops && ops->fill_encap))
                ret = ops->fill_encap(skb, lwtstate);
        rcu_read_unlock();

        if (ret)
                goto nla_put_failure;
        nla_nest_end(skb, nest);
        ret = nla_put_u16(skb, RTA_ENCAP_TYPE, lwtstate->type);
        if (ret)
                goto nla_put_failure;

        return 0;

nla_put_failure:
        nla_nest_cancel(skb, nest);

        return (ret == -EOPNOTSUPP ? 0 : ret);
}
EXPORT_SYMBOL(lwtunnel_fill_encap);
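/* The resulting netlink layout in a route dump is an RTA_ENCAP nest holding
 * the encap-specific attributes, followed by a bare RTA_ENCAP_TYPE u16:
 *
 *      RTA_ENCAP
 *          <attributes written by ops->fill_encap()>
 *      RTA_ENCAP_TYPE = lwtstate->type
 */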
int lwtunnel_get_encap_size(struct lwtunnel_state *lwtstate)
{
        const struct lwtunnel_encap_ops *ops;
        int ret = 0;

        if (!lwtstate)
                return 0;

        if (lwtstate->type == LWTUNNEL_ENCAP_NONE ||
            lwtstate->type > LWTUNNEL_ENCAP_MAX)
                return 0;

        rcu_read_lock();
        ops = rcu_dereference(lwtun_encaps[lwtstate->type]);
        if (likely(ops && ops->get_encap_size))
                ret = nla_total_size(ops->get_encap_size(lwtstate));
        rcu_read_unlock();

        return ret;
}
EXPORT_SYMBOL(lwtunnel_get_encap_size);
int lwtunnel_cmp_encap(struct lwtunnel_state *a, struct lwtunnel_state *b)
{
        const struct lwtunnel_encap_ops *ops;
        int ret = 0;

        if (!a && !b)
                return 0;

        if (!a || !b)
                return 1;

        if (a->type != b->type)
                return 1;

        if (a->type == LWTUNNEL_ENCAP_NONE ||
            a->type > LWTUNNEL_ENCAP_MAX)
                return 0;

        rcu_read_lock();
        ops = rcu_dereference(lwtun_encaps[a->type]);
        if (likely(ops && ops->cmp_encap))
                ret = ops->cmp_encap(a, b);
        rcu_read_unlock();

        return ret;
}
EXPORT_SYMBOL(lwtunnel_cmp_encap);
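/* Return convention: 0 means the two states are equivalent, non-zero means
 * they differ; route comparison code uses this when deciding whether an
 * existing route matches a newly requested one.
 */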
int lwtunnel_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
        struct dst_entry *dst = skb_dst(skb);
        const struct lwtunnel_encap_ops *ops;
        struct lwtunnel_state *lwtstate;
        int ret = -EINVAL;

        if (!dst)
                goto drop;
        lwtstate = dst->lwtstate;

        if (lwtstate->type == LWTUNNEL_ENCAP_NONE ||
            lwtstate->type > LWTUNNEL_ENCAP_MAX)
                return 0;

        ret = -EOPNOTSUPP;
        rcu_read_lock();
        ops = rcu_dereference(lwtun_encaps[lwtstate->type]);
        if (likely(ops && ops->output))
                ret = ops->output(net, sk, skb);
        rcu_read_unlock();

        if (ret == -EOPNOTSUPP)
                goto drop;

        return ret;

drop:
        kfree_skb(skb);

        return ret;
}
EXPORT_SYMBOL(lwtunnel_output);
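/* lwtunnel_output() is not normally called by encap users directly: when a
 * route's lwtunnel state has LWTUNNEL_STATE_OUTPUT_REDIRECT set (see
 * lwtunnel_output_redirect() in include/net/lwtunnel.h), the IPv4/IPv6
 * route code points dst->output here so the encap's ->output() runs on the
 * way out.
 */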
int lwtunnel_xmit(struct sk_buff *skb)
{
        struct dst_entry *dst = skb_dst(skb);
        const struct lwtunnel_encap_ops *ops;
        struct lwtunnel_state *lwtstate;
        int ret = -EINVAL;

        if (!dst)
                goto drop;

        lwtstate = dst->lwtstate;

        if (lwtstate->type == LWTUNNEL_ENCAP_NONE ||
            lwtstate->type > LWTUNNEL_ENCAP_MAX)
                return 0;

        ret = -EOPNOTSUPP;
        rcu_read_lock();
        ops = rcu_dereference(lwtun_encaps[lwtstate->type]);
        if (likely(ops && ops->xmit))
                ret = ops->xmit(skb);
        rcu_read_unlock();

        if (ret == -EOPNOTSUPP)
                goto drop;

        return ret;

drop:
        kfree_skb(skb);

        return ret;
}
EXPORT_SYMBOL(lwtunnel_xmit);
int lwtunnel_input(struct sk_buff *skb)
{
        struct dst_entry *dst = skb_dst(skb);
        const struct lwtunnel_encap_ops *ops;
        struct lwtunnel_state *lwtstate;
        int ret = -EINVAL;

        if (!dst)
                goto drop;
        lwtstate = dst->lwtstate;

        if (lwtstate->type == LWTUNNEL_ENCAP_NONE ||
            lwtstate->type > LWTUNNEL_ENCAP_MAX)
                return 0;

        ret = -EOPNOTSUPP;
        rcu_read_lock();
        ops = rcu_dereference(lwtun_encaps[lwtstate->type]);
        if (likely(ops && ops->input))
                ret = ops->input(skb);
        rcu_read_unlock();

        if (ret == -EOPNOTSUPP)
                goto drop;

        return ret;

drop:
        kfree_skb(skb);

        return ret;
}
EXPORT_SYMBOL(lwtunnel_input);
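/* lwtunnel_input() and lwtunnel_xmit() above follow the same pattern as
 * lwtunnel_output(): the route code redirects dst->input, or hooks the
 * transmit path, when the state carries LWTUNNEL_STATE_INPUT_REDIRECT or
 * LWTUNNEL_STATE_XMIT_REDIRECT, and the per-encap callback does the actual
 * encapsulation or decapsulation work.
 */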