/* net/ipv6/xfrm6_tunnel.c */
/*
 * Copyright (C)2003,2004 USAGI/WIDE Project
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Authors	Mitsuru KANDA <mk@linux-ipv6.org>
 *		YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
 *
 *		Based on net/ipv4/xfrm4_tunnel.c
 */
#include <linux/module.h>
#include <linux/xfrm.h>
#include <linux/list.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/ipv6.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/mutex.h>
/*
 * xfrm_tunnel_spi things are for allocating unique id ("spi")
 * per xfrm_address_t.
 */
struct xfrm6_tunnel_spi {
	struct hlist_node	list_byaddr;
	struct hlist_node	list_byspi;
	xfrm_address_t		addr;
	u32			spi;
	atomic_t		refcnt;
};
static DEFINE_RWLOCK(xfrm6_tunnel_spi_lock);

static u32 xfrm6_tunnel_spi;

#define XFRM6_TUNNEL_SPI_MIN	1
#define XFRM6_TUNNEL_SPI_MAX	0xffffffff

static struct kmem_cache *xfrm6_tunnel_spi_kmem __read_mostly;

#define XFRM6_TUNNEL_SPI_BYADDR_HSIZE 256
#define XFRM6_TUNNEL_SPI_BYSPI_HSIZE 256

static struct hlist_head xfrm6_tunnel_spi_byaddr[XFRM6_TUNNEL_SPI_BYADDR_HSIZE];
static struct hlist_head xfrm6_tunnel_spi_byspi[XFRM6_TUNNEL_SPI_BYSPI_HSIZE];
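/* Hash an address into the by-address table: XOR the four 32-bit words
 * of the IPv6 address, fold the result, and mask to the table size.
 */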
static inline unsigned xfrm6_tunnel_spi_hash_byaddr(xfrm_address_t *addr)
{
	unsigned h;

	h = (__force u32)(addr->a6[0] ^ addr->a6[1] ^ addr->a6[2] ^ addr->a6[3]);
	h ^= h >> 16;
	h ^= h >> 8;
	h &= XFRM6_TUNNEL_SPI_BYADDR_HSIZE - 1;

	return h;
}

static inline unsigned xfrm6_tunnel_spi_hash_byspi(u32 spi)
{
	return spi % XFRM6_TUNNEL_SPI_BYSPI_HSIZE;
}
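/* Create the slab cache used for SPI entries and initialise both hash
 * tables.
 */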
static int xfrm6_tunnel_spi_init(void)
{
	int i;

	xfrm6_tunnel_spi = 0;
	xfrm6_tunnel_spi_kmem = kmem_cache_create("xfrm6_tunnel_spi",
						  sizeof(struct xfrm6_tunnel_spi),
						  0, SLAB_HWCACHE_ALIGN,
						  NULL);
	if (!xfrm6_tunnel_spi_kmem)
		return -ENOMEM;

	for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++)
		INIT_HLIST_HEAD(&xfrm6_tunnel_spi_byaddr[i]);
	for (i = 0; i < XFRM6_TUNNEL_SPI_BYSPI_HSIZE; i++)
		INIT_HLIST_HEAD(&xfrm6_tunnel_spi_byspi[i]);
	return 0;
}
static void xfrm6_tunnel_spi_fini(void)
{
	int i;

	for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++) {
		if (!hlist_empty(&xfrm6_tunnel_spi_byaddr[i]))
			return;
	}
	for (i = 0; i < XFRM6_TUNNEL_SPI_BYSPI_HSIZE; i++) {
		if (!hlist_empty(&xfrm6_tunnel_spi_byspi[i]))
			return;
	}
	kmem_cache_destroy(xfrm6_tunnel_spi_kmem);
	xfrm6_tunnel_spi_kmem = NULL;
}
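/* Find the SPI entry for the given source address.
 * Called with xfrm6_tunnel_spi_lock held.
 */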
static struct xfrm6_tunnel_spi *__xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr)
{
	struct xfrm6_tunnel_spi *x6spi;
	struct hlist_node *pos;

	hlist_for_each_entry(x6spi, pos,
			     &xfrm6_tunnel_spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)],
			     list_byaddr) {
		if (memcmp(&x6spi->addr, saddr, sizeof(x6spi->addr)) == 0)
			return x6spi;
	}

	return NULL;
}
__be32 xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr)
{
	struct xfrm6_tunnel_spi *x6spi;
	u32 spi;

	read_lock_bh(&xfrm6_tunnel_spi_lock);
	x6spi = __xfrm6_tunnel_spi_lookup(saddr);
	spi = x6spi ? x6spi->spi : 0;
	read_unlock_bh(&xfrm6_tunnel_spi_lock);
	return htonl(spi);
}

EXPORT_SYMBOL(xfrm6_tunnel_spi_lookup);
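/* Return the by-SPI bucket index if @spi is still unused, or -1 if an
 * entry with that SPI already exists.
 */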
static int __xfrm6_tunnel_spi_check(u32 spi)
{
	struct xfrm6_tunnel_spi *x6spi;
	int index = xfrm6_tunnel_spi_hash_byspi(spi);
	struct hlist_node *pos;

	hlist_for_each_entry(x6spi, pos,
			     &xfrm6_tunnel_spi_byspi[index],
			     list_byspi) {
		if (x6spi->spi == spi)
			return -1;
	}
	return index;
}
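/* Allocate a new SPI for @saddr: search upwards from the last value
 * handed out, wrapping around to XFRM6_TUNNEL_SPI_MIN, then link the
 * new entry into both hash tables.  Returns 0 if no SPI is available
 * or the allocation fails.  Called with xfrm6_tunnel_spi_lock held for
 * writing.
 */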
static u32 __xfrm6_tunnel_alloc_spi(xfrm_address_t *saddr)
{
	u32 spi;
	struct xfrm6_tunnel_spi *x6spi;
	int index;

	if (xfrm6_tunnel_spi < XFRM6_TUNNEL_SPI_MIN ||
	    xfrm6_tunnel_spi >= XFRM6_TUNNEL_SPI_MAX)
		xfrm6_tunnel_spi = XFRM6_TUNNEL_SPI_MIN;
	else
		xfrm6_tunnel_spi++;

	for (spi = xfrm6_tunnel_spi; spi <= XFRM6_TUNNEL_SPI_MAX; spi++) {
		index = __xfrm6_tunnel_spi_check(spi);
		if (index >= 0)
			goto alloc_spi;
	}
	for (spi = XFRM6_TUNNEL_SPI_MIN; spi < xfrm6_tunnel_spi; spi++) {
		index = __xfrm6_tunnel_spi_check(spi);
		if (index >= 0)
			goto alloc_spi;
	}
	spi = 0;
	goto out;
alloc_spi:
	xfrm6_tunnel_spi = spi;
	x6spi = kmem_cache_alloc(xfrm6_tunnel_spi_kmem, GFP_ATOMIC);
	if (!x6spi)
		goto out;

	memcpy(&x6spi->addr, saddr, sizeof(x6spi->addr));
	x6spi->spi = spi;
	atomic_set(&x6spi->refcnt, 1);

	hlist_add_head(&x6spi->list_byspi, &xfrm6_tunnel_spi_byspi[index]);

	index = xfrm6_tunnel_spi_hash_byaddr(saddr);
	hlist_add_head(&x6spi->list_byaddr, &xfrm6_tunnel_spi_byaddr[index]);
out:
	return spi;
}
__be32 xfrm6_tunnel_alloc_spi(xfrm_address_t *saddr)
{
	struct xfrm6_tunnel_spi *x6spi;
	u32 spi;

	write_lock_bh(&xfrm6_tunnel_spi_lock);
	x6spi = __xfrm6_tunnel_spi_lookup(saddr);
	if (x6spi) {
		atomic_inc(&x6spi->refcnt);
		spi = x6spi->spi;
	} else
		spi = __xfrm6_tunnel_alloc_spi(saddr);
	write_unlock_bh(&xfrm6_tunnel_spi_lock);

	return htonl(spi);
}

EXPORT_SYMBOL(xfrm6_tunnel_alloc_spi);
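/* Release one reference on the SPI entry for @saddr; the entry is
 * unhashed and freed once the refcount drops to zero.
 */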
void xfrm6_tunnel_free_spi(xfrm_address_t *saddr)
{
	struct xfrm6_tunnel_spi *x6spi;
	struct hlist_node *pos, *n;

	write_lock_bh(&xfrm6_tunnel_spi_lock);

	hlist_for_each_entry_safe(x6spi, pos, n,
				  &xfrm6_tunnel_spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)],
				  list_byaddr)
	{
		if (memcmp(&x6spi->addr, saddr, sizeof(x6spi->addr)) == 0) {
			if (atomic_dec_and_test(&x6spi->refcnt)) {
				hlist_del(&x6spi->list_byaddr);
				hlist_del(&x6spi->list_byspi);
				kmem_cache_free(xfrm6_tunnel_spi_kmem, x6spi);
				break;
			}
		}
	}

	write_unlock_bh(&xfrm6_tunnel_spi_lock);
}

EXPORT_SYMBOL(xfrm6_tunnel_free_spi);
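/* Output for IP6IP6 just resets skb->data to the outer network header;
 * this xfrm type adds no header of its own.
 */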
static int xfrm6_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
{
	skb_push(skb, -skb_network_offset(skb));
	return 0;
}
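/* Input: return the next-header value stored at IP6CB(skb)->nhoff in
 * the network header, so the inner packet is handled as that protocol.
 */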
static int xfrm6_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
{
	return skb_network_header(skb)[IP6CB(skb)->nhoff];
}
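/* Receive path: look up the SPI allocated for the outer source address
 * and hand the packet to the generic SPI-based receive routine.
 */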
static int xfrm6_tunnel_rcv(struct sk_buff *skb)
{
	struct ipv6hdr *iph = ipv6_hdr(skb);
	__be32 spi;

	spi = xfrm6_tunnel_spi_lookup((xfrm_address_t *)&iph->saddr);
	return xfrm6_rcv_spi(skb, IPPROTO_IPV6, spi) > 0 ? : 0;
}
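/* ICMPv6 error handler: the switch below enumerates the relevant error
 * types and codes, but no action is taken for any of them yet.
 */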
static int xfrm6_tunnel_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
			    int type, int code, int offset, __be32 info)
{
	/* xfrm6_tunnel native err handling */
	switch (type) {
	case ICMPV6_DEST_UNREACH:
		switch (code) {
		case ICMPV6_NOROUTE:
		case ICMPV6_ADM_PROHIBITED:
		case ICMPV6_NOT_NEIGHBOUR:
		case ICMPV6_ADDR_UNREACH:
		case ICMPV6_PORT_UNREACH:
		default:
			break;
		}
		break;
	case ICMPV6_PKT_TOOBIG:
		break;
	case ICMPV6_TIME_EXCEED:
		switch (code) {
		case ICMPV6_EXC_HOPLIMIT:
			break;
		case ICMPV6_EXC_FRAGTIME:
		default:
			break;
		}
		break;
	case ICMPV6_PARAMPROB:
		switch (code) {
		case ICMPV6_HDR_FIELD: break;
		case ICMPV6_UNK_NEXTHDR: break;
		case ICMPV6_UNK_OPTION: break;
		}
		break;
	default:
		break;
	}

	return 0;
}
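/* An IP6IP6 state must be in tunnel mode and must not request
 * encapsulation; the only header it contributes is the outer IPv6
 * header.
 */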
static int xfrm6_tunnel_init_state(struct xfrm_state *x)
{
	if (x->props.mode != XFRM_MODE_TUNNEL)
		return -EINVAL;

	if (x->encap)
		return -EINVAL;

	x->props.header_len = sizeof(struct ipv6hdr);

	return 0;
}
static void xfrm6_tunnel_destroy(struct xfrm_state *x)
{
	xfrm6_tunnel_free_spi((xfrm_address_t *)&x->props.saddr);
}
static const struct xfrm_type xfrm6_tunnel_type = {
	.description	= "IP6IP6",
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_IPV6,
	.init_state	= xfrm6_tunnel_init_state,
	.destructor	= xfrm6_tunnel_destroy,
	.input		= xfrm6_tunnel_input,
	.output		= xfrm6_tunnel_output,
};
static struct xfrm6_tunnel xfrm6_tunnel_handler = {
	.handler	= xfrm6_tunnel_rcv,
	.err_handler	= xfrm6_tunnel_err,
	.priority	= 2,
};

static struct xfrm6_tunnel xfrm46_tunnel_handler = {
	.handler	= xfrm6_tunnel_rcv,
	.err_handler	= xfrm6_tunnel_err,
	.priority	= 2,
};
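/* Register the IP6IP6 xfrm type, the IPv6 and IPv4 tunnel handlers and
 * the SPI allocator, in that order, unwinding in reverse on failure.
 */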
static int __init xfrm6_tunnel_init(void)
{
	if (xfrm_register_type(&xfrm6_tunnel_type, AF_INET6) < 0)
		goto err;
	if (xfrm6_tunnel_register(&xfrm6_tunnel_handler, AF_INET6))
		goto unreg;
	if (xfrm6_tunnel_register(&xfrm46_tunnel_handler, AF_INET))
		goto dereg6;
	if (xfrm6_tunnel_spi_init() < 0)
		goto dereg46;
	return 0;

dereg46:
	xfrm6_tunnel_deregister(&xfrm46_tunnel_handler, AF_INET);
dereg6:
	xfrm6_tunnel_deregister(&xfrm6_tunnel_handler, AF_INET6);
unreg:
	xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6);
err:
	return -EAGAIN;
}
static void __exit xfrm6_tunnel_fini(void)
{
	xfrm6_tunnel_spi_fini();
	xfrm6_tunnel_deregister(&xfrm46_tunnel_handler, AF_INET);
	xfrm6_tunnel_deregister(&xfrm6_tunnel_handler, AF_INET6);
	xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6);
}
module_init(xfrm6_tunnel_init);
module_exit(xfrm6_tunnel_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_IPV6);