/*
 * Copyright (C)2003,2004 USAGI/WIDE Project
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Authors	Mitsuru KANDA <mk@linux-ipv6.org>
 * 		YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
 *
 * Based on net/ipv4/xfrm4_tunnel.c
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/xfrm.h>
#include <linux/list.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/ipv6.h>
#include <net/protocol.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#ifdef CONFIG_IPV6_XFRM6_TUNNEL_DEBUG
# define X6TDEBUG	3
#else
# define X6TDEBUG	1
#endif

#define X6TPRINTK(fmt, args...)		printk(fmt, ## args)
#define X6TNOPRINTK(fmt, args...)	do { ; } while(0)

#if X6TDEBUG >= 1
# define X6TPRINTK1	X6TPRINTK
#else
# define X6TPRINTK1	X6TNOPRINTK
#endif

#if X6TDEBUG >= 3
# define X6TPRINTK3	X6TPRINTK
#else
# define X6TPRINTK3	X6TNOPRINTK
#endif
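
/*
 * X6TPRINTK1 carries warning/error messages and is compiled in
 * unconditionally; X6TPRINTK3 carries the verbose per-call tracing and
 * only produces output when CONFIG_IPV6_XFRM6_TUNNEL_DEBUG is enabled.
 */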
/*
 * xfrm_tunnel_spi things are for allocating unique id ("spi")
 * per xfrm_address_t.
 */

/* Defined before the struct so that the debug-only "magic" member below
 * is compiled in when CONFIG_IPV6_XFRM6_TUNNEL_DEBUG is enabled. */
#ifdef CONFIG_IPV6_XFRM6_TUNNEL_DEBUG
# define XFRM6_TUNNEL_SPI_MAGIC 0xdeadbeef
#endif

struct xfrm6_tunnel_spi {
	struct hlist_node list_byaddr;
	struct hlist_node list_byspi;
	xfrm_address_t addr;
	u32 spi;
	atomic_t refcnt;
#ifdef XFRM6_TUNNEL_SPI_MAGIC
	u32 magic;
#endif
};
static DEFINE_RWLOCK(xfrm6_tunnel_spi_lock);

static u32 xfrm6_tunnel_spi;

#define XFRM6_TUNNEL_SPI_MIN	1
#define XFRM6_TUNNEL_SPI_MAX	0xffffffff

static kmem_cache_t *xfrm6_tunnel_spi_kmem;

#define XFRM6_TUNNEL_SPI_BYADDR_HSIZE	256
#define XFRM6_TUNNEL_SPI_BYSPI_HSIZE	256

static struct hlist_head xfrm6_tunnel_spi_byaddr[XFRM6_TUNNEL_SPI_BYADDR_HSIZE];
static struct hlist_head xfrm6_tunnel_spi_byspi[XFRM6_TUNNEL_SPI_BYSPI_HSIZE];
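
/*
 * Every remote address that needs an IPv6-in-IPv6 tunnel SPI gets one
 * refcounted xfrm6_tunnel_spi entry.  The entry is linked into both hash
 * tables at once: xfrm6_tunnel_spi_byaddr (keyed by the tunnel source
 * address, used for lookup/alloc/free) and xfrm6_tunnel_spi_byspi (keyed
 * by the SPI value, used when searching for a free SPI).  Both tables are
 * protected by xfrm6_tunnel_spi_lock.
 */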
#ifdef XFRM6_TUNNEL_SPI_MAGIC
static int x6spi_check_magic(const struct xfrm6_tunnel_spi *x6spi,
			     const char *name)
{
	if (unlikely(x6spi->magic != XFRM6_TUNNEL_SPI_MAGIC)) {
		X6TPRINTK3(KERN_DEBUG "%s(): x6spi object "
				      "at %p has corrupted magic %08x "
				      "(should be %08x)\n",
			   name, x6spi, x6spi->magic, XFRM6_TUNNEL_SPI_MAGIC);
		return -1;
	}
	return 0;
}
#else
static inline int x6spi_check_magic(const struct xfrm6_tunnel_spi *x6spi,
				    const char *name)
{
	return 0;
}
#endif

#define X6SPI_CHECK_MAGIC(x6spi) x6spi_check_magic((x6spi), __FUNCTION__)
static inline unsigned xfrm6_tunnel_spi_hash_byaddr(xfrm_address_t *addr)
{
	unsigned h;

	X6TPRINTK3(KERN_DEBUG "%s(addr=%p)\n", __FUNCTION__, addr);

	/* fold the four 32-bit words of the address into the low bits */
	h = addr->a6[0] ^ addr->a6[1] ^ addr->a6[2] ^ addr->a6[3];
	h ^= h >> 16;
	h ^= h >> 8;
	h &= XFRM6_TUNNEL_SPI_BYADDR_HSIZE - 1;

	X6TPRINTK3(KERN_DEBUG "%s() = %u\n", __FUNCTION__, h);

	return h;
}

static inline unsigned xfrm6_tunnel_spi_hash_byspi(u32 spi)
{
	return spi % XFRM6_TUNNEL_SPI_BYSPI_HSIZE;
}
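
/*
 * xfrm6_tunnel_spi_init()/xfrm6_tunnel_spi_fini() are called from module
 * init/exit: they create and destroy the SLAB cache backing the SPI
 * entries and (re)initialise the two hash tables.  fini refuses to tear
 * the cache down while any entry is still hashed.
 */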
static int xfrm6_tunnel_spi_init(void)
{
	int i;

	X6TPRINTK3(KERN_DEBUG "%s()\n", __FUNCTION__);

	xfrm6_tunnel_spi = 0;
	xfrm6_tunnel_spi_kmem = kmem_cache_create("xfrm6_tunnel_spi",
						  sizeof(struct xfrm6_tunnel_spi),
						  0, SLAB_HWCACHE_ALIGN,
						  NULL, NULL);
	if (!xfrm6_tunnel_spi_kmem) {
		X6TPRINTK1(KERN_ERR
			   "%s(): failed to allocate xfrm6_tunnel_spi_kmem\n",
			   __FUNCTION__);
		return -ENOMEM;
	}

	for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++)
		INIT_HLIST_HEAD(&xfrm6_tunnel_spi_byaddr[i]);
	for (i = 0; i < XFRM6_TUNNEL_SPI_BYSPI_HSIZE; i++)
		INIT_HLIST_HEAD(&xfrm6_tunnel_spi_byspi[i]);
	return 0;
}
static void xfrm6_tunnel_spi_fini(void)
{
	int i;

	X6TPRINTK3(KERN_DEBUG "%s()\n", __FUNCTION__);

	for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++) {
		if (!hlist_empty(&xfrm6_tunnel_spi_byaddr[i]))
			goto err;
	}
	for (i = 0; i < XFRM6_TUNNEL_SPI_BYSPI_HSIZE; i++) {
		if (!hlist_empty(&xfrm6_tunnel_spi_byspi[i]))
			goto err;
	}
	kmem_cache_destroy(xfrm6_tunnel_spi_kmem);
	xfrm6_tunnel_spi_kmem = NULL;
	return;
err:
	X6TPRINTK1(KERN_ERR "%s(): table is not empty\n", __FUNCTION__);
	return;
}
static struct xfrm6_tunnel_spi *__xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr)
{
	struct xfrm6_tunnel_spi *x6spi;
	struct hlist_node *pos;

	X6TPRINTK3(KERN_DEBUG "%s(saddr=%p)\n", __FUNCTION__, saddr);

	hlist_for_each_entry(x6spi, pos,
			     &xfrm6_tunnel_spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)],
			     list_byaddr) {
		if (memcmp(&x6spi->addr, saddr, sizeof(x6spi->addr)) == 0) {
			X6SPI_CHECK_MAGIC(x6spi);
			X6TPRINTK3(KERN_DEBUG "%s() = %p(%u)\n",
				   __FUNCTION__, x6spi, x6spi->spi);
			return x6spi;
		}
	}

	X6TPRINTK3(KERN_DEBUG "%s() = NULL(0)\n", __FUNCTION__);
	return NULL;
}
u32 xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr)
{
	struct xfrm6_tunnel_spi *x6spi;
	u32 spi;

	X6TPRINTK3(KERN_DEBUG "%s(saddr=%p)\n", __FUNCTION__, saddr);

	read_lock_bh(&xfrm6_tunnel_spi_lock);
	x6spi = __xfrm6_tunnel_spi_lookup(saddr);
	spi = x6spi ? x6spi->spi : 0;
	read_unlock_bh(&xfrm6_tunnel_spi_lock);
	return spi;
}

EXPORT_SYMBOL(xfrm6_tunnel_spi_lookup);
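
/*
 * Together with xfrm6_tunnel_alloc_spi() and xfrm6_tunnel_free_spi() below,
 * this is the exported per-address SPI API.  An illustrative call sequence
 * for a user of the API (a sketch, not code taken from this file):
 *
 *	spi = xfrm6_tunnel_alloc_spi(saddr);	allocates or takes a reference
 *	spi = xfrm6_tunnel_spi_lookup(saddr);	returns 0 if none is allocated
 *	xfrm6_tunnel_free_spi(saddr);		drops the reference again
 */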
static u32 __xfrm6_tunnel_alloc_spi(xfrm_address_t *saddr)
{
	u32 spi;
	struct xfrm6_tunnel_spi *x6spi;
	struct hlist_node *pos;
	unsigned index;

	X6TPRINTK3(KERN_DEBUG "%s(saddr=%p)\n", __FUNCTION__, saddr);

	if (xfrm6_tunnel_spi < XFRM6_TUNNEL_SPI_MIN ||
	    xfrm6_tunnel_spi >= XFRM6_TUNNEL_SPI_MAX)
		xfrm6_tunnel_spi = XFRM6_TUNNEL_SPI_MIN;
	else
		xfrm6_tunnel_spi++;

	/* search for an unused SPI, starting after the last one handed out
	 * and wrapping around to XFRM6_TUNNEL_SPI_MIN if necessary */
	for (spi = xfrm6_tunnel_spi; spi <= XFRM6_TUNNEL_SPI_MAX; spi++) {
		index = xfrm6_tunnel_spi_hash_byspi(spi);
		hlist_for_each_entry(x6spi, pos,
				     &xfrm6_tunnel_spi_byspi[index],
				     list_byspi) {
			if (x6spi->spi == spi)
				goto try_next_1;
		}
		xfrm6_tunnel_spi = spi;
		goto alloc_spi;
try_next_1:;
	}
	for (spi = XFRM6_TUNNEL_SPI_MIN; spi < xfrm6_tunnel_spi; spi++) {
		index = xfrm6_tunnel_spi_hash_byspi(spi);
		hlist_for_each_entry(x6spi, pos,
				     &xfrm6_tunnel_spi_byspi[index],
				     list_byspi) {
			if (x6spi->spi == spi)
				goto try_next_2;
		}
		xfrm6_tunnel_spi = spi;
		goto alloc_spi;
try_next_2:;
	}
	spi = 0;
	goto out;
alloc_spi:
	X6TPRINTK3(KERN_DEBUG "%s(): allocate new spi for "
			      "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
		   __FUNCTION__,
		   NIP6(*(struct in6_addr *)saddr));
	x6spi = kmem_cache_alloc(xfrm6_tunnel_spi_kmem, SLAB_ATOMIC);
	if (!x6spi) {
		X6TPRINTK1(KERN_ERR "%s(): kmem_cache_alloc() failed\n",
			   __FUNCTION__);
		goto out;
	}
#ifdef XFRM6_TUNNEL_SPI_MAGIC
	x6spi->magic = XFRM6_TUNNEL_SPI_MAGIC;
#endif
	memcpy(&x6spi->addr, saddr, sizeof(x6spi->addr));
	x6spi->spi = spi;
	atomic_set(&x6spi->refcnt, 1);

	hlist_add_head(&x6spi->list_byspi, &xfrm6_tunnel_spi_byspi[index]);

	index = xfrm6_tunnel_spi_hash_byaddr(saddr);
	hlist_add_head(&x6spi->list_byaddr, &xfrm6_tunnel_spi_byaddr[index]);
	X6SPI_CHECK_MAGIC(x6spi);
out:
	X6TPRINTK3(KERN_DEBUG "%s() = %u\n", __FUNCTION__, spi);
	return spi;
}
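
/*
 * xfrm6_tunnel_alloc_spi() is the locked entry point: under the writer
 * side of xfrm6_tunnel_spi_lock it first looks for an existing entry for
 * saddr (in which case it only bumps the refcount and returns its SPI) and
 * otherwise allocates a new one via __xfrm6_tunnel_alloc_spi().
 */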
u32 xfrm6_tunnel_alloc_spi(xfrm_address_t *saddr)
{
	struct xfrm6_tunnel_spi *x6spi;
	u32 spi;

	X6TPRINTK3(KERN_DEBUG "%s(saddr=%p)\n", __FUNCTION__, saddr);

	write_lock_bh(&xfrm6_tunnel_spi_lock);
	x6spi = __xfrm6_tunnel_spi_lookup(saddr);
	if (x6spi) {
		atomic_inc(&x6spi->refcnt);
		spi = x6spi->spi;
	} else
		spi = __xfrm6_tunnel_alloc_spi(saddr);
	write_unlock_bh(&xfrm6_tunnel_spi_lock);

	X6TPRINTK3(KERN_DEBUG "%s() = %u\n", __FUNCTION__, spi);

	return spi;
}

EXPORT_SYMBOL(xfrm6_tunnel_alloc_spi);
void xfrm6_tunnel_free_spi(xfrm_address_t *saddr)
{
	struct xfrm6_tunnel_spi *x6spi;
	struct hlist_node *pos, *n;

	X6TPRINTK3(KERN_DEBUG "%s(saddr=%p)\n", __FUNCTION__, saddr);

	write_lock_bh(&xfrm6_tunnel_spi_lock);

	hlist_for_each_entry_safe(x6spi, pos, n,
				  &xfrm6_tunnel_spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)],
				  list_byaddr) {
		if (memcmp(&x6spi->addr, saddr, sizeof(x6spi->addr)) == 0) {
			X6TPRINTK3(KERN_DEBUG "%s(): x6spi object "
					      "for %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x "
					      "found at %p\n",
				   __FUNCTION__,
				   NIP6(*(struct in6_addr *)saddr),
				   x6spi);
			X6SPI_CHECK_MAGIC(x6spi);
			if (atomic_dec_and_test(&x6spi->refcnt)) {
				hlist_del(&x6spi->list_byaddr);
				hlist_del(&x6spi->list_byspi);
				kmem_cache_free(xfrm6_tunnel_spi_kmem, x6spi);
			}
			break;
		}
	}
	write_unlock_bh(&xfrm6_tunnel_spi_lock);
}

EXPORT_SYMBOL(xfrm6_tunnel_free_spi);
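
/*
 * End of the SPI allocation helpers.  Everything below implements the
 * xfrm type ("IP6IP6") and the inet6 protocol handler for IPPROTO_IPV6.
 */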
static int xfrm6_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ipv6hdr *top_iph;

	top_iph = (struct ipv6hdr *)skb->data;
	top_iph->payload_len = htons(skb->len - sizeof(struct ipv6hdr));

	return 0;
}

static int xfrm6_tunnel_input(struct xfrm_state *x, struct xfrm_decap_state *decap, struct sk_buff *skb)
{
	return 0;
}
static struct xfrm6_tunnel *xfrm6_tunnel_handler;
static DECLARE_MUTEX(xfrm6_tunnel_sem);
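
/*
 * Only one xfrm6_tunnel handler (e.g. a device-like ip6ip6 tunnel driver)
 * can be registered at a time; xfrm6_tunnel_sem serialises register and
 * deregister against each other.
 */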
int xfrm6_tunnel_register(struct xfrm6_tunnel *handler)
{
	int ret;

	down(&xfrm6_tunnel_sem);
	ret = 0;
	if (xfrm6_tunnel_handler != NULL)
		ret = -EINVAL;
	if (!ret)
		xfrm6_tunnel_handler = handler;
	up(&xfrm6_tunnel_sem);

	return ret;
}

EXPORT_SYMBOL(xfrm6_tunnel_register);
int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler)
{
	int ret;

	down(&xfrm6_tunnel_sem);
	ret = 0;
	if (xfrm6_tunnel_handler != handler)
		ret = -EINVAL;
	if (!ret)
		xfrm6_tunnel_handler = NULL;
	up(&xfrm6_tunnel_sem);

	return ret;
}

EXPORT_SYMBOL(xfrm6_tunnel_deregister);
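
/*
 * Receive path: a registered device-like handler gets the packet first;
 * if it does not consume it, the packet is handed to the xfrm stack via
 * xfrm6_rcv_spi() using the SPI previously allocated for the outer source
 * address (0 if none is known).
 */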
static int xfrm6_tunnel_rcv(struct sk_buff **pskb, unsigned int *nhoffp)
{
	struct sk_buff *skb = *pskb;
	struct xfrm6_tunnel *handler = xfrm6_tunnel_handler;
	struct ipv6hdr *iph = skb->nh.ipv6h;
	u32 spi;

	/* device-like ip6ip6 handler() */
	if (handler && handler->handler(pskb, nhoffp) == 0)
		return 0;

	spi = xfrm6_tunnel_spi_lookup((xfrm_address_t *)&iph->saddr);
	return xfrm6_rcv_spi(pskb, nhoffp, spi);
}
static void xfrm6_tunnel_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
			     int type, int code, int offset, __u32 info)
{
	struct xfrm6_tunnel *handler = xfrm6_tunnel_handler;

	/* call here first for device-like ip6ip6 err handling */
	if (handler) {
		handler->err_handler(skb, opt, type, code, offset, info);
		return;
	}

	/* xfrm6_tunnel native err handling */
	switch (type) {
	case ICMPV6_DEST_UNREACH:
		switch (code) {
		case ICMPV6_NOROUTE:
		case ICMPV6_ADM_PROHIBITED:
		case ICMPV6_NOT_NEIGHBOUR:
		case ICMPV6_ADDR_UNREACH:
		case ICMPV6_PORT_UNREACH:
		default:
			X6TPRINTK3(KERN_DEBUG
				   "xfrm6_tunnel: Destination Unreach.\n");
			break;
		}
		break;
	case ICMPV6_PKT_TOOBIG:
		X6TPRINTK3(KERN_DEBUG
			   "xfrm6_tunnel: Packet Too Big.\n");
		break;
	case ICMPV6_TIME_EXCEED:
		switch (code) {
		case ICMPV6_EXC_HOPLIMIT:
			X6TPRINTK3(KERN_DEBUG
				   "xfrm6_tunnel: Too small Hoplimit.\n");
			break;
		case ICMPV6_EXC_FRAGTIME:
		default:
			break;
		}
		break;
	case ICMPV6_PARAMPROB:
		switch (code) {
		case ICMPV6_HDR_FIELD: break;
		case ICMPV6_UNK_NEXTHDR: break;
		case ICMPV6_UNK_OPTION: break;
		}
		break;
	default:
		break;
	}

	return;
}
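
/*
 * xfrm_state hooks: init_state only has to account for the outer IPv6
 * header that will be prepended, and destroy releases the SPI reference
 * held for the state's source address.
 */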
static int xfrm6_tunnel_init_state(struct xfrm_state *x, void *args)
{
	x->props.header_len = sizeof(struct ipv6hdr);

	return 0;
}

static void xfrm6_tunnel_destroy(struct xfrm_state *x)
{
	xfrm6_tunnel_free_spi((xfrm_address_t *)&x->props.saddr);
}
static struct xfrm_type xfrm6_tunnel_type = {
	.description	= "IP6IP6",
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_IPV6,
	.init_state	= xfrm6_tunnel_init_state,
	.destructor	= xfrm6_tunnel_destroy,
	.input		= xfrm6_tunnel_input,
	.output		= xfrm6_tunnel_output,
};

static struct inet6_protocol xfrm6_tunnel_protocol = {
	.handler	= xfrm6_tunnel_rcv,
	.err_handler	= xfrm6_tunnel_err,
	.flags		= INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};
static int __init xfrm6_tunnel_init(void)
{
	X6TPRINTK3(KERN_DEBUG "%s()\n", __FUNCTION__);

	if (xfrm_register_type(&xfrm6_tunnel_type, AF_INET6) < 0) {
		X6TPRINTK1(KERN_ERR
			   "xfrm6_tunnel init: can't add xfrm type\n");
		return -EAGAIN;
	}
	if (inet6_add_protocol(&xfrm6_tunnel_protocol, IPPROTO_IPV6) < 0) {
		X6TPRINTK1(KERN_ERR
			   "xfrm6_tunnel init(): can't add protocol\n");
		xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6);
		return -EAGAIN;
	}
	if (xfrm6_tunnel_spi_init() < 0) {
		X6TPRINTK1(KERN_ERR
			   "xfrm6_tunnel init: failed to initialize spi\n");
		inet6_del_protocol(&xfrm6_tunnel_protocol, IPPROTO_IPV6);
		xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6);
		return -EAGAIN;
	}
	return 0;
}
static void __exit xfrm6_tunnel_fini(void)
{
	X6TPRINTK3(KERN_DEBUG "%s()\n", __FUNCTION__);

	xfrm6_tunnel_spi_fini();
	if (inet6_del_protocol(&xfrm6_tunnel_protocol, IPPROTO_IPV6) < 0)
		X6TPRINTK1(KERN_ERR
			   "xfrm6_tunnel close: can't remove protocol\n");
	if (xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6) < 0)
		X6TPRINTK1(KERN_ERR
			   "xfrm6_tunnel close: can't remove xfrm type\n");
}
module_init(xfrm6_tunnel_init);
module_exit(xfrm6_tunnel_fini);
MODULE_LICENSE("GPL");