/*
 * IP Payload Compression Protocol (IPComp) - RFC3173.
 *
 * Copyright (c) 2003 James Morris <jmorris@intercode.com.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * Todo:
 *   - Tunable compression parameters.
 *   - Compression stats.
 *   - Adaptive compression.
 */
#include <linux/config.h>
#include <linux/module.h>
#include <asm/scatterlist.h>
#include <asm/semaphore.h>
#include <linux/crypto.h>
#include <linux/pfkeyv2.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/list.h>
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/icmp.h>
#include <net/ipcomp.h>
33 struct list_head list
;
34 struct crypto_tfm
**tfms
;
/*
 * Global (file-scope) resources shared by all IPComp states.
 * ipcomp_resource_sem serializes allocation/free of the per-cpu
 * scratch buffers and the per-algorithm tfm lists below.
 */
static DECLARE_MUTEX(ipcomp_resource_sem);
static void **ipcomp_scratches;		/* per-cpu decompression scratch pages */
static int ipcomp_scratch_users;	/* refcount on ipcomp_scratches */
static LIST_HEAD(ipcomp_tfms_list);	/* all struct ipcomp_tfms entries */
43 static int ipcomp_decompress(struct xfrm_state
*x
, struct sk_buff
*skb
)
47 struct ipcomp_data
*ipcd
= x
->data
;
49 struct crypto_tfm
*tfm
;
53 dlen
= IPCOMP_SCRATCH_SIZE
;
57 scratch
= *per_cpu_ptr(ipcomp_scratches
, cpu
);
58 tfm
= *per_cpu_ptr(ipcd
->tfms
, cpu
);
60 err
= crypto_comp_decompress(tfm
, start
, plen
, scratch
, &dlen
);
64 if (dlen
< (plen
+ sizeof(struct ip_comp_hdr
))) {
69 err
= pskb_expand_head(skb
, 0, dlen
- plen
, GFP_ATOMIC
);
73 skb_put(skb
, dlen
- plen
);
74 memcpy(skb
->data
, scratch
, dlen
);
76 iph
->tot_len
= htons(dlen
+ iph
->ihl
* 4);
82 static int ipcomp_input(struct xfrm_state
*x
,
83 struct xfrm_decap_state
*decap
, struct sk_buff
*skb
)
94 if ((skb_is_nonlinear(skb
) || skb_cloned(skb
)) &&
95 skb_linearize(skb
, GFP_ATOMIC
) != 0) {
100 skb
->ip_summed
= CHECKSUM_NONE
;
102 /* Remove ipcomp header and decompress original payload */
104 memcpy(&tmp_iph
, iph
, iph
->ihl
* 4);
105 nexthdr
= *(u8
*)skb
->data
;
106 skb_pull(skb
, sizeof(struct ip_comp_hdr
));
107 skb
->nh
.raw
+= sizeof(struct ip_comp_hdr
);
108 memcpy(skb
->nh
.raw
, &tmp_iph
, tmp_iph
.iph
.ihl
* 4);
110 iph
->tot_len
= htons(ntohs(iph
->tot_len
) - sizeof(struct ip_comp_hdr
));
111 iph
->protocol
= nexthdr
;
112 skb
->h
.raw
= skb
->data
;
113 err
= ipcomp_decompress(x
, skb
);
119 static int ipcomp_compress(struct xfrm_state
*x
, struct sk_buff
*skb
)
121 int err
, plen
, dlen
, ihlen
;
122 struct iphdr
*iph
= skb
->nh
.iph
;
123 struct ipcomp_data
*ipcd
= x
->data
;
125 struct crypto_tfm
*tfm
;
128 ihlen
= iph
->ihl
* 4;
129 plen
= skb
->len
- ihlen
;
130 dlen
= IPCOMP_SCRATCH_SIZE
;
131 start
= skb
->data
+ ihlen
;
134 scratch
= *per_cpu_ptr(ipcomp_scratches
, cpu
);
135 tfm
= *per_cpu_ptr(ipcd
->tfms
, cpu
);
137 err
= crypto_comp_compress(tfm
, start
, plen
, scratch
, &dlen
);
141 if ((dlen
+ sizeof(struct ip_comp_hdr
)) >= plen
) {
146 memcpy(start
+ sizeof(struct ip_comp_hdr
), scratch
, dlen
);
149 pskb_trim(skb
, ihlen
+ dlen
+ sizeof(struct ip_comp_hdr
));
157 static int ipcomp_output(struct xfrm_state
*x
, struct sk_buff
*skb
)
161 struct ip_comp_hdr
*ipch
;
162 struct ipcomp_data
*ipcd
= x
->data
;
166 iph
->tot_len
= htons(skb
->len
);
167 hdr_len
= iph
->ihl
* 4;
168 if ((skb
->len
- hdr_len
) < ipcd
->threshold
) {
169 /* Don't bother compressing */
173 if ((skb_is_nonlinear(skb
) || skb_cloned(skb
)) &&
174 skb_linearize(skb
, GFP_ATOMIC
) != 0) {
178 err
= ipcomp_compress(x
, skb
);
185 /* Install ipcomp header, convert into ipcomp datagram. */
186 iph
->tot_len
= htons(skb
->len
);
187 ipch
= (struct ip_comp_hdr
*)((char *)iph
+ iph
->ihl
* 4);
188 ipch
->nexthdr
= iph
->protocol
;
190 ipch
->cpi
= htons((u16
)ntohl(x
->id
.spi
));
191 iph
->protocol
= IPPROTO_COMP
;
201 static void ipcomp4_err(struct sk_buff
*skb
, u32 info
)
204 struct iphdr
*iph
= (struct iphdr
*)skb
->data
;
205 struct ip_comp_hdr
*ipch
= (struct ip_comp_hdr
*)(skb
->data
+(iph
->ihl
<<2));
206 struct xfrm_state
*x
;
208 if (skb
->h
.icmph
->type
!= ICMP_DEST_UNREACH
||
209 skb
->h
.icmph
->code
!= ICMP_FRAG_NEEDED
)
212 spi
= ntohl(ntohs(ipch
->cpi
));
213 x
= xfrm_state_lookup((xfrm_address_t
*)&iph
->daddr
,
214 spi
, IPPROTO_COMP
, AF_INET
);
217 NETDEBUG(printk(KERN_DEBUG
"pmtu discovery on SA IPCOMP/%08x/%u.%u.%u.%u\n",
218 spi
, NIPQUAD(iph
->daddr
)));
222 /* We always hold one tunnel user reference to indicate a tunnel */
223 static struct xfrm_state
*ipcomp_tunnel_create(struct xfrm_state
*x
)
225 struct xfrm_state
*t
;
227 t
= xfrm_state_alloc();
231 t
->id
.proto
= IPPROTO_IPIP
;
232 t
->id
.spi
= x
->props
.saddr
.a4
;
233 t
->id
.daddr
.a4
= x
->id
.daddr
.a4
;
234 memcpy(&t
->sel
, &x
->sel
, sizeof(t
->sel
));
235 t
->props
.family
= AF_INET
;
237 t
->props
.saddr
.a4
= x
->props
.saddr
.a4
;
238 t
->props
.flags
= x
->props
.flags
;
240 t
->type
= xfrm_get_type(IPPROTO_IPIP
, t
->props
.family
);
244 if (t
->type
->init_state(t
, NULL
))
247 t
->km
.state
= XFRM_STATE_VALID
;
248 atomic_set(&t
->tunnel_users
, 1);
253 t
->km
.state
= XFRM_STATE_DEAD
;
260 * Must be protected by xfrm_cfg_sem. State and tunnel user references are
261 * always incremented on success.
263 static int ipcomp_tunnel_attach(struct xfrm_state
*x
)
266 struct xfrm_state
*t
;
268 t
= xfrm_state_lookup((xfrm_address_t
*)&x
->id
.daddr
.a4
,
269 x
->props
.saddr
.a4
, IPPROTO_IPIP
, AF_INET
);
271 t
= ipcomp_tunnel_create(x
);
276 xfrm_state_insert(t
);
280 atomic_inc(&t
->tunnel_users
);
285 static void ipcomp_free_scratches(void)
290 if (--ipcomp_scratch_users
)
293 scratches
= ipcomp_scratches
;
298 void *scratch
= *per_cpu_ptr(scratches
, i
);
303 free_percpu(scratches
);
306 static void **ipcomp_alloc_scratches(void)
311 if (ipcomp_scratch_users
++)
312 return ipcomp_scratches
;
314 scratches
= alloc_percpu(void *);
318 ipcomp_scratches
= scratches
;
321 void *scratch
= vmalloc(IPCOMP_SCRATCH_SIZE
);
324 *per_cpu_ptr(scratches
, i
) = scratch
;
330 static void ipcomp_free_tfms(struct crypto_tfm
**tfms
)
332 struct ipcomp_tfms
*pos
;
335 list_for_each_entry(pos
, &ipcomp_tfms_list
, list
) {
336 if (pos
->tfms
== tfms
)
345 list_del(&pos
->list
);
352 struct crypto_tfm
*tfm
= *per_cpu_ptr(tfms
, cpu
);
354 crypto_free_tfm(tfm
);
359 static struct crypto_tfm
**ipcomp_alloc_tfms(const char *alg_name
)
361 struct ipcomp_tfms
*pos
;
362 struct crypto_tfm
**tfms
;
365 /* This can be any valid CPU ID so we don't need locking. */
366 cpu
= smp_processor_id();
368 list_for_each_entry(pos
, &ipcomp_tfms_list
, list
) {
369 struct crypto_tfm
*tfm
;
372 tfm
= *per_cpu_ptr(tfms
, cpu
);
374 if (!strcmp(crypto_tfm_alg_name(tfm
), alg_name
)) {
380 pos
= kmalloc(sizeof(*pos
), GFP_KERNEL
);
385 INIT_LIST_HEAD(&pos
->list
);
386 list_add(&pos
->list
, &ipcomp_tfms_list
);
388 pos
->tfms
= tfms
= alloc_percpu(struct crypto_tfm
*);
393 struct crypto_tfm
*tfm
= crypto_alloc_tfm(alg_name
, 0);
396 *per_cpu_ptr(tfms
, cpu
) = tfm
;
402 ipcomp_free_tfms(tfms
);
406 static void ipcomp_free_data(struct ipcomp_data
*ipcd
)
409 ipcomp_free_tfms(ipcd
->tfms
);
410 ipcomp_free_scratches();
413 static void ipcomp_destroy(struct xfrm_state
*x
)
415 struct ipcomp_data
*ipcd
= x
->data
;
418 xfrm_state_delete_tunnel(x
);
419 down(&ipcomp_resource_sem
);
420 ipcomp_free_data(ipcd
);
421 up(&ipcomp_resource_sem
);
425 static int ipcomp_init_state(struct xfrm_state
*x
, void *args
)
428 struct ipcomp_data
*ipcd
;
429 struct xfrm_algo_desc
*calg_desc
;
439 ipcd
= kmalloc(sizeof(*ipcd
), GFP_KERNEL
);
443 memset(ipcd
, 0, sizeof(*ipcd
));
444 x
->props
.header_len
= 0;
446 x
->props
.header_len
+= sizeof(struct iphdr
);
448 down(&ipcomp_resource_sem
);
449 if (!ipcomp_alloc_scratches())
452 ipcd
->tfms
= ipcomp_alloc_tfms(x
->calg
->alg_name
);
455 up(&ipcomp_resource_sem
);
458 err
= ipcomp_tunnel_attach(x
);
463 calg_desc
= xfrm_calg_get_byname(x
->calg
->alg_name
, 0);
465 ipcd
->threshold
= calg_desc
->uinfo
.comp
.threshold
;
472 down(&ipcomp_resource_sem
);
474 ipcomp_free_data(ipcd
);
475 up(&ipcomp_resource_sem
);
480 static struct xfrm_type ipcomp_type
= {
481 .description
= "IPCOMP4",
482 .owner
= THIS_MODULE
,
483 .proto
= IPPROTO_COMP
,
484 .init_state
= ipcomp_init_state
,
485 .destructor
= ipcomp_destroy
,
486 .input
= ipcomp_input
,
487 .output
= ipcomp_output
490 static struct net_protocol ipcomp4_protocol
= {
491 .handler
= xfrm4_rcv
,
492 .err_handler
= ipcomp4_err
,
496 static int __init
ipcomp4_init(void)
498 if (xfrm_register_type(&ipcomp_type
, AF_INET
) < 0) {
499 printk(KERN_INFO
"ipcomp init: can't add xfrm type\n");
502 if (inet_add_protocol(&ipcomp4_protocol
, IPPROTO_COMP
) < 0) {
503 printk(KERN_INFO
"ipcomp init: can't add protocol\n");
504 xfrm_unregister_type(&ipcomp_type
, AF_INET
);
510 static void __exit
ipcomp4_fini(void)
512 if (inet_del_protocol(&ipcomp4_protocol
, IPPROTO_COMP
) < 0)
513 printk(KERN_INFO
"ip ipcomp close: can't remove protocol\n");
514 if (xfrm_unregister_type(&ipcomp_type
, AF_INET
) < 0)
515 printk(KERN_INFO
"ip ipcomp close: can't remove xfrm type\n");
518 module_init(ipcomp4_init
);
519 module_exit(ipcomp4_fini
);
521 MODULE_LICENSE("GPL");
522 MODULE_DESCRIPTION("IP Payload Compression Protocol (IPComp) - RFC3173");
523 MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>");