/*
 * IP Payload Compression Protocol (IPComp) - RFC3173.
 *
 * Copyright (c) 2003 James Morris <jmorris@intercode.com.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * Todo:
 *   - Tunable compression parameters.
 *   - Compression stats.
 *   - Adaptive compression.
 */
#include <linux/module.h>
#include <asm/semaphore.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/pfkeyv2.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/list.h>
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
#include <linux/mutex.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/icmp.h>
#include <net/ipcomp.h>
#include <net/protocol.h>
34 struct list_head list
;
35 struct crypto_comp
**tfms
;
/* Serializes allocation/free of the shared scratch and tfm resources. */
static DEFINE_MUTEX(ipcomp_resource_mutex);

/* Per-CPU decompression scratch buffers, shared by all IPComp states. */
static void **ipcomp_scratches;
static int ipcomp_scratch_users;

/* List of struct ipcomp_tfms, one per in-use compression algorithm. */
static LIST_HEAD(ipcomp_tfms_list);
44 static int ipcomp_decompress(struct xfrm_state
*x
, struct sk_buff
*skb
)
46 struct ipcomp_data
*ipcd
= x
->data
;
47 const int plen
= skb
->len
;
48 int dlen
= IPCOMP_SCRATCH_SIZE
;
49 const u8
*start
= skb
->data
;
50 const int cpu
= get_cpu();
51 u8
*scratch
= *per_cpu_ptr(ipcomp_scratches
, cpu
);
52 struct crypto_comp
*tfm
= *per_cpu_ptr(ipcd
->tfms
, cpu
);
53 int err
= crypto_comp_decompress(tfm
, start
, plen
, scratch
, &dlen
);
58 if (dlen
< (plen
+ sizeof(struct ip_comp_hdr
))) {
63 err
= pskb_expand_head(skb
, 0, dlen
- plen
, GFP_ATOMIC
);
67 skb
->truesize
+= dlen
- plen
;
68 __skb_put(skb
, dlen
- plen
);
69 skb_copy_to_linear_data(skb
, scratch
, dlen
);
75 static int ipcomp_input(struct xfrm_state
*x
, struct sk_buff
*skb
)
78 struct ip_comp_hdr
*ipch
;
80 if (skb_linearize_cow(skb
))
83 skb
->ip_summed
= CHECKSUM_NONE
;
85 /* Remove ipcomp header and decompress original payload */
86 ipch
= (void *)skb
->data
;
87 skb
->transport_header
= skb
->network_header
+ sizeof(*ipch
);
88 __skb_pull(skb
, sizeof(*ipch
));
89 err
= ipcomp_decompress(x
, skb
);
99 static int ipcomp_compress(struct xfrm_state
*x
, struct sk_buff
*skb
)
101 struct ipcomp_data
*ipcd
= x
->data
;
102 const int plen
= skb
->len
;
103 int dlen
= IPCOMP_SCRATCH_SIZE
;
104 u8
*start
= skb
->data
;
105 const int cpu
= get_cpu();
106 u8
*scratch
= *per_cpu_ptr(ipcomp_scratches
, cpu
);
107 struct crypto_comp
*tfm
= *per_cpu_ptr(ipcd
->tfms
, cpu
);
108 int err
= crypto_comp_compress(tfm
, start
, plen
, scratch
, &dlen
);
113 if ((dlen
+ sizeof(struct ip_comp_hdr
)) >= plen
) {
118 memcpy(start
+ sizeof(struct ip_comp_hdr
), scratch
, dlen
);
121 pskb_trim(skb
, dlen
+ sizeof(struct ip_comp_hdr
));
129 static int ipcomp_output(struct xfrm_state
*x
, struct sk_buff
*skb
)
132 struct ip_comp_hdr
*ipch
;
133 struct ipcomp_data
*ipcd
= x
->data
;
135 if (skb
->len
< ipcd
->threshold
) {
136 /* Don't bother compressing */
140 if (skb_linearize_cow(skb
))
143 err
= ipcomp_compress(x
, skb
);
149 /* Install ipcomp header, convert into ipcomp datagram. */
150 ipch
= ip_comp_hdr(skb
);
151 ipch
->nexthdr
= *skb_mac_header(skb
);
153 ipch
->cpi
= htons((u16
)ntohl(x
->id
.spi
));
154 *skb_mac_header(skb
) = IPPROTO_COMP
;
156 skb_push(skb
, -skb_network_offset(skb
));
160 static void ipcomp4_err(struct sk_buff
*skb
, u32 info
)
163 struct iphdr
*iph
= (struct iphdr
*)skb
->data
;
164 struct ip_comp_hdr
*ipch
= (struct ip_comp_hdr
*)(skb
->data
+(iph
->ihl
<<2));
165 struct xfrm_state
*x
;
167 if (icmp_hdr(skb
)->type
!= ICMP_DEST_UNREACH
||
168 icmp_hdr(skb
)->code
!= ICMP_FRAG_NEEDED
)
171 spi
= htonl(ntohs(ipch
->cpi
));
172 x
= xfrm_state_lookup((xfrm_address_t
*)&iph
->daddr
,
173 spi
, IPPROTO_COMP
, AF_INET
);
176 NETDEBUG(KERN_DEBUG
"pmtu discovery on SA IPCOMP/%08x/%u.%u.%u.%u\n",
177 spi
, NIPQUAD(iph
->daddr
));
181 /* We always hold one tunnel user reference to indicate a tunnel */
182 static struct xfrm_state
*ipcomp_tunnel_create(struct xfrm_state
*x
)
184 struct xfrm_state
*t
;
185 u8 mode
= XFRM_MODE_TUNNEL
;
187 t
= xfrm_state_alloc();
191 t
->id
.proto
= IPPROTO_IPIP
;
192 t
->id
.spi
= x
->props
.saddr
.a4
;
193 t
->id
.daddr
.a4
= x
->id
.daddr
.a4
;
194 memcpy(&t
->sel
, &x
->sel
, sizeof(t
->sel
));
195 t
->props
.family
= AF_INET
;
196 if (x
->props
.mode
== XFRM_MODE_BEET
)
197 mode
= x
->props
.mode
;
198 t
->props
.mode
= mode
;
199 t
->props
.saddr
.a4
= x
->props
.saddr
.a4
;
200 t
->props
.flags
= x
->props
.flags
;
202 if (xfrm_init_state(t
))
205 atomic_set(&t
->tunnel_users
, 1);
210 t
->km
.state
= XFRM_STATE_DEAD
;
217 * Must be protected by xfrm_cfg_mutex. State and tunnel user references are
218 * always incremented on success.
220 static int ipcomp_tunnel_attach(struct xfrm_state
*x
)
223 struct xfrm_state
*t
;
225 t
= xfrm_state_lookup((xfrm_address_t
*)&x
->id
.daddr
.a4
,
226 x
->props
.saddr
.a4
, IPPROTO_IPIP
, AF_INET
);
228 t
= ipcomp_tunnel_create(x
);
233 xfrm_state_insert(t
);
237 atomic_inc(&t
->tunnel_users
);
242 static void ipcomp_free_scratches(void)
247 if (--ipcomp_scratch_users
)
250 scratches
= ipcomp_scratches
;
254 for_each_possible_cpu(i
)
255 vfree(*per_cpu_ptr(scratches
, i
));
257 free_percpu(scratches
);
260 static void **ipcomp_alloc_scratches(void)
265 if (ipcomp_scratch_users
++)
266 return ipcomp_scratches
;
268 scratches
= alloc_percpu(void *);
272 ipcomp_scratches
= scratches
;
274 for_each_possible_cpu(i
) {
275 void *scratch
= vmalloc(IPCOMP_SCRATCH_SIZE
);
278 *per_cpu_ptr(scratches
, i
) = scratch
;
284 static void ipcomp_free_tfms(struct crypto_comp
**tfms
)
286 struct ipcomp_tfms
*pos
;
289 list_for_each_entry(pos
, &ipcomp_tfms_list
, list
) {
290 if (pos
->tfms
== tfms
)
299 list_del(&pos
->list
);
305 for_each_possible_cpu(cpu
) {
306 struct crypto_comp
*tfm
= *per_cpu_ptr(tfms
, cpu
);
307 crypto_free_comp(tfm
);
312 static struct crypto_comp
**ipcomp_alloc_tfms(const char *alg_name
)
314 struct ipcomp_tfms
*pos
;
315 struct crypto_comp
**tfms
;
318 /* This can be any valid CPU ID so we don't need locking. */
319 cpu
= raw_smp_processor_id();
321 list_for_each_entry(pos
, &ipcomp_tfms_list
, list
) {
322 struct crypto_comp
*tfm
;
325 tfm
= *per_cpu_ptr(tfms
, cpu
);
327 if (!strcmp(crypto_comp_name(tfm
), alg_name
)) {
333 pos
= kmalloc(sizeof(*pos
), GFP_KERNEL
);
338 INIT_LIST_HEAD(&pos
->list
);
339 list_add(&pos
->list
, &ipcomp_tfms_list
);
341 pos
->tfms
= tfms
= alloc_percpu(struct crypto_comp
*);
345 for_each_possible_cpu(cpu
) {
346 struct crypto_comp
*tfm
= crypto_alloc_comp(alg_name
, 0,
350 *per_cpu_ptr(tfms
, cpu
) = tfm
;
356 ipcomp_free_tfms(tfms
);
360 static void ipcomp_free_data(struct ipcomp_data
*ipcd
)
363 ipcomp_free_tfms(ipcd
->tfms
);
364 ipcomp_free_scratches();
367 static void ipcomp_destroy(struct xfrm_state
*x
)
369 struct ipcomp_data
*ipcd
= x
->data
;
372 xfrm_state_delete_tunnel(x
);
373 mutex_lock(&ipcomp_resource_mutex
);
374 ipcomp_free_data(ipcd
);
375 mutex_unlock(&ipcomp_resource_mutex
);
379 static int ipcomp_init_state(struct xfrm_state
*x
)
382 struct ipcomp_data
*ipcd
;
383 struct xfrm_algo_desc
*calg_desc
;
393 ipcd
= kzalloc(sizeof(*ipcd
), GFP_KERNEL
);
397 x
->props
.header_len
= 0;
398 if (x
->props
.mode
== XFRM_MODE_TUNNEL
)
399 x
->props
.header_len
+= sizeof(struct iphdr
);
401 mutex_lock(&ipcomp_resource_mutex
);
402 if (!ipcomp_alloc_scratches())
405 ipcd
->tfms
= ipcomp_alloc_tfms(x
->calg
->alg_name
);
408 mutex_unlock(&ipcomp_resource_mutex
);
410 if (x
->props
.mode
== XFRM_MODE_TUNNEL
) {
411 err
= ipcomp_tunnel_attach(x
);
416 calg_desc
= xfrm_calg_get_byname(x
->calg
->alg_name
, 0);
418 ipcd
->threshold
= calg_desc
->uinfo
.comp
.threshold
;
425 mutex_lock(&ipcomp_resource_mutex
);
427 ipcomp_free_data(ipcd
);
428 mutex_unlock(&ipcomp_resource_mutex
);
433 static struct xfrm_type ipcomp_type
= {
434 .description
= "IPCOMP4",
435 .owner
= THIS_MODULE
,
436 .proto
= IPPROTO_COMP
,
437 .init_state
= ipcomp_init_state
,
438 .destructor
= ipcomp_destroy
,
439 .input
= ipcomp_input
,
440 .output
= ipcomp_output
443 static struct net_protocol ipcomp4_protocol
= {
444 .handler
= xfrm4_rcv
,
445 .err_handler
= ipcomp4_err
,
449 static int __init
ipcomp4_init(void)
451 if (xfrm_register_type(&ipcomp_type
, AF_INET
) < 0) {
452 printk(KERN_INFO
"ipcomp init: can't add xfrm type\n");
455 if (inet_add_protocol(&ipcomp4_protocol
, IPPROTO_COMP
) < 0) {
456 printk(KERN_INFO
"ipcomp init: can't add protocol\n");
457 xfrm_unregister_type(&ipcomp_type
, AF_INET
);
463 static void __exit
ipcomp4_fini(void)
465 if (inet_del_protocol(&ipcomp4_protocol
, IPPROTO_COMP
) < 0)
466 printk(KERN_INFO
"ip ipcomp close: can't remove protocol\n");
467 if (xfrm_unregister_type(&ipcomp_type
, AF_INET
) < 0)
468 printk(KERN_INFO
"ip ipcomp close: can't remove xfrm type\n");
471 module_init(ipcomp4_init
);
472 module_exit(ipcomp4_fini
);
474 MODULE_LICENSE("GPL");
475 MODULE_DESCRIPTION("IP Payload Compression Protocol (IPComp) - RFC3173");
476 MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>");
478 MODULE_ALIAS_XFRM_TYPE(AF_INET
, XFRM_PROTO_COMP
);