/*
 * Copyright (C)2002 USAGI/WIDE Project
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 *	Mitsuru KANDA @USAGI       : IPv6 Support
 *	Kazunori MIYAZAWA @USAGI   :
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *
 *	This file is derived from net/ipv4/esp.c
 */

#define pr_fmt(fmt) "IPv6: " fmt

#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/pfkeyv2.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/ip6_route.h>
#include <net/ipv6.h>
#include <net/protocol.h>
#include <linux/icmpv6.h>

struct esp_skb_cb {
	struct xfrm_skb_cb xfrm;
	void *tmp;
};

#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))

static u32 esp6_get_mtu(struct xfrm_state *x, int mtu);

/*
 * Allocate an AEAD request structure with extra space for SG and IV.
 *
 * For alignment considerations the upper 32 bits of the sequence number are
 * placed at the front, if present, followed by the IV, the request and
 * finally the SG list.
 *
 * TODO: Use spare space in skb for this where possible.
 */
static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqihlen)

	len += crypto_aead_ivsize(aead);

	len += crypto_aead_alignmask(aead) &
	       ~(crypto_tfm_ctx_alignment() - 1);
	len = ALIGN(len, crypto_tfm_ctx_alignment());

	len += sizeof(struct aead_request) + crypto_aead_reqsize(aead);
	len = ALIGN(len, __alignof__(struct scatterlist));

	len += sizeof(struct scatterlist) * nfrags;

	return kmalloc(len, GFP_ATOMIC);

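/* Illustrative note (not part of the original source): the tmp buffer sized
 * above is carved up by the esp_tmp_*() helpers below in the same order it
 * was allocated, roughly
 *
 *   [ seqhi (ESN high bits, if any) ][ IV ][ aead_request + req ctx ][ SG entries ]
 *
 * with alignment padding between the regions as computed above.
 */
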
static inline __be32 *esp_tmp_seqhi(void *tmp)
	return PTR_ALIGN((__be32 *)tmp, __alignof__(__be32));

static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int seqhilen)
	return crypto_aead_ivsize(aead) ?
	       PTR_ALIGN((u8 *)tmp + seqhilen,
			 crypto_aead_alignmask(aead) + 1) : tmp + seqhilen;

static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
	struct aead_request *req;

	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
				crypto_tfm_ctx_alignment());
	aead_request_set_tfm(req, aead);

static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
					     struct aead_request *req)
	return (void *)ALIGN((unsigned long)(req + 1) +
			     crypto_aead_reqsize(aead),
			     __alignof__(struct scatterlist));

static void esp_output_done(struct crypto_async_request *base, int err)
	struct sk_buff *skb = base->data;

	kfree(ESP_SKB_CB(skb)->tmp);
	xfrm_output_resume(skb, err);

/* Move ESP header back into place. */
static void esp_restore_header(struct sk_buff *skb, unsigned int offset)
	struct ip_esp_hdr *esph = (void *)(skb->data + offset);
	void *tmp = ESP_SKB_CB(skb)->tmp;
	__be32 *seqhi = esp_tmp_seqhi(tmp);

	esph->seq_no = esph->spi;

static void esp_output_restore_header(struct sk_buff *skb)
	esp_restore_header(skb, skb_transport_offset(skb) - sizeof(__be32));

static void esp_output_done_esn(struct crypto_async_request *base, int err)
	struct sk_buff *skb = base->data;

	esp_output_restore_header(skb);
	esp_output_done(base, err);

static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct aead_request *req;
	struct scatterlist *sg;
	struct sk_buff *trailer;

	/* skb is pure payload to encrypt */

	alen = crypto_aead_authsize(aead);
	ivlen = crypto_aead_ivsize(aead);

	struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);

	padto = min(x->tfcpad, esp6_get_mtu(x, dst->child_mtu_cached));
	if (skb->len < padto)
		tfclen = padto - skb->len;

	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	clen = ALIGN(skb->len + 2 + tfclen, blksize);
	plen = clen - skb->len - tfclen;

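	/* Worked example (illustrative numbers only): with a 4-byte-aligned
	 * block size of 16, skb->len == 100 and no TFC padding, clen =
	 * ALIGN(100 + 2, 16) = 112, so plen = 112 - 100 = 12 trailer bytes
	 * (10 pad bytes plus the pad-length and next-header octets).
	 */
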
	err = skb_cow_data(skb, tfclen + plen + alen, &trailer);

	assoclen = sizeof(*esph);

	if (x->props.flags & XFRM_STATE_ESN) {
		seqhilen += sizeof(__be32);
		assoclen += seqhilen;

	tmp = esp_alloc_tmp(aead, nfrags, seqhilen);

	seqhi = esp_tmp_seqhi(tmp);
	iv = esp_tmp_iv(aead, tmp, seqhilen);
	req = esp_tmp_req(aead, iv);
	sg = esp_req_sg(aead, req);

	/* Fill padding... */
	tail = skb_tail_pointer(trailer);

	memset(tail, 0, tfclen);

	for (i = 0; i < plen - 2; i++)
		tail[i] = i + 1;
	tail[plen - 2] = plen - 2;
	tail[plen - 1] = *skb_mac_header(skb);

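	/* Illustrative sketch of the trailer built above (RFC 4303 layout,
	 * not a comment from the original source):
	 *
	 *   [ TFC padding ][ 1, 2, ..., plen-2 ][ pad length ][ next header ][ ICV ]
	 *
	 * The next-header value is the byte saved at the MAC header position,
	 * and the ICV (alen bytes) is filled in by the AEAD transform.
	 */
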
	pskb_put(skb, trailer, clen - skb->len + alen);

	skb_push(skb, -skb_network_offset(skb));
	esph = ip_esp_hdr(skb);
	*skb_mac_header(skb) = IPPROTO_ESP;

	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

	aead_request_set_callback(req, 0, esp_output_done, skb);

	/* For ESN we move the header forward by 4 bytes to
	 * accommodate the high bits.  We will move it back after
	 * encryption.
	 */
	if ((x->props.flags & XFRM_STATE_ESN)) {
		esph = (void *)(skb_transport_header(skb) - sizeof(__be32));
		esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
		aead_request_set_callback(req, 0, esp_output_done_esn, skb);

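	/* Sketch of the ESN trick above (explanatory, not from the original
	 * source): the ESP header normally starts with [spi][seq_no(low)].
	 * Shifting esph back by 4 bytes lets the high 32 bits of the sequence
	 * number sit immediately in front of that header, so they are covered
	 * by the ICV as part of the associated data (assoclen was extended by
	 * seqhilen above).  esp_output_restore_header() undoes the shift once
	 * encryption completes.
	 */
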
	esph->spi = x->id.spi;

	sg_init_table(sg, nfrags);
	err = skb_to_sgvec(skb, sg,
			   (unsigned char *)esph - skb->data,
			   assoclen + ivlen + clen + alen);
	if (unlikely(err < 0))

	aead_request_set_crypt(req, sg, sg, ivlen + clen, iv);
	aead_request_set_ad(req, assoclen);

	seqno = cpu_to_be64(XFRM_SKB_CB(skb)->seq.output.low +
			    ((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32));

	memset(iv, 0, ivlen);
	memcpy(iv + ivlen - min(ivlen, 8), (u8 *)&seqno + 8 - min(ivlen, 8),
	       min(ivlen, 8));

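	/* Example of the IV seeding above (assumed sizes, for illustration):
	 * seqno holds the full 64-bit sequence number in big-endian form.
	 * With an 8-byte IV all of it is copied in; with a shorter IV only
	 * its low min(ivlen, 8) bytes land in the tail of the zeroed IV.
	 */
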
	ESP_SKB_CB(skb)->tmp = tmp;
	err = crypto_aead_encrypt(req);

	if ((x->props.flags & XFRM_STATE_ESN))
		esp_output_restore_header(skb);

static int esp_input_done2(struct sk_buff *skb, int err)
	struct xfrm_state *x = xfrm_input_state(skb);
	struct crypto_aead *aead = x->data;
	int alen = crypto_aead_authsize(aead);
	int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
	int elen = skb->len - hlen;
	int hdr_len = skb_network_header_len(skb);

	kfree(ESP_SKB_CB(skb)->tmp);

	if (skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2))

	if (padlen + 2 + alen >= elen) {
		net_dbg_ratelimited("ipsec esp packet is garbage padlen=%d, elen=%d\n",
				    padlen + 2, elen - alen);

	/* ... check padding bits here. Silly. :-) */

	pskb_trim(skb, skb->len - alen - padlen - 2);
	__skb_pull(skb, hlen);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		skb_reset_transport_header(skb);

	skb_set_transport_header(skb, -hdr_len);

	/* RFC4303: Drop dummy packets without any error */
	if (err == IPPROTO_NONE)

static void esp_input_done(struct crypto_async_request *base, int err)
	struct sk_buff *skb = base->data;

	xfrm_input_resume(skb, esp_input_done2(skb, err));

static void esp_input_restore_header(struct sk_buff *skb)
	esp_restore_header(skb, 0);

static void esp_input_done_esn(struct crypto_async_request *base, int err)
	struct sk_buff *skb = base->data;

	esp_input_restore_header(skb);
	esp_input_done(base, err);

static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead = x->data;
	struct aead_request *req;
	struct sk_buff *trailer;
	int ivlen = crypto_aead_ivsize(aead);
	int elen = skb->len - sizeof(*esph) - ivlen;

	struct scatterlist *sg;

	if (!pskb_may_pull(skb, sizeof(*esph) + ivlen)) {

	nfrags = skb_cow_data(skb, 0, &trailer);

	assoclen = sizeof(*esph);

	if (x->props.flags & XFRM_STATE_ESN) {
		seqhilen += sizeof(__be32);
		assoclen += seqhilen;

	tmp = esp_alloc_tmp(aead, nfrags, seqhilen);

	ESP_SKB_CB(skb)->tmp = tmp;
	seqhi = esp_tmp_seqhi(tmp);
	iv = esp_tmp_iv(aead, tmp, seqhilen);
	req = esp_tmp_req(aead, iv);
	sg = esp_req_sg(aead, req);

	skb->ip_summed = CHECKSUM_NONE;

	esph = (struct ip_esp_hdr *)skb->data;

	aead_request_set_callback(req, 0, esp_input_done, skb);

	/* For ESN we move the header forward by 4 bytes to
	 * accommodate the high bits.  We will move it back after
	 * decryption.
	 */
	if ((x->props.flags & XFRM_STATE_ESN)) {
		esph = (void *)skb_push(skb, 4);

		esph->spi = esph->seq_no;
		esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi;
		aead_request_set_callback(req, 0, esp_input_done_esn, skb);

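	/* Explanatory note, not part of the original source: this mirrors the
	 * output-path ESN trick.  Pushing 4 bytes exposes room so the SPI can
	 * be moved down and the locally tracked high 32 bits of the sequence
	 * number written where the SPI was, giving [spi][seq_hi][seq_lo] as
	 * the associated data for ICV verification.  esp_input_restore_header()
	 * puts the header back after decryption.
	 */
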
	sg_init_table(sg, nfrags);
	ret = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(ret < 0))

	aead_request_set_crypt(req, sg, sg, elen + ivlen, iv);
	aead_request_set_ad(req, assoclen);

	ret = crypto_aead_decrypt(req);
	if (ret == -EINPROGRESS)

	if ((x->props.flags & XFRM_STATE_ESN))
		esp_input_restore_header(skb);

	ret = esp_input_done2(skb, ret);

static u32 esp6_get_mtu(struct xfrm_state *x, int mtu)
	struct crypto_aead *aead = x->data;
	u32 blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	unsigned int net_adj;

	if (x->props.mode != XFRM_MODE_TUNNEL)
		net_adj = sizeof(struct ipv6hdr);

	return ((mtu - x->props.header_len - crypto_aead_authsize(aead) -
		 net_adj) & ~(blksize - 1)) + net_adj - 2;

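	/* Worked example (illustrative numbers only): transport mode gives
	 * net_adj = 40 (the IPv6 header).  Assuming header_len = 16 (8-byte
	 * ESP header plus an 8-byte IV), a 16-byte ICV and blksize = 16, an
	 * MTU of 1500 yields
	 *   ((1500 - 16 - 16 - 40) & ~15) + 40 - 2 = 1424 + 38 = 1462
	 * bytes of payload; the final "- 2" accounts for the pad-length and
	 * next-header octets of the ESP trailer.
	 */
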
static int esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		    u8 type, u8 code, int offset, __be32 info)
	struct net *net = dev_net(skb->dev);
	const struct ipv6hdr *iph = (const struct ipv6hdr *)skb->data;
	struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + offset);
	struct xfrm_state *x;

	if (type != ICMPV6_PKT_TOOBIG &&
	    type != NDISC_REDIRECT)

	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
			      esph->spi, IPPROTO_ESP, AF_INET6);

	if (type == NDISC_REDIRECT)
		ip6_redirect(skb, net, skb->dev->ifindex, 0);

	ip6_update_pmtu(skb, net, info, 0, 0);

static void esp6_destroy(struct xfrm_state *x)
	struct crypto_aead *aead = x->data;

	crypto_free_aead(aead);

static int esp_init_aead(struct xfrm_state *x)
	char aead_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_aead *aead;

	if (snprintf(aead_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
		     x->geniv, x->aead->alg_name) >= CRYPTO_MAX_ALG_NAME)

	aead = crypto_alloc_aead(aead_name, 0, 0);

	err = crypto_aead_setkey(aead, x->aead->alg_key,
				 (x->aead->alg_key_len + 7) / 8);

	err = crypto_aead_setauthsize(aead, x->aead->alg_icv_len / 8);

static int esp_init_authenc(struct xfrm_state *x)
	struct crypto_aead *aead;
	struct crypto_authenc_key_param *param;

	char authenc_name[CRYPTO_MAX_ALG_NAME];

	if ((x->props.flags & XFRM_STATE_ESN)) {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "%s%sauthencesn(%s,%s)%s",
			     x->geniv ?: "", x->geniv ? "(" : "",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name,
			     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME)

	if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
		     "%s%sauthenc(%s,%s)%s",
		     x->geniv ?: "", x->geniv ? "(" : "",
		     x->aalg ? x->aalg->alg_name : "digest_null",
		     x->ealg->alg_name,
		     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME)

	aead = crypto_alloc_aead(authenc_name, 0, 0);

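	/* Example (assumed algorithms, for illustration only): with
	 * x->geniv == "echainiv", x->aalg->alg_name == "hmac(sha1)" and
	 * x->ealg->alg_name == "cbc(aes)", the non-ESN template above yields
	 * the crypto API name "echainiv(authenc(hmac(sha1),cbc(aes)))".
	 */
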
	keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
		 (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));

	key = kmalloc(keylen, GFP_KERNEL);

	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	p += RTA_SPACE(sizeof(*param));

	struct xfrm_algo_desc *aalg_desc;

	memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8);
	p += (x->aalg->alg_key_len + 7) / 8;

	aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);

	if (aalg_desc->uinfo.auth.icv_fullbits / 8 !=
	    crypto_aead_authsize(aead)) {
		pr_info("ESP: %s digestsize %u != %hu\n",
			x->aalg->alg_name,
			crypto_aead_authsize(aead),
			aalg_desc->uinfo.auth.icv_fullbits / 8);

	err = crypto_aead_setauthsize(aead, x->aalg->alg_trunc_len / 8);

	param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
	memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);

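	/* Layout of the authenc key blob assembled above (sketch, not a
	 * comment from the original source): an rtattr of type
	 * CRYPTO_AUTHENC_KEYA_PARAM carrying the encryption key length,
	 * followed by the raw authentication key and the raw encryption key:
	 *
	 *   [ rtattr | enckeylen ][ aalg key ][ ealg key ]
	 *
	 * keylen was sized to exactly this, and the whole blob is handed to
	 * crypto_aead_setkey() below.
	 */
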
	err = crypto_aead_setkey(aead, key, keylen);

static int esp6_init_state(struct xfrm_state *x)
	struct crypto_aead *aead;

	err = esp_init_aead(x);

	err = esp_init_authenc(x);

	x->props.header_len = sizeof(struct ip_esp_hdr) +
			      crypto_aead_ivsize(aead);
	switch (x->props.mode) {
	case XFRM_MODE_BEET:
		if (x->sel.family != AF_INET6)
			x->props.header_len += IPV4_BEET_PHMAXLEN +
					       (sizeof(struct ipv6hdr) - sizeof(struct iphdr));

	case XFRM_MODE_TRANSPORT:

	case XFRM_MODE_TUNNEL:
		x->props.header_len += sizeof(struct ipv6hdr);

	align = ALIGN(crypto_aead_blocksize(aead), 4);
	x->props.trailer_len = align + 1 + crypto_aead_authsize(aead);

static int esp6_rcv_cb(struct sk_buff *skb, int err)

static const struct xfrm_type esp6_type = {
	.description	= "ESP6",
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_ESP,
	.flags		= XFRM_TYPE_REPLAY_PROT,
	.init_state	= esp6_init_state,
	.destructor	= esp6_destroy,
	.get_mtu	= esp6_get_mtu,
	.output		= esp6_output,
	.hdr_offset	= xfrm6_find_1stfragopt,

static struct xfrm6_protocol esp6_protocol = {
	.handler	= xfrm6_rcv,
	.cb_handler	= esp6_rcv_cb,
	.err_handler	= esp6_err,

static int __init esp6_init(void)
	if (xfrm_register_type(&esp6_type, AF_INET6) < 0) {
		pr_info("%s: can't add xfrm type\n", __func__);

	if (xfrm6_protocol_register(&esp6_protocol, IPPROTO_ESP) < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		xfrm_unregister_type(&esp6_type, AF_INET6);

static void __exit esp6_fini(void)
	if (xfrm6_protocol_deregister(&esp6_protocol, IPPROTO_ESP) < 0)
		pr_info("%s: can't remove protocol\n", __func__);
	if (xfrm_unregister_type(&esp6_type, AF_INET6) < 0)
		pr_info("%s: can't remove xfrm type\n", __func__);

module_init(esp6_init);
module_exit(esp6_fini);

MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_ESP);