/*
 * Copyright (C)2002 USAGI/WIDE Project
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Authors
 *	Mitsuru KANDA @USAGI       : IPv6 Support
 *	Kazunori MIYAZAWA @USAGI   :
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *
 * This file is derived from net/ipv4/esp.c
 */
#define pr_fmt(fmt) "IPv6: " fmt

#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/pfkeyv2.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/ip6_route.h>
#include <net/icmp.h>
#include <net/ipv6.h>
#include <net/protocol.h>
#include <linux/icmpv6.h>
#include <linux/highmem.h>
struct esp_skb_cb {
	struct xfrm_skb_cb xfrm;
	void *tmp;
};

#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))
static u32 esp6_get_mtu(struct xfrm_state *x, int mtu);
/*
 * Allocate an AEAD request structure with extra space for SG and IV.
 *
 * For alignment considerations the upper 32 bits of the sequence number are
 * placed at the front, if present. Followed by the IV, the request and finally
 * the SG list.
 *
 * TODO: Use spare space in skb for this where possible.
 */
static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqihlen)
{
	unsigned int len;

	len = seqihlen;

	len += crypto_aead_ivsize(aead);

	if (len) {
		len += crypto_aead_alignmask(aead) &
		       ~(crypto_tfm_ctx_alignment() - 1);
		len = ALIGN(len, crypto_tfm_ctx_alignment());
	}

	len += sizeof(struct aead_request) + crypto_aead_reqsize(aead);
	len = ALIGN(len, __alignof__(struct scatterlist));

	len += sizeof(struct scatterlist) * nfrags;

	return kmalloc(len, GFP_ATOMIC);
}
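/*
 * Resulting tmp buffer layout (a sketch; the exact offsets depend on the
 * alignment rules applied above):
 *
 *	[ seqhi (ESN high bits, if any) ][ IV ][ aead_request + tfm ctx ][ sg[nfrags] ]
 *
 * The esp_tmp_seqhi/esp_tmp_iv/esp_tmp_req/esp_req_sg helpers below recover
 * each region from the same base pointer using the same alignment rules.
 */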
static inline __be32 *esp_tmp_seqhi(void *tmp)
{
	return PTR_ALIGN((__be32 *)tmp, __alignof__(__be32));
}
static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int seqhilen)
{
	return crypto_aead_ivsize(aead) ?
	       PTR_ALIGN((u8 *)tmp + seqhilen,
			 crypto_aead_alignmask(aead) + 1) : tmp + seqhilen;
}
static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
{
	struct aead_request *req;

	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
				crypto_tfm_ctx_alignment());
	aead_request_set_tfm(req, aead);

	return req;
}
static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
					     struct aead_request *req)
{
	return (void *)ALIGN((unsigned long)(req + 1) +
			     crypto_aead_reqsize(aead),
			     __alignof__(struct scatterlist));
}
static void esp_ssg_unref(struct xfrm_state *x, void *tmp)
{
	struct crypto_aead *aead = x->data;
	int seqhilen = 0;
	u8 *iv;
	struct aead_request *req;
	struct scatterlist *sg;

	if (x->props.flags & XFRM_STATE_ESN)
		seqhilen += sizeof(__be32);

	iv = esp_tmp_iv(aead, tmp, seqhilen);
	req = esp_tmp_req(aead, iv);

	/* Unref skb_frag_pages in the src scatterlist if necessary.
	 * Skip the first sg which comes from skb->data.
	 */
	if (req->src != req->dst)
		for (sg = sg_next(req->src); sg; sg = sg_next(sg))
			put_page(sg_page(sg));
}
static void esp_output_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct xfrm_offload *xo = xfrm_offload(skb);
	void *tmp;
	struct xfrm_state *x;

	if (xo && (xo->flags & XFRM_DEV_RESUME)) {
		struct sec_path *sp = skb_sec_path(skb);

		x = sp->xvec[sp->len - 1];
	} else {
		x = skb_dst(skb)->xfrm;
	}

	tmp = ESP_SKB_CB(skb)->tmp;
	esp_ssg_unref(x, tmp);
	kfree(tmp);

	if (xo && (xo->flags & XFRM_DEV_RESUME)) {
		if (err) {
			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			kfree_skb(skb);
			return;
		}

		skb_push(skb, skb->data - skb_mac_header(skb));
		secpath_reset(skb);
		xfrm_dev_resume(skb);
	} else {
		xfrm_output_resume(skb, err);
	}
}
/* Move ESP header back into place. */
static void esp_restore_header(struct sk_buff *skb, unsigned int offset)
{
	struct ip_esp_hdr *esph = (void *)(skb->data + offset);
	void *tmp = ESP_SKB_CB(skb)->tmp;
	__be32 *seqhi = esp_tmp_seqhi(tmp);

	esph->seq_no = esph->spi;
	esph->spi = *seqhi;
}
static void esp_output_restore_header(struct sk_buff *skb)
{
	esp_restore_header(skb, skb_transport_offset(skb) - sizeof(__be32));
}
static struct ip_esp_hdr *esp_output_set_esn(struct sk_buff *skb,
					     struct xfrm_state *x,
					     struct ip_esp_hdr *esph,
					     __be32 *seqhi)
{
	/* For ESN we move the header forward by 4 bytes to
	 * accommodate the high bits.  We will move it back after
	 * encryption.
	 */
	if ((x->props.flags & XFRM_STATE_ESN)) {
		struct xfrm_offload *xo = xfrm_offload(skb);

		esph = (void *)(skb_transport_header(skb) - sizeof(__be32));
		*seqhi = esph->spi;
		if (xo)
			esph->seq_no = htonl(xo->seq.hi);
		else
			esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
	}

	esph->spi = x->id.spi;

	return esph;
}
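/*
 * With ESN the authenticated data must cover SPI, the full 64-bit sequence
 * number and then the payload, while the wire header only carries the low
 * 32 bits.  The shuffle above temporarily overlays SPI and the high bits in
 * front of the on-wire header so one contiguous AAD region can be handed to
 * the AEAD; esp_output_restore_header() undoes it after encryption.
 */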
static void esp_output_done_esn(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	esp_output_restore_header(skb);
	esp_output_done(base, err);
}
static void esp_output_fill_trailer(u8 *tail, int tfclen, int plen, __u8 proto)
{
	/* Fill padding... */
	if (tfclen) {
		memset(tail, 0, tfclen);
		tail += tfclen;
	}
	do {
		int i;
		for (i = 0; i < plen - 2; i++)
			tail[i] = i + 1;
	} while (0);
	tail[plen - 2] = plen - 2;
	tail[plen - 1] = proto;
}
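/*
 * The trailer built above follows RFC 4303: optional TFC padding (zeroed),
 * self-describing padding bytes 1, 2, 3, ..., then the pad length byte and
 * the next header byte; the alen-byte ICV is appended later by the AEAD.
 */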
int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
	u8 *tail;
	u8 *vaddr;
	int nfrags;
	struct page *page;
	struct sk_buff *trailer;
	int tailen = esp->tailen;

	if (!skb_cloned(skb)) {
		if (tailen <= skb_tailroom(skb)) {
			nfrags = 1;
			trailer = skb;
			tail = skb_tail_pointer(trailer);

			goto skip_cow;
		} else if ((skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS)
			   && !skb_has_frag_list(skb)) {
			int allocsize;
			struct sock *sk = skb->sk;
			struct page_frag *pfrag = &x->xfrag;

			esp->inplace = false;

			allocsize = ALIGN(tailen, L1_CACHE_BYTES);

			spin_lock_bh(&x->lock);

			if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
				spin_unlock_bh(&x->lock);
				goto cow;
			}

			page = pfrag->page;
			get_page(page);

			vaddr = kmap_atomic(page);

			tail = vaddr + pfrag->offset;

			esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);

			kunmap_atomic(vaddr);

			nfrags = skb_shinfo(skb)->nr_frags;

			__skb_fill_page_desc(skb, nfrags, page, pfrag->offset,
					     tailen);
			skb_shinfo(skb)->nr_frags = ++nfrags;

			pfrag->offset = pfrag->offset + allocsize;

			spin_unlock_bh(&x->lock);

			nfrags++;

			skb->len += tailen;
			skb->data_len += tailen;
			skb->truesize += tailen;
			if (sk && sk_fullsock(sk))
				refcount_add(tailen, &sk->sk_wmem_alloc);

			goto out;
		}
	}

cow:
	nfrags = skb_cow_data(skb, tailen, &trailer);
	if (nfrags < 0)
		goto out;
	tail = skb_tail_pointer(trailer);

skip_cow:
	esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);
	pskb_put(skb, trailer, tailen);

out:
	return nfrags;
}
EXPORT_SYMBOL_GPL(esp6_output_head);
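/*
 * esp6_output_head() builds the ESP trailer via one of three paths: directly
 * in existing tailroom, in a freshly refilled page fragment appended to the
 * skb (the non-inplace case), or after skb_cow_data() when the skb is cloned
 * or otherwise unsuitable.  The returned nfrags is later used to size the
 * scatterlists in esp6_output_tail().
 */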
int esp6_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
	u8 *iv;
	int alen;
	void *tmp;
	int ivlen;
	int assoclen;
	int seqhilen;
	__be32 *seqhi;
	struct page *page;
	struct ip_esp_hdr *esph;
	struct aead_request *req;
	struct crypto_aead *aead;
	struct scatterlist *sg, *dsg;
	int err = -ENOMEM;

	assoclen = sizeof(struct ip_esp_hdr);
	seqhilen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		seqhilen += sizeof(__be32);
		assoclen += sizeof(__be32);
	}

	aead = x->data;
	alen = crypto_aead_authsize(aead);
	ivlen = crypto_aead_ivsize(aead);

	tmp = esp_alloc_tmp(aead, esp->nfrags + 2, seqhilen);
	if (!tmp)
		goto error;

	seqhi = esp_tmp_seqhi(tmp);
	iv = esp_tmp_iv(aead, tmp, seqhilen);
	req = esp_tmp_req(aead, iv);
	sg = esp_req_sg(aead, req);

	if (esp->inplace)
		dsg = sg;
	else
		dsg = &sg[esp->nfrags];

	esph = esp_output_set_esn(skb, x, ip_esp_hdr(skb), seqhi);

	sg_init_table(sg, esp->nfrags);
	err = skb_to_sgvec(skb, sg,
			   (unsigned char *)esph - skb->data,
			   assoclen + ivlen + esp->clen + alen);
	if (unlikely(err < 0))
		goto error_free;

	if (!esp->inplace) {
		int allocsize;
		struct page_frag *pfrag = &x->xfrag;

		allocsize = ALIGN(skb->data_len, L1_CACHE_BYTES);

		spin_lock_bh(&x->lock);
		if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
			spin_unlock_bh(&x->lock);
			goto error_free;
		}

		skb_shinfo(skb)->nr_frags = 1;

		page = pfrag->page;
		get_page(page);
		/* replace page frags in skb with new page */
		__skb_fill_page_desc(skb, 0, page, pfrag->offset, skb->data_len);
		pfrag->offset = pfrag->offset + allocsize;
		spin_unlock_bh(&x->lock);

		sg_init_table(dsg, skb_shinfo(skb)->nr_frags + 1);
		err = skb_to_sgvec(skb, dsg,
				   (unsigned char *)esph - skb->data,
				   assoclen + ivlen + esp->clen + alen);
		if (unlikely(err < 0))
			goto error_free;
	}

	if ((x->props.flags & XFRM_STATE_ESN))
		aead_request_set_callback(req, 0, esp_output_done_esn, skb);
	else
		aead_request_set_callback(req, 0, esp_output_done, skb);

	aead_request_set_crypt(req, sg, dsg, ivlen + esp->clen, iv);
	aead_request_set_ad(req, assoclen);

	memset(iv, 0, ivlen);
	memcpy(iv + ivlen - min(ivlen, 8), (u8 *)&esp->seqno + 8 - min(ivlen, 8),
	       min(ivlen, 8));

	ESP_SKB_CB(skb)->tmp = tmp;
	err = crypto_aead_encrypt(req);

	switch (err) {
	case -EINPROGRESS:
		goto error;

	case -ENOSPC:
		err = NET_XMIT_DROP;
		break;

	case 0:
		if ((x->props.flags & XFRM_STATE_ESN))
			esp_output_restore_header(skb);
	}

	if (sg != dsg)
		esp_ssg_unref(x, tmp);

error_free:
	kfree(tmp);
error:
	return err;
}
EXPORT_SYMBOL_GPL(esp6_output_tail);
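/*
 * IV construction above, as the code reads: the IV buffer is zeroed and the
 * low 64 bits of esp->seqno (big-endian) are copied into its right-most
 * min(ivlen, 8) bytes, so the value handed to the AEAD request is derived
 * from the packet's sequence number.
 */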
static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int alen;
	int blksize;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct esp_info esp;

	esp.inplace = true;

	esp.proto = *skb_mac_header(skb);
	*skb_mac_header(skb) = IPPROTO_ESP;

	/* skb is pure payload to encrypt */

	aead = x->data;
	alen = crypto_aead_authsize(aead);

	esp.tfclen = 0;
	if (x->tfcpad) {
		struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
		u32 padto;

		padto = min(x->tfcpad, esp6_get_mtu(x, dst->child_mtu_cached));
		if (skb->len < padto)
			esp.tfclen = padto - skb->len;
	}
	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
	esp.plen = esp.clen - skb->len - esp.tfclen;
	esp.tailen = esp.tfclen + esp.plen + alen;

	esp.nfrags = esp6_output_head(x, skb, &esp);
	if (esp.nfrags < 0)
		return esp.nfrags;

	esph = ip_esp_hdr(skb);
	esph->spi = x->id.spi;

	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
	esp.seqno = cpu_to_be64(XFRM_SKB_CB(skb)->seq.output.low +
				((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32));

	skb_push(skb, -skb_network_offset(skb));

	return esp6_output_tail(x, skb, &esp);
}
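/*
 * Padding arithmetic above, worked through with hypothetical numbers: with a
 * 4-byte-aligned block size of 16, no TFC padding and skb->len == 100, clen =
 * ALIGN(100 + 2, 16) = 112, plen = 112 - 100 = 12 (10 pad bytes plus the pad
 * length and next-header bytes), and tailen = plen + alen ICV bytes.
 */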
static inline int esp_remove_trailer(struct sk_buff *skb)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct crypto_aead *aead = x->data;
	int alen, hlen, elen;
	int padlen, trimlen;
	__wsum csumdiff;
	u8 nexthdr[2];
	int ret;

	alen = crypto_aead_authsize(aead);
	hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
	elen = skb->len - hlen;

	if (xo && (xo->flags & XFRM_ESP_NO_TRAILER)) {
		ret = xo->proto;
		goto out;
	}

	ret = skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2);
	BUG_ON(ret);

	ret = -EINVAL;
	padlen = nexthdr[0];
	if (padlen + 2 + alen >= elen) {
		net_dbg_ratelimited("ipsec esp packet is garbage padlen=%d, elen=%d\n",
				    padlen + 2, elen - alen);
		goto out;
	}

	trimlen = alen + padlen + 2;
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		csumdiff = skb_checksum(skb, skb->len - trimlen, trimlen, 0);
		skb->csum = csum_block_sub(skb->csum, csumdiff,
					   skb->len - trimlen);
	}
	pskb_trim(skb, skb->len - trimlen);

	ret = nexthdr[1];

out:
	return ret;
}
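/*
 * When the skb carries a CHECKSUM_COMPLETE value, the checksum of the bytes
 * being trimmed (padding, pad length, next header) is subtracted from
 * skb->csum above so the stored checksum stays valid for the shorter packet.
 */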
int esp6_input_done2(struct sk_buff *skb, int err)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct crypto_aead *aead = x->data;
	int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
	int hdr_len = skb_network_header_len(skb);

	if (!xo || (xo && !(xo->flags & CRYPTO_DONE)))
		kfree(ESP_SKB_CB(skb)->tmp);

	if (unlikely(err))
		goto out;

	err = esp_remove_trailer(skb);
	if (unlikely(err < 0))
		goto out;

	skb_postpull_rcsum(skb, skb_network_header(skb),
			   skb_network_header_len(skb));
	skb_pull_rcsum(skb, hlen);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		skb_reset_transport_header(skb);
	else
		skb_set_transport_header(skb, -hdr_len);

	/* RFC4303: Drop dummy packets without any error */
	if (err == IPPROTO_NONE)
		err = -EINVAL;

out:
	return err;
}
EXPORT_SYMBOL_GPL(esp6_input_done2);
static void esp_input_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	xfrm_input_resume(skb, esp6_input_done2(skb, err));
}
static void esp_input_restore_header(struct sk_buff *skb)
{
	esp_restore_header(skb, 0);
	__skb_pull(skb, 4);
}
static void esp_input_set_header(struct sk_buff *skb, __be32 *seqhi)
{
	struct xfrm_state *x = xfrm_input_state(skb);

	/* For ESN we move the header forward by 4 bytes to
	 * accommodate the high bits.  We will move it back after
	 * decryption.
	 */
	if ((x->props.flags & XFRM_STATE_ESN)) {
		struct ip_esp_hdr *esph = skb_push(skb, 4);

		*seqhi = esph->spi;
		esph->spi = esph->seq_no;
		esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi;
	}
}
static void esp_input_done_esn(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	esp_input_restore_header(skb);
	esp_input_done(base, err);
}
static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
{
	struct crypto_aead *aead = x->data;
	struct aead_request *req;
	struct sk_buff *trailer;
	int ivlen = crypto_aead_ivsize(aead);
	int elen = skb->len - sizeof(struct ip_esp_hdr) - ivlen;
	int nfrags;
	int assoclen;
	int seqhilen;
	int ret = 0;
	void *tmp;
	__be32 *seqhi;
	u8 *iv;
	struct scatterlist *sg;

	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + ivlen)) {
		ret = -EINVAL;
		goto out;
	}

	if (elen <= 0) {
		ret = -EINVAL;
		goto out;
	}

	assoclen = sizeof(struct ip_esp_hdr);
	seqhilen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		seqhilen += sizeof(__be32);
		assoclen += seqhilen;
	}

	if (!skb_cloned(skb)) {
		if (!skb_is_nonlinear(skb)) {
			nfrags = 1;

			goto skip_cow;
		} else if (!skb_has_frag_list(skb)) {
			nfrags = skb_shinfo(skb)->nr_frags;
			nfrags++;

			goto skip_cow;
		}
	}

	nfrags = skb_cow_data(skb, 0, &trailer);
	if (nfrags < 0) {
		ret = -EINVAL;
		goto out;
	}

skip_cow:
	ret = -ENOMEM;
	tmp = esp_alloc_tmp(aead, nfrags, seqhilen);
	if (!tmp)
		goto out;

	ESP_SKB_CB(skb)->tmp = tmp;
	seqhi = esp_tmp_seqhi(tmp);
	iv = esp_tmp_iv(aead, tmp, seqhilen);
	req = esp_tmp_req(aead, iv);
	sg = esp_req_sg(aead, req);

	esp_input_set_header(skb, seqhi);

	sg_init_table(sg, nfrags);
	ret = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(ret < 0)) {
		kfree(tmp);
		goto out;
	}

	skb->ip_summed = CHECKSUM_NONE;

	if ((x->props.flags & XFRM_STATE_ESN))
		aead_request_set_callback(req, 0, esp_input_done_esn, skb);
	else
		aead_request_set_callback(req, 0, esp_input_done, skb);

	aead_request_set_crypt(req, sg, sg, elen + ivlen, iv);
	aead_request_set_ad(req, assoclen);

	ret = crypto_aead_decrypt(req);
	if (ret == -EINPROGRESS)
		goto out;

	if ((x->props.flags & XFRM_STATE_ESN))
		esp_input_restore_header(skb);

	ret = esp6_input_done2(skb, ret);

out:
	return ret;
}
static u32 esp6_get_mtu(struct xfrm_state *x, int mtu)
{
	struct crypto_aead *aead = x->data;
	u32 blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	unsigned int net_adj;

	if (x->props.mode != XFRM_MODE_TUNNEL)
		net_adj = sizeof(struct ipv6hdr);
	else
		net_adj = 0;

	return ((mtu - x->props.header_len - crypto_aead_authsize(aead) -
		 net_adj) & ~(blksize - 1)) + net_adj - 2;
}
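/*
 * Worked example with hypothetical numbers: in transport mode (net_adj = 40
 * for the IPv6 header), with mtu = 1500, header_len = 24, a 16-byte ICV and
 * blksize 16, the usable payload is ((1500 - 24 - 16 - 40) & ~15) + 40 - 2 =
 * 1446 bytes.
 */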
static int esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		    u8 type, u8 code, int offset, __be32 info)
{
	struct net *net = dev_net(skb->dev);
	const struct ipv6hdr *iph = (const struct ipv6hdr *)skb->data;
	struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + offset);
	struct xfrm_state *x;

	if (type != ICMPV6_PKT_TOOBIG &&
	    type != NDISC_REDIRECT)
		return 0;

	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
			      esph->spi, IPPROTO_ESP, AF_INET6);
	if (!x)
		return 0;

	if (type == NDISC_REDIRECT)
		ip6_redirect(skb, net, skb->dev->ifindex, 0,
			     sock_net_uid(net, NULL));
	else
		ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL));
	xfrm_state_put(x);

	return 0;
}
static void esp6_destroy(struct xfrm_state *x)
{
	struct crypto_aead *aead = x->data;

	if (!aead)
		return;

	crypto_free_aead(aead);
}
static int esp_init_aead(struct xfrm_state *x)
{
	char aead_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_aead *aead;
	int err;

	err = -ENAMETOOLONG;
	if (snprintf(aead_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
		     x->geniv, x->aead->alg_name) >= CRYPTO_MAX_ALG_NAME)
		goto error;

	aead = crypto_alloc_aead(aead_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;

	x->data = aead;

	err = crypto_aead_setkey(aead, x->aead->alg_key,
				 (x->aead->alg_key_len + 7) / 8);
	if (err)
		goto error;

	err = crypto_aead_setauthsize(aead, x->aead->alg_icv_len / 8);
	if (err)
		goto error;

error:
	return err;
}
static int esp_init_authenc(struct xfrm_state *x)
{
	struct crypto_aead *aead;
	struct crypto_authenc_key_param *param;
	struct rtattr *rta;
	char *key;
	char *p;
	char authenc_name[CRYPTO_MAX_ALG_NAME];
	unsigned int keylen;
	int err;

	err = -EINVAL;
	if (!x->ealg)
		goto error;

	err = -ENAMETOOLONG;

	if ((x->props.flags & XFRM_STATE_ESN)) {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "%s%sauthencesn(%s,%s)%s",
			     x->geniv ?: "", x->geniv ? "(" : "",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name,
			     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME)
			goto error;
	} else {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "%s%sauthenc(%s,%s)%s",
			     x->geniv ?: "", x->geniv ? "(" : "",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name,
			     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME)
			goto error;
	}

	aead = crypto_alloc_aead(authenc_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;

	x->data = aead;

	keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
		 (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
	err = -ENOMEM;
	key = kmalloc(keylen, GFP_KERNEL);
	if (!key)
		goto error;

	p = key;
	rta = (void *)p;
	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	p += RTA_SPACE(sizeof(*param));

	if (x->aalg) {
		struct xfrm_algo_desc *aalg_desc;

		memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8);
		p += (x->aalg->alg_key_len + 7) / 8;

		aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
		BUG_ON(!aalg_desc);

		err = -EINVAL;
		if (aalg_desc->uinfo.auth.icv_fullbits / 8 !=
		    crypto_aead_authsize(aead)) {
			pr_info("ESP: %s digestsize %u != %hu\n",
				x->aalg->alg_name,
				crypto_aead_authsize(aead),
				aalg_desc->uinfo.auth.icv_fullbits / 8);
			goto free_key;
		}

		err = crypto_aead_setauthsize(
			aead, x->aalg->alg_trunc_len / 8);
		if (err)
			goto free_key;
	}

	param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
	memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);

	err = crypto_aead_setkey(aead, key, keylen);

free_key:
	kfree(key);

error:
	return err;
}
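/*
 * The key blob assembled above follows the crypto_authenc key format: an
 * rtattr-encoded crypto_authenc_key_param carrying the encryption key length,
 * followed by the raw authentication key (if any) and then the raw encryption
 * key, all handed to crypto_aead_setkey() in one buffer.
 */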
static int esp6_init_state(struct xfrm_state *x)
{
	struct crypto_aead *aead;
	u32 align;
	int err;

	x->data = NULL;

	if (x->aead)
		err = esp_init_aead(x);
	else
		err = esp_init_authenc(x);

	if (err)
		goto error;

	aead = x->data;

	x->props.header_len = sizeof(struct ip_esp_hdr) +
			      crypto_aead_ivsize(aead);
	switch (x->props.mode) {
	case XFRM_MODE_BEET:
		if (x->sel.family != AF_INET6)
			x->props.header_len += IPV4_BEET_PHMAXLEN +
					       (sizeof(struct ipv6hdr) - sizeof(struct iphdr));
		break;
	default:
	case XFRM_MODE_TRANSPORT:
		break;
	case XFRM_MODE_TUNNEL:
		x->props.header_len += sizeof(struct ipv6hdr);
		break;
	}

	align = ALIGN(crypto_aead_blocksize(aead), 4);
	x->props.trailer_len = align + 1 + crypto_aead_authsize(aead);

error:
	return err;
}
static int esp6_rcv_cb(struct sk_buff *skb, int err)
{
	return 0;
}
static const struct xfrm_type esp6_type = {
	.description	= "ESP6",
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_ESP,
	.flags		= XFRM_TYPE_REPLAY_PROT,
	.init_state	= esp6_init_state,
	.destructor	= esp6_destroy,
	.get_mtu	= esp6_get_mtu,
	.input		= esp6_input,
	.output		= esp6_output,
	.hdr_offset	= xfrm6_find_1stfragopt,
};
static struct xfrm6_protocol esp6_protocol = {
	.handler	= xfrm6_rcv,
	.cb_handler	= esp6_rcv_cb,
	.err_handler	= esp6_err,
	.priority	= 0,
};
static int __init esp6_init(void)
{
	if (xfrm_register_type(&esp6_type, AF_INET6) < 0) {
		pr_info("%s: can't add xfrm type\n", __func__);
		return -EAGAIN;
	}
	if (xfrm6_protocol_register(&esp6_protocol, IPPROTO_ESP) < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		xfrm_unregister_type(&esp6_type, AF_INET6);
		return -EAGAIN;
	}

	return 0;
}
static void __exit esp6_fini(void)
{
	if (xfrm6_protocol_deregister(&esp6_protocol, IPPROTO_ESP) < 0)
		pr_info("%s: can't remove protocol\n", __func__);
	if (xfrm_unregister_type(&esp6_type, AF_INET6) < 0)
		pr_info("%s: can't remove xfrm type\n", __func__);
}
module_init(esp6_init);
module_exit(esp6_fini);

MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_ESP);