// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C)2002 USAGI/WIDE Project
 *
 * Mitsuru KANDA @USAGI : IPv6 Support
 * Kazunori MIYAZAWA @USAGI :
 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *
 * This file is derived from net/ipv4/esp.c
 */

#define pr_fmt(fmt) "IPv6: " fmt

#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/pfkeyv2.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/ip6_checksum.h>
#include <net/ip6_route.h>
#include <net/icmp.h>
#include <net/ipv6.h>
#include <net/protocol.h>
#include <net/udp.h>
#include <linux/icmpv6.h>
#include <net/tcp.h>
#include <net/espintcp.h>
#include <net/inet6_hashtables.h>
#include <linux/highmem.h>
struct esp_skb_cb {
        struct xfrm_skb_cb xfrm;
        void *tmp;
};

struct esp_output_extra {
        __be32 seqhi;
        u32 esphoff;
};

#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))
/*
 * Allocate an AEAD request structure with extra space for SG and IV.
 *
 * For alignment considerations the upper 32 bits of the sequence number are
 * placed at the front, if present, followed by the IV, the request and
 * finally the SG list.
 *
 * TODO: Use spare space in skb for this where possible.
 */
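/*
 * Resulting buffer layout, as recomputed by the esp_tmp_*() helpers below:
 *
 *   [seqhi/extra (ESN only)][IV][aead_request + tfm ctx][scatterlist[nfrags]]
 */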
static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqhilen)
{
        unsigned int len;

        len = seqhilen;

        len += crypto_aead_ivsize(aead);

        if (len) {
                len += crypto_aead_alignmask(aead) &
                       ~(crypto_tfm_ctx_alignment() - 1);
                len = ALIGN(len, crypto_tfm_ctx_alignment());
        }

        len += sizeof(struct aead_request) + crypto_aead_reqsize(aead);
        len = ALIGN(len, __alignof__(struct scatterlist));

        len += sizeof(struct scatterlist) * nfrags;

        return kmalloc(len, GFP_ATOMIC);
}
static inline void *esp_tmp_extra(void *tmp)
{
        return PTR_ALIGN(tmp, __alignof__(struct esp_output_extra));
}
static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int seqhilen)
{
        return crypto_aead_ivsize(aead) ?
               PTR_ALIGN((u8 *)tmp + seqhilen,
                         crypto_aead_alignmask(aead) + 1) : tmp + seqhilen;
}
static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
{
        struct aead_request *req;

        req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
                                crypto_tfm_ctx_alignment());
        aead_request_set_tfm(req, aead);
        return req;
}
static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
                                             struct aead_request *req)
{
        return (void *)ALIGN((unsigned long)(req + 1) +
                             crypto_aead_reqsize(aead),
                             __alignof__(struct scatterlist));
}
static void esp_ssg_unref(struct xfrm_state *x, void *tmp)
{
        struct esp_output_extra *extra = esp_tmp_extra(tmp);
        struct crypto_aead *aead = x->data;
        int extralen = 0;
        u8 *iv;
        struct aead_request *req;
        struct scatterlist *sg;

        if (x->props.flags & XFRM_STATE_ESN)
                extralen += sizeof(*extra);

        iv = esp_tmp_iv(aead, tmp, extralen);
        req = esp_tmp_req(aead, iv);

        /* Unref skb_frag_pages in the src scatterlist if necessary.
         * Skip the first sg which comes from skb->data.
         */
        if (req->src != req->dst)
                for (sg = sg_next(req->src); sg; sg = sg_next(sg))
                        put_page(sg_page(sg));
}
#ifdef CONFIG_INET6_ESPINTCP
struct esp_tcp_sk {
        struct sock *sk;
        struct rcu_head rcu;
};

static void esp_free_tcp_sk(struct rcu_head *head)
{
        struct esp_tcp_sk *esk = container_of(head, struct esp_tcp_sk, rcu);

        sock_put(esk->sk);
        kfree(esk);
}
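/* Look up the established TCP socket that carries ESP-in-TCP traffic for
 * this state, caching the result in x->encap_sk. Called under
 * rcu_read_lock(); takes x->lock to update the cached socket.
 */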
static struct sock *esp6_find_tcp_sk(struct xfrm_state *x)
{
        struct xfrm_encap_tmpl *encap = x->encap;
        struct esp_tcp_sk *esk;
        __be16 sport, dport;
        struct sock *nsk;
        struct sock *sk;

        sk = rcu_dereference(x->encap_sk);
        if (sk && sk->sk_state == TCP_ESTABLISHED)
                return sk;

        spin_lock_bh(&x->lock);
        sport = encap->encap_sport;
        dport = encap->encap_dport;
        nsk = rcu_dereference_protected(x->encap_sk,
                                        lockdep_is_held(&x->lock));
        if (sk && sk == nsk) {
                esk = kmalloc(sizeof(*esk), GFP_ATOMIC);
                if (!esk) {
                        spin_unlock_bh(&x->lock);
                        return ERR_PTR(-ENOMEM);
                }
                RCU_INIT_POINTER(x->encap_sk, NULL);
                esk->sk = sk;
                call_rcu(&esk->rcu, esp_free_tcp_sk);
        }
        spin_unlock_bh(&x->lock);

        sk = __inet6_lookup_established(xs_net(x), &tcp_hashinfo, &x->id.daddr.in6,
                                        dport, &x->props.saddr.in6, ntohs(sport), 0, 0);
        if (!sk)
                return ERR_PTR(-ENOENT);

        if (!tcp_is_ulp_esp(sk)) {
                sock_put(sk);
                return ERR_PTR(-EINVAL);
        }

        spin_lock_bh(&x->lock);
        nsk = rcu_dereference_protected(x->encap_sk,
                                        lockdep_is_held(&x->lock));
        if (encap->encap_sport != sport ||
            encap->encap_dport != dport) {
                sock_put(sk);
                sk = nsk ?: ERR_PTR(-EREMCHG);
        } else if (sk == nsk) {
                sock_put(sk);
        } else {
                rcu_assign_pointer(x->encap_sk, sk);
        }
        spin_unlock_bh(&x->lock);

        return sk;
}
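/* Push a fully built ESP packet into the espintcp socket, going through
 * the socket's backlog queue when it is currently owned by user context.
 */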
static int esp_output_tcp_finish(struct xfrm_state *x, struct sk_buff *skb)
{
        struct sock *sk;
        int err;

        rcu_read_lock();
        sk = esp6_find_tcp_sk(x);
        err = PTR_ERR_OR_ZERO(sk);
        if (err)
                goto out;

        bh_lock_sock(sk);
        if (sock_owned_by_user(sk))
                err = espintcp_queue_out(sk, skb);
        else
                err = espintcp_push_skb(sk, skb);
        bh_unlock_sock(sk);

out:
        rcu_read_unlock();
        return err;
}
static int esp_output_tcp_encap_cb(struct net *net, struct sock *sk,
                                   struct sk_buff *skb)
{
        struct dst_entry *dst = skb_dst(skb);
        struct xfrm_state *x = dst->xfrm;

        return esp_output_tcp_finish(x, skb);
}
static int esp_output_tail_tcp(struct xfrm_state *x, struct sk_buff *skb)
{
        int err;

        local_bh_disable();
        err = xfrm_trans_queue_net(xs_net(x), skb, esp_output_tcp_encap_cb);
        local_bh_enable();

        /* EINPROGRESS just happens to do the right thing.  It
         * actually means that the skb has been consumed and
         * isn't coming back.
         */
        return err ?: -EINPROGRESS;
}
#else
static int esp_output_tail_tcp(struct xfrm_state *x, struct sk_buff *skb)
{
        kfree_skb(skb);

        return -EOPNOTSUPP;
}
#endif
static void esp_output_encap_csum(struct sk_buff *skb)
{
        /* UDP encap with IPv6 requires a valid checksum */
        if (*skb_mac_header(skb) == IPPROTO_UDP) {
                struct udphdr *uh = udp_hdr(skb);
                struct ipv6hdr *ip6h = ipv6_hdr(skb);
                int len = ntohs(uh->len);
                unsigned int offset = skb_transport_offset(skb);
                __wsum csum = skb_checksum(skb, offset, skb->len - offset, 0);

                uh->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
                                            len, IPPROTO_UDP, csum);
                if (uh->check == 0)
                        uh->check = CSUM_MANGLED_0;
        }
}
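/* Completion callback for (possibly asynchronous) ESP encryption: free
 * the temporary request buffer, fix up the encapsulation checksum, then
 * resume the packet on the xfrm output path, the offloading device, or
 * the espintcp socket as appropriate.
 */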
static void esp_output_done(struct crypto_async_request *base, int err)
{
        struct sk_buff *skb = base->data;
        struct xfrm_offload *xo = xfrm_offload(skb);
        void *tmp;
        struct xfrm_state *x;

        if (xo && (xo->flags & XFRM_DEV_RESUME)) {
                struct sec_path *sp = skb_sec_path(skb);

                x = sp->xvec[sp->len - 1];
        } else {
                x = skb_dst(skb)->xfrm;
        }

        tmp = ESP_SKB_CB(skb)->tmp;
        esp_ssg_unref(x, tmp);
        kfree(tmp);

        esp_output_encap_csum(skb);

        if (xo && (xo->flags & XFRM_DEV_RESUME)) {
                if (err) {
                        XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
                        kfree_skb(skb);
                        return;
                }

                skb_push(skb, skb->data - skb_mac_header(skb));
                secpath_reset(skb);
                xfrm_dev_resume(skb);
        } else {
                if (!err &&
                    x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP)
                        esp_output_tail_tcp(x, skb);
                else
                        xfrm_output_resume(skb, err);
        }
}
/* Move ESP header back into place. */
static void esp_restore_header(struct sk_buff *skb, unsigned int offset)
{
        struct ip_esp_hdr *esph = (void *)(skb->data + offset);
        void *tmp = ESP_SKB_CB(skb)->tmp;
        __be32 *seqhi = esp_tmp_extra(tmp);

        esph->seq_no = esph->spi;
        esph->spi = *seqhi;
}
static void esp_output_restore_header(struct sk_buff *skb)
{
        void *tmp = ESP_SKB_CB(skb)->tmp;
        struct esp_output_extra *extra = esp_tmp_extra(tmp);

        esp_restore_header(skb, skb_transport_offset(skb) + extra->esphoff -
                                sizeof(__be32));
}
static struct ip_esp_hdr *esp_output_set_esn(struct sk_buff *skb,
                                             struct xfrm_state *x,
                                             struct ip_esp_hdr *esph,
                                             struct esp_output_extra *extra)
{
        /* For ESN we move the header forward by 4 bytes to
         * accommodate the high bits.  We will move it back after
         * encryption.
         */
        if ((x->props.flags & XFRM_STATE_ESN)) {
                __u32 seqhi;
                struct xfrm_offload *xo = xfrm_offload(skb);

                if (xo)
                        seqhi = xo->seq.hi;
                else
                        seqhi = XFRM_SKB_CB(skb)->seq.output.hi;

                extra->esphoff = (unsigned char *)esph -
                                 skb_transport_header(skb);
                esph = (struct ip_esp_hdr *)((unsigned char *)esph - 4);
                extra->seqhi = esph->spi;
                esph->seq_no = htonl(seqhi);
        }

        esph->spi = x->id.spi;

        return esph;
}
static void esp_output_done_esn(struct crypto_async_request *base, int err)
{
        struct sk_buff *skb = base->data;

        esp_output_restore_header(skb);
        esp_output_done(base, err);
}
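/* Build the UDP encapsulation header (and, for NON-IKE encap, the two
 * zero marker words) in front of where the ESP header will go; returns
 * the new location of the ESP header.
 */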
static struct ip_esp_hdr *esp6_output_udp_encap(struct sk_buff *skb,
                                                int encap_type,
                                                struct esp_info *esp,
                                                __be16 sport,
                                                __be16 dport)
{
        struct udphdr *uh;
        __be32 *udpdata32;
        unsigned int len;

        len = skb->len + esp->tailen - skb_transport_offset(skb);
        if (len > U16_MAX)
                return ERR_PTR(-EMSGSIZE);

        uh = (struct udphdr *)esp->esph;
        uh->source = sport;
        uh->dest = dport;
        uh->len = htons(len);
        uh->check = 0;

        *skb_mac_header(skb) = IPPROTO_UDP;

        if (encap_type == UDP_ENCAP_ESPINUDP_NON_IKE) {
                udpdata32 = (__be32 *)(uh + 1);
                udpdata32[0] = udpdata32[1] = 0;
                return (struct ip_esp_hdr *)(udpdata32 + 2);
        }

        return (struct ip_esp_hdr *)(uh + 1);
}
#ifdef CONFIG_INET6_ESPINTCP
static struct ip_esp_hdr *esp6_output_tcp_encap(struct xfrm_state *x,
                                                struct sk_buff *skb,
                                                struct esp_info *esp)
{
        __be16 *lenp = (void *)esp->esph;
        struct ip_esp_hdr *esph;
        unsigned int len;
        struct sock *sk;

        len = skb->len + esp->tailen - skb_transport_offset(skb);
        if (len > IP_MAX_MTU)
                return ERR_PTR(-EMSGSIZE);

        rcu_read_lock();
        sk = esp6_find_tcp_sk(x);
        rcu_read_unlock();

        if (IS_ERR(sk))
                return ERR_CAST(sk);

        *lenp = htons(len);
        esph = (struct ip_esp_hdr *)(lenp + 1);

        return esph;
}
#else
static struct ip_esp_hdr *esp6_output_tcp_encap(struct xfrm_state *x,
                                                struct sk_buff *skb,
                                                struct esp_info *esp)
{
        return ERR_PTR(-EOPNOTSUPP);
}
#endif
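/* Dispatch on the configured encapsulation type and record the resulting
 * ESP header location in esp->esph.
 */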
static int esp6_output_encap(struct xfrm_state *x, struct sk_buff *skb,
                             struct esp_info *esp)
{
        struct xfrm_encap_tmpl *encap = x->encap;
        struct ip_esp_hdr *esph;
        __be16 sport, dport;
        int encap_type;

        spin_lock_bh(&x->lock);
        sport = encap->encap_sport;
        dport = encap->encap_dport;
        encap_type = encap->encap_type;
        spin_unlock_bh(&x->lock);

        switch (encap_type) {
        default:
        case UDP_ENCAP_ESPINUDP:
        case UDP_ENCAP_ESPINUDP_NON_IKE:
                esph = esp6_output_udp_encap(skb, encap_type, esp, sport, dport);
                break;
        case TCP_ENCAP_ESPINTCP:
                esph = esp6_output_tcp_encap(x, skb, esp);
                break;
        }

        if (IS_ERR(esph))
                return PTR_ERR(esph);

        esp->esph = esph;

        return 0;
}
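/* Make room for the ESP trailer (TFC padding, pad bytes, pad length,
 * next header and ICV) and fill it in, using a page fragment from
 * x->xfrag when the skb cannot be extended in place. Returns the number
 * of fragments to reserve scatterlist entries for, or a negative error.
 */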
int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
        u8 *tail;
        u8 *vaddr;
        int nfrags;
        int esph_offset;
        struct page *page;
        struct sk_buff *trailer;
        int tailen = esp->tailen;

        if (x->encap) {
                int err = esp6_output_encap(x, skb, esp);

                if (err < 0)
                        return err;
        }

        if (!skb_cloned(skb)) {
                if (tailen <= skb_tailroom(skb)) {
                        nfrags = 1;
                        trailer = skb;
                        tail = skb_tail_pointer(trailer);

                        goto skip_cow;
                } else if ((skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS)
                           && !skb_has_frag_list(skb)) {
                        int allocsize;
                        struct sock *sk = skb->sk;
                        struct page_frag *pfrag = &x->xfrag;

                        esp->inplace = false;

                        allocsize = ALIGN(tailen, L1_CACHE_BYTES);

                        spin_lock_bh(&x->lock);

                        if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
                                spin_unlock_bh(&x->lock);
                                goto cow;
                        }

                        page = pfrag->page;
                        get_page(page);

                        vaddr = kmap_atomic(page);

                        tail = vaddr + pfrag->offset;

                        esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);

                        kunmap_atomic(vaddr);

                        nfrags = skb_shinfo(skb)->nr_frags;

                        __skb_fill_page_desc(skb, nfrags, page, pfrag->offset,
                                             tailen);
                        skb_shinfo(skb)->nr_frags = ++nfrags;

                        pfrag->offset = pfrag->offset + allocsize;

                        spin_unlock_bh(&x->lock);

                        nfrags++;

                        skb->len += tailen;
                        skb->data_len += tailen;
                        skb->truesize += tailen;
                        if (sk && sk_fullsock(sk))
                                refcount_add(tailen, &sk->sk_wmem_alloc);

                        goto out;
                }
        }

cow:
        esph_offset = (unsigned char *)esp->esph - skb_transport_header(skb);

        nfrags = skb_cow_data(skb, tailen, &trailer);
        if (nfrags < 0)
                goto out;
        tail = skb_tail_pointer(trailer);
        esp->esph = (struct ip_esp_hdr *)(skb_transport_header(skb) + esph_offset);

skip_cow:
        esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);
        pskb_put(skb, trailer, tailen);

out:
        return nfrags;
}
EXPORT_SYMBOL_GPL(esp6_output_head);
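/* Set up the AEAD request for the trailer-prepared skb and perform the
 * actual ESP encryption. The source scatterlist covers the current skb;
 * when the trailer was placed in a separate page fragment
 * (esp->inplace == false), the ciphertext is written to a freshly
 * refilled destination fragment instead.
 */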
int esp6_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
        u8 *iv;
        int alen;
        void *tmp;
        int ivlen;
        int assoclen;
        int extralen;
        struct page *page;
        struct ip_esp_hdr *esph;
        struct aead_request *req;
        struct crypto_aead *aead;
        struct scatterlist *sg, *dsg;
        struct esp_output_extra *extra;
        int err = -ENOMEM;

        assoclen = sizeof(struct ip_esp_hdr);
        extralen = 0;

        if (x->props.flags & XFRM_STATE_ESN) {
                extralen += sizeof(*extra);
                assoclen += sizeof(__be32);
        }

        aead = x->data;
        alen = crypto_aead_authsize(aead);
        ivlen = crypto_aead_ivsize(aead);

        tmp = esp_alloc_tmp(aead, esp->nfrags + 2, extralen);
        if (!tmp)
                goto error;

        extra = esp_tmp_extra(tmp);
        iv = esp_tmp_iv(aead, tmp, extralen);
        req = esp_tmp_req(aead, iv);
        sg = esp_req_sg(aead, req);

        if (esp->inplace)
                dsg = sg;
        else
                dsg = &sg[esp->nfrags];

        esph = esp_output_set_esn(skb, x, esp->esph, extra);
        esp->esph = esph;

        sg_init_table(sg, esp->nfrags);
        err = skb_to_sgvec(skb, sg,
                           (unsigned char *)esph - skb->data,
                           assoclen + ivlen + esp->clen + alen);
        if (unlikely(err < 0))
                goto error_free;

        if (!esp->inplace) {
                int allocsize;
                struct page_frag *pfrag = &x->xfrag;

                allocsize = ALIGN(skb->data_len, L1_CACHE_BYTES);

                spin_lock_bh(&x->lock);
                if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
                        spin_unlock_bh(&x->lock);
                        goto error_free;
                }

                skb_shinfo(skb)->nr_frags = 1;

                page = pfrag->page;
                get_page(page);
                /* replace page frags in skb with new page */
                __skb_fill_page_desc(skb, 0, page, pfrag->offset, skb->data_len);
                pfrag->offset = pfrag->offset + allocsize;
                spin_unlock_bh(&x->lock);

                sg_init_table(dsg, skb_shinfo(skb)->nr_frags + 1);
                err = skb_to_sgvec(skb, dsg,
                                   (unsigned char *)esph - skb->data,
                                   assoclen + ivlen + esp->clen + alen);
                if (unlikely(err < 0))
                        goto error_free;
        }

        if ((x->props.flags & XFRM_STATE_ESN))
                aead_request_set_callback(req, 0, esp_output_done_esn, skb);
        else
                aead_request_set_callback(req, 0, esp_output_done, skb);

        aead_request_set_crypt(req, sg, dsg, ivlen + esp->clen, iv);
        aead_request_set_ad(req, assoclen);

        memset(iv, 0, ivlen);
        memcpy(iv + ivlen - min(ivlen, 8), (u8 *)&esp->seqno + 8 - min(ivlen, 8),
               min(ivlen, 8));

        ESP_SKB_CB(skb)->tmp = tmp;
        err = crypto_aead_encrypt(req);

        switch (err) {
        case -EINPROGRESS:
                goto error;

        case -ENOSPC:
                err = NET_XMIT_DROP;
                break;

        case 0:
                if ((x->props.flags & XFRM_STATE_ESN))
                        esp_output_restore_header(skb);
                esp_output_encap_csum(skb);
        }

        if (sg != dsg)
                esp_ssg_unref(x, tmp);

        if (!err && x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP)
                err = esp_output_tail_tcp(x, skb);

error_free:
        kfree(tmp);
error:
        return err;
}
EXPORT_SYMBOL_GPL(esp6_output_tail);
static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
{
        int alen;
        int blksize;
        struct ip_esp_hdr *esph;
        struct crypto_aead *aead;
        struct esp_info esp;

        esp.inplace = true;

        esp.proto = *skb_mac_header(skb);
        *skb_mac_header(skb) = IPPROTO_ESP;

        /* skb is pure payload to encrypt */

        aead = x->data;
        alen = crypto_aead_authsize(aead);

        esp.tfclen = 0;
        if (x->tfcpad) {
                struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
                u32 padto;

                padto = min(x->tfcpad, xfrm_state_mtu(x, dst->child_mtu_cached));
                if (skb->len < padto)
                        esp.tfclen = padto - skb->len;
        }
        blksize = ALIGN(crypto_aead_blocksize(aead), 4);
        esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
        esp.plen = esp.clen - skb->len - esp.tfclen;
        esp.tailen = esp.tfclen + esp.plen + alen;

        esp.esph = ip_esp_hdr(skb);

        esp.nfrags = esp6_output_head(x, skb, &esp);
        if (esp.nfrags < 0)
                return esp.nfrags;

        esph = esp.esph;
        esph->spi = x->id.spi;

        esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
        esp.seqno = cpu_to_be64(XFRM_SKB_CB(skb)->seq.output.low +
                                ((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32));

        skb_push(skb, -skb_network_offset(skb));

        return esp6_output_tail(x, skb, &esp);
}
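/* Parse and strip the ESP trailer: validate the pad length, trim the
 * padding and ICV off the skb and return the next-header value (or a
 * negative error).
 */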
static inline int esp_remove_trailer(struct sk_buff *skb)
{
        struct xfrm_state *x = xfrm_input_state(skb);
        struct xfrm_offload *xo = xfrm_offload(skb);
        struct crypto_aead *aead = x->data;
        int alen, hlen, elen;
        int padlen, trimlen;
        __wsum csumdiff;
        u8 nexthdr[2];
        int ret;

        alen = crypto_aead_authsize(aead);
        hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
        elen = skb->len - hlen;

        if (xo && (xo->flags & XFRM_ESP_NO_TRAILER)) {
                ret = xo->proto;
                goto out;
        }

        ret = skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2);
        BUG_ON(ret);

        ret = -EINVAL;
        padlen = nexthdr[0];
        if (padlen + 2 + alen >= elen) {
                net_dbg_ratelimited("ipsec esp packet is garbage padlen=%d, elen=%d\n",
                                    padlen + 2, elen - alen);
                goto out;
        }

        trimlen = alen + padlen + 2;
        if (skb->ip_summed == CHECKSUM_COMPLETE) {
                csumdiff = skb_checksum(skb, skb->len - trimlen, trimlen, 0);
                skb->csum = csum_block_sub(skb->csum, csumdiff,
                                           skb->len - trimlen);
        }
        pskb_trim(skb, skb->len - trimlen);

        ret = nexthdr[1];

out:
        return ret;
}
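/* Common post-decryption processing: free the request buffer, strip the
 * trailer, handle NAT-T source address/port changes and checksum fixups,
 * and point the transport header at the inner packet.
 */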
int esp6_input_done2(struct sk_buff *skb, int err)
{
        struct xfrm_state *x = xfrm_input_state(skb);
        struct xfrm_offload *xo = xfrm_offload(skb);
        struct crypto_aead *aead = x->data;
        int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
        int hdr_len = skb_network_header_len(skb);

        if (!xo || (xo && !(xo->flags & CRYPTO_DONE)))
                kfree(ESP_SKB_CB(skb)->tmp);

        if (unlikely(err))
                goto out;

        err = esp_remove_trailer(skb);
        if (unlikely(err < 0))
                goto out;

        if (x->encap) {
                const struct ipv6hdr *ip6h = ipv6_hdr(skb);
                int offset = skb_network_offset(skb) + sizeof(*ip6h);
                struct xfrm_encap_tmpl *encap = x->encap;
                u8 nexthdr = ip6h->nexthdr;
                __be16 frag_off, source;
                struct udphdr *uh;
                struct tcphdr *th;

                offset = ipv6_skip_exthdr(skb, offset, &nexthdr, &frag_off);
                uh = (void *)(skb->data + offset);
                th = (void *)(skb->data + offset);
                hdr_len += offset;

                switch (x->encap->encap_type) {
                case TCP_ENCAP_ESPINTCP:
                        source = th->source;
                        break;
                case UDP_ENCAP_ESPINUDP:
                case UDP_ENCAP_ESPINUDP_NON_IKE:
                        source = uh->source;
                        break;
                default:
                        WARN_ON_ONCE(1);
                        err = -EINVAL;
                        goto out;
                }

                /*
                 * 1) if the NAT-T peer's IP or port changed then
                 *    advertise the change to the keying daemon.
                 *    This is an inbound SA, so just compare
                 *    SRC ports.
                 */
                if (!ipv6_addr_equal(&ip6h->saddr, &x->props.saddr.in6) ||
                    source != encap->encap_sport) {
                        xfrm_address_t ipaddr;

                        memcpy(&ipaddr.a6, &ip6h->saddr.s6_addr, sizeof(ipaddr.a6));
                        km_new_mapping(x, &ipaddr, source);

                        /* XXX: perhaps add an extra
                         * policy check here, to see
                         * if we should allow or
                         * reject a packet from a
                         * different source
                         * address/port.
                         */
                }

                /*
                 * 2) ignore UDP/TCP checksums in case
                 *    of NAT-T in Transport Mode, or
                 *    perform other post-processing fixes
                 *    as per draft-ietf-ipsec-udp-encaps-06,
                 *    section 3.1.2
                 */
                if (x->props.mode == XFRM_MODE_TRANSPORT)
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
        }

        skb_postpull_rcsum(skb, skb_network_header(skb),
                           skb_network_header_len(skb));
        skb_pull_rcsum(skb, hlen);
        if (x->props.mode == XFRM_MODE_TUNNEL)
                skb_reset_transport_header(skb);
        else
                skb_set_transport_header(skb, -hdr_len);

        /* RFC4303: Drop dummy packets without any error */
        if (err == IPPROTO_NONE)
                err = -EINVAL;

out:
        return err;
}
EXPORT_SYMBOL_GPL(esp6_input_done2);
static void esp_input_done(struct crypto_async_request *base, int err)
{
        struct sk_buff *skb = base->data;

        xfrm_input_resume(skb, esp6_input_done2(skb, err));
}
static void esp_input_restore_header(struct sk_buff *skb)
{
        esp_restore_header(skb, 0);
        __skb_pull(skb, 4);
}
static void esp_input_set_header(struct sk_buff *skb, __be32 *seqhi)
{
        struct xfrm_state *x = xfrm_input_state(skb);

        /* For ESN we move the header forward by 4 bytes to
         * accommodate the high bits.  We will move it back after
         * decryption.
         */
        if ((x->props.flags & XFRM_STATE_ESN)) {
                struct ip_esp_hdr *esph = skb_push(skb, 4);

                *seqhi = esph->spi;
                esph->spi = esph->seq_no;
                esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi;
        }
}
static void esp_input_done_esn(struct crypto_async_request *base, int err)
{
        struct sk_buff *skb = base->data;

        esp_input_restore_header(skb);
        esp_input_done(base, err);
}
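/* Receive-side entry point: map the packet into a scatterlist, run the
 * AEAD decryption (possibly asynchronously) and finish via
 * esp6_input_done2().
 */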
static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
{
        struct crypto_aead *aead = x->data;
        struct aead_request *req;
        struct sk_buff *trailer;
        int ivlen = crypto_aead_ivsize(aead);
        int elen = skb->len - sizeof(struct ip_esp_hdr) - ivlen;
        int nfrags;
        int assoclen;
        int seqhilen;
        int ret = 0;
        void *tmp;
        __be32 *seqhi;
        u8 *iv;
        struct scatterlist *sg;

        if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + ivlen)) {
                ret = -EINVAL;
                goto out;
        }

        if (elen <= 0) {
                ret = -EINVAL;
                goto out;
        }

        assoclen = sizeof(struct ip_esp_hdr);
        seqhilen = 0;

        if (x->props.flags & XFRM_STATE_ESN) {
                seqhilen += sizeof(__be32);
                assoclen += seqhilen;
        }

        if (!skb_cloned(skb)) {
                if (!skb_is_nonlinear(skb)) {
                        nfrags = 1;

                        goto skip_cow;
                } else if (!skb_has_frag_list(skb)) {
                        nfrags = skb_shinfo(skb)->nr_frags;
                        nfrags++;

                        goto skip_cow;
                }
        }

        nfrags = skb_cow_data(skb, 0, &trailer);
        if (nfrags < 0) {
                ret = -EINVAL;
                goto out;
        }

skip_cow:
        ret = -ENOMEM;
        tmp = esp_alloc_tmp(aead, nfrags, seqhilen);
        if (!tmp)
                goto out;

        ESP_SKB_CB(skb)->tmp = tmp;
        seqhi = esp_tmp_extra(tmp);
        iv = esp_tmp_iv(aead, tmp, seqhilen);
        req = esp_tmp_req(aead, iv);
        sg = esp_req_sg(aead, req);

        esp_input_set_header(skb, seqhi);

        sg_init_table(sg, nfrags);
        ret = skb_to_sgvec(skb, sg, 0, skb->len);
        if (unlikely(ret < 0)) {
                kfree(tmp);
                goto out;
        }

        skb->ip_summed = CHECKSUM_NONE;

        if ((x->props.flags & XFRM_STATE_ESN))
                aead_request_set_callback(req, 0, esp_input_done_esn, skb);
        else
                aead_request_set_callback(req, 0, esp_input_done, skb);

        aead_request_set_crypt(req, sg, sg, elen + ivlen, iv);
        aead_request_set_ad(req, assoclen);

        ret = crypto_aead_decrypt(req);
        if (ret == -EINPROGRESS)
                goto out;

        if ((x->props.flags & XFRM_STATE_ESN))
                esp_input_restore_header(skb);

        ret = esp6_input_done2(skb, ret);

out:
        return ret;
}
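/* ICMPv6 error handler: react to packet-too-big and redirect messages
 * addressed to an ESP SA.
 */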
static int esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                    u8 type, u8 code, int offset, __be32 info)
{
        struct net *net = dev_net(skb->dev);
        const struct ipv6hdr *iph = (const struct ipv6hdr *)skb->data;
        struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + offset);
        struct xfrm_state *x;

        if (type != ICMPV6_PKT_TOOBIG &&
            type != NDISC_REDIRECT)
                return 0;

        x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
                              esph->spi, IPPROTO_ESP, AF_INET6);
        if (!x)
                return 0;

        if (type == NDISC_REDIRECT)
                ip6_redirect(skb, net, skb->dev->ifindex, 0,
                             sock_net_uid(net, NULL));
        else
                ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL));
        xfrm_state_put(x);

        return 0;
}
static void esp6_destroy(struct xfrm_state *x)
{
        struct crypto_aead *aead = x->data;

        if (!aead)
                return;

        crypto_free_aead(aead);
}
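/* Instantiate the AEAD transform named by the state's algorithm
 * description and program its key and ICV length.
 */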
static int esp_init_aead(struct xfrm_state *x)
{
        char aead_name[CRYPTO_MAX_ALG_NAME];
        struct crypto_aead *aead;
        int err;

        err = -ENAMETOOLONG;
        if (snprintf(aead_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
                     x->geniv, x->aead->alg_name) >= CRYPTO_MAX_ALG_NAME)
                goto error;

        aead = crypto_alloc_aead(aead_name, 0, 0);
        err = PTR_ERR(aead);
        if (IS_ERR(aead))
                goto error;

        x->data = aead;

        err = crypto_aead_setkey(aead, x->aead->alg_key,
                                 (x->aead->alg_key_len + 7) / 8);
        if (err)
                goto error;

        err = crypto_aead_setauthsize(aead, x->aead->alg_icv_len / 8);
        if (err)
                goto error;

error:
        return err;
}
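/* Build an authenc/authencesn AEAD from separate encryption and
 * authentication algorithms, packing both keys plus the authenc key
 * parameter header into the single blob crypto_aead_setkey() expects.
 */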
static int esp_init_authenc(struct xfrm_state *x)
{
        struct crypto_aead *aead;
        struct crypto_authenc_key_param *param;
        struct rtattr *rta;
        char *key;
        char *p;
        char authenc_name[CRYPTO_MAX_ALG_NAME];
        unsigned int keylen;
        int err;

        err = -EINVAL;
        if (!x->ealg)
                goto error;

        err = -ENAMETOOLONG;

        if ((x->props.flags & XFRM_STATE_ESN)) {
                if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
                             "%s%sauthencesn(%s,%s)%s",
                             x->geniv ?: "", x->geniv ? "(" : "",
                             x->aalg ? x->aalg->alg_name : "digest_null",
                             x->ealg->alg_name,
                             x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME)
                        goto error;
        } else {
                if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
                             "%s%sauthenc(%s,%s)%s",
                             x->geniv ?: "", x->geniv ? "(" : "",
                             x->aalg ? x->aalg->alg_name : "digest_null",
                             x->ealg->alg_name,
                             x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME)
                        goto error;
        }

        aead = crypto_alloc_aead(authenc_name, 0, 0);
        err = PTR_ERR(aead);
        if (IS_ERR(aead))
                goto error;

        x->data = aead;

        keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
                 (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
        err = -ENOMEM;
        key = kmalloc(keylen, GFP_KERNEL);
        if (!key)
                goto error;

        p = key;
        rta = (void *)p;
        rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
        rta->rta_len = RTA_LENGTH(sizeof(*param));
        param = RTA_DATA(rta);
        p += RTA_SPACE(sizeof(*param));

        if (x->aalg) {
                struct xfrm_algo_desc *aalg_desc;

                memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8);
                p += (x->aalg->alg_key_len + 7) / 8;

                aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
                BUG_ON(!aalg_desc);

                err = -EINVAL;
                if (aalg_desc->uinfo.auth.icv_fullbits / 8 !=
                    crypto_aead_authsize(aead)) {
                        pr_info("ESP: %s digestsize %u != %hu\n",
                                x->aalg->alg_name,
                                crypto_aead_authsize(aead),
                                aalg_desc->uinfo.auth.icv_fullbits / 8);
                        goto free_key;
                }

                err = crypto_aead_setauthsize(
                        aead, x->aalg->alg_trunc_len / 8);
                if (err)
                        goto free_key;
        }

        param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
        memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);

        err = crypto_aead_setkey(aead, key, keylen);

free_key:
        kfree(key);

error:
        return err;
}
static int esp6_init_state(struct xfrm_state *x)
{
        struct crypto_aead *aead;
        u32 align;
        int err;

        x->data = NULL;

        if (x->aead)
                err = esp_init_aead(x);
        else
                err = esp_init_authenc(x);

        if (err)
                goto error;

        aead = x->data;

        x->props.header_len = sizeof(struct ip_esp_hdr) +
                              crypto_aead_ivsize(aead);
        switch (x->props.mode) {
        case XFRM_MODE_BEET:
                if (x->sel.family != AF_INET6)
                        x->props.header_len += IPV4_BEET_PHMAXLEN +
                                               (sizeof(struct ipv6hdr) - sizeof(struct iphdr));
                break;
        default:
        case XFRM_MODE_TRANSPORT:
                break;
        case XFRM_MODE_TUNNEL:
                x->props.header_len += sizeof(struct ipv6hdr);
                break;
        }

        if (x->encap) {
                struct xfrm_encap_tmpl *encap = x->encap;

                switch (encap->encap_type) {
                default:
                        err = -EINVAL;
                        goto error;
                case UDP_ENCAP_ESPINUDP:
                        x->props.header_len += sizeof(struct udphdr);
                        break;
                case UDP_ENCAP_ESPINUDP_NON_IKE:
                        x->props.header_len += sizeof(struct udphdr) + 2 * sizeof(u32);
                        break;
#ifdef CONFIG_INET6_ESPINTCP
                case TCP_ENCAP_ESPINTCP:
                        /* only the length field, TCP encap is done by
                         * the socket
                         */
                        x->props.header_len += 2;
                        break;
#endif
                }
        }

        align = ALIGN(crypto_aead_blocksize(aead), 4);
        x->props.trailer_len = align + 1 + crypto_aead_authsize(aead);

error:
        return err;
}
static int esp6_rcv_cb(struct sk_buff *skb, int err)
{
        return 0;
}
static const struct xfrm_type esp6_type = {
        .description    = "ESP6",
        .owner          = THIS_MODULE,
        .proto          = IPPROTO_ESP,
        .flags          = XFRM_TYPE_REPLAY_PROT,
        .init_state     = esp6_init_state,
        .destructor     = esp6_destroy,
        .input          = esp6_input,
        .output         = esp6_output,
        .hdr_offset     = xfrm6_find_1stfragopt,
};
static struct xfrm6_protocol esp6_protocol = {
        .handler        = xfrm6_rcv,
        .input_handler  = xfrm_input,
        .cb_handler     = esp6_rcv_cb,
        .err_handler    = esp6_err,
        .priority       = 0,
};
static int __init esp6_init(void)
{
        if (xfrm_register_type(&esp6_type, AF_INET6) < 0) {
                pr_info("%s: can't add xfrm type\n", __func__);
                return -EAGAIN;
        }
        if (xfrm6_protocol_register(&esp6_protocol, IPPROTO_ESP) < 0) {
                pr_info("%s: can't add protocol\n", __func__);
                xfrm_unregister_type(&esp6_type, AF_INET6);
                return -EAGAIN;
        }

        return 0;
}
static void __exit esp6_fini(void)
{
        if (xfrm6_protocol_deregister(&esp6_protocol, IPPROTO_ESP) < 0)
                pr_info("%s: can't remove protocol\n", __func__);
        xfrm_unregister_type(&esp6_type, AF_INET6);
}
module_init(esp6_init);
module_exit(esp6_fini);

MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_ESP);