// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C)2002 USAGI/WIDE Project
 *
 * Mitsuru KANDA @USAGI : IPv6 Support
 * Kazunori MIYAZAWA @USAGI :
 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *
 * This file is derived from net/ipv4/esp.c
 */

#define pr_fmt(fmt) "IPv6: " fmt
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/pfkeyv2.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/ip6_checksum.h>
#include <net/ip6_route.h>
#include <net/icmp.h>
#include <net/ipv6.h>
#include <net/protocol.h>
#include <net/udp.h>
#include <linux/icmpv6.h>
#include <net/tcp.h>
#include <net/espintcp.h>
#include <net/inet6_hashtables.h>
#include <linux/skbuff_ref.h>
#include <linux/highmem.h>
struct esp_skb_cb {
	struct xfrm_skb_cb xfrm;
	void *tmp;
};

struct esp_output_extra {
	__be32 seqhi;
	u32 esphoff;
};

#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))
/*
 * Allocate an AEAD request structure with extra space for SG and IV.
 *
 * For alignment considerations the upper 32 bits of the sequence number are
 * placed at the front, if present. Followed by the IV, the request and finally
 * the SG list.
 *
 * TODO: Use spare space in skb for this where possible.
 */
static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqihlen)
{
	unsigned int len;

	len = seqihlen;
	len += crypto_aead_ivsize(aead);

	len += crypto_aead_alignmask(aead) &
	       ~(crypto_tfm_ctx_alignment() - 1);
	len = ALIGN(len, crypto_tfm_ctx_alignment());

	len += sizeof(struct aead_request) + crypto_aead_reqsize(aead);
	len = ALIGN(len, __alignof__(struct scatterlist));

	len += sizeof(struct scatterlist) * nfrags;

	return kmalloc(len, GFP_ATOMIC);
}
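/* Illustrative layout sketch of the buffer returned by esp_alloc_tmp():
 *
 *	[ seqhi/extra | IV | aead_request + crypto ctx | sg[0..nfrags-1] ]
 *
 * The esp_tmp_extra(), esp_tmp_iv(), esp_tmp_req() and esp_req_sg() helpers
 * below recover each region from the same base pointer using the matching
 * alignment rules.
 */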
static inline void *esp_tmp_extra(void *tmp)
{
	return PTR_ALIGN(tmp, __alignof__(struct esp_output_extra));
}
static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int seqhilen)
{
	return crypto_aead_ivsize(aead) ?
	       PTR_ALIGN((u8 *)tmp + seqhilen,
			 crypto_aead_alignmask(aead) + 1) : tmp + seqhilen;
}
static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
{
	struct aead_request *req;

	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
				crypto_tfm_ctx_alignment());
	aead_request_set_tfm(req, aead);
	return req;
}
static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
					     struct aead_request *req)
{
	return (void *)ALIGN((unsigned long)(req + 1) +
			     crypto_aead_reqsize(aead),
			     __alignof__(struct scatterlist));
}
static void esp_ssg_unref(struct xfrm_state *x, void *tmp, struct sk_buff *skb)
{
	struct crypto_aead *aead = x->data;
	int extralen = 0;
	u8 *iv;
	struct aead_request *req;
	struct scatterlist *sg;

	if (x->props.flags & XFRM_STATE_ESN)
		extralen += sizeof(struct esp_output_extra);

	iv = esp_tmp_iv(aead, tmp, extralen);
	req = esp_tmp_req(aead, iv);

	/* Unref skb_frag_pages in the src scatterlist if necessary.
	 * Skip the first sg which comes from skb->data.
	 */
	if (req->src != req->dst)
		for (sg = sg_next(req->src); sg; sg = sg_next(sg))
			skb_page_unref(page_to_netmem(sg_page(sg)),
				       skb->pp_recycle);
}
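/* Illustrative note (best-effort reading of the output path below): when the
 * transform cannot run in place, the skb's page fragments are swapped for
 * freshly refilled ones before encryption, so the source scatterlist built
 * earlier is what still references the old fragment pages; esp_ssg_unref()
 * releases those references once the request has finished, skipping sg[0]
 * which maps skb->data.
 */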
#ifdef CONFIG_INET6_ESPINTCP
struct esp_tcp_sk {
	struct sock *sk;
	struct rcu_head rcu;
};

static void esp_free_tcp_sk(struct rcu_head *head)
{
	struct esp_tcp_sk *esk = container_of(head, struct esp_tcp_sk, rcu);

	sock_put(esk->sk);
	kfree(esk);
}
static struct sock *esp6_find_tcp_sk(struct xfrm_state *x)
{
	struct xfrm_encap_tmpl *encap = x->encap;
	struct net *net = xs_net(x);
	struct esp_tcp_sk *esk;
	__be16 sport, dport;
	struct sock *nsk;
	struct sock *sk;

	sk = rcu_dereference(x->encap_sk);
	if (sk && sk->sk_state == TCP_ESTABLISHED)
		return sk;

	spin_lock_bh(&x->lock);
	sport = encap->encap_sport;
	dport = encap->encap_dport;
	nsk = rcu_dereference_protected(x->encap_sk,
					lockdep_is_held(&x->lock));
	if (sk && sk == nsk) {
		esk = kmalloc(sizeof(*esk), GFP_ATOMIC);
		if (!esk) {
			spin_unlock_bh(&x->lock);
			return ERR_PTR(-ENOMEM);
		}
		RCU_INIT_POINTER(x->encap_sk, NULL);
		esk->sk = sk;
		call_rcu(&esk->rcu, esp_free_tcp_sk);
	}
	spin_unlock_bh(&x->lock);

	sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo, &x->id.daddr.in6,
					dport, &x->props.saddr.in6, ntohs(sport), 0, 0);
	if (!sk)
		return ERR_PTR(-ENOENT);

	if (!tcp_is_ulp_esp(sk)) {
		sock_put(sk);
		return ERR_PTR(-EINVAL);
	}

	spin_lock_bh(&x->lock);
	nsk = rcu_dereference_protected(x->encap_sk,
					lockdep_is_held(&x->lock));
	if (encap->encap_sport != sport ||
	    encap->encap_dport != dport) {
		sock_put(sk);
		sk = nsk ?: ERR_PTR(-EREMCHG);
	} else if (sk == nsk) {
		sock_put(sk);
	} else {
		rcu_assign_pointer(x->encap_sk, sk);
	}
	spin_unlock_bh(&x->lock);

	return sk;
}
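/* Illustrative note: the socket returned here is the established TCP
 * connection on which userspace enabled the "espintcp" ULP (RFC 8229 TCP
 * encapsulation); it is cached in x->encap_sk under RCU so repeated
 * transmissions can normally skip the __inet6_lookup_established() call
 * above.
 */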
static int esp_output_tcp_finish(struct xfrm_state *x, struct sk_buff *skb)
{
	struct sock *sk;
	int err;

	rcu_read_lock();

	sk = esp6_find_tcp_sk(x);
	err = PTR_ERR_OR_ZERO(sk);
	if (err)
		goto out;

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))
		err = espintcp_queue_out(sk, skb);
	else
		err = espintcp_push_skb(sk, skb);
	bh_unlock_sock(sk);

out:
	rcu_read_unlock();
	return err;
}
static int esp_output_tcp_encap_cb(struct net *net, struct sock *sk,
				   struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct xfrm_state *x = dst->xfrm;

	return esp_output_tcp_finish(x, skb);
}
static int esp_output_tail_tcp(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;

	local_bh_disable();
	err = xfrm_trans_queue_net(xs_net(x), skb, esp_output_tcp_encap_cb);
	local_bh_enable();

	/* EINPROGRESS just happens to do the right thing.  It
	 * actually means that the skb has been consumed and
	 * isn't coming back.
	 */
	return err ?: -EINPROGRESS;
}
#else
static int esp_output_tail_tcp(struct xfrm_state *x, struct sk_buff *skb)
{
	WARN_ON(1);
	return -EOPNOTSUPP;
}
#endif
static void esp_output_encap_csum(struct sk_buff *skb)
{
	/* UDP encap with IPv6 requires a valid checksum */
	if (*skb_mac_header(skb) == IPPROTO_UDP) {
		struct udphdr *uh = udp_hdr(skb);
		struct ipv6hdr *ip6h = ipv6_hdr(skb);
		int len = ntohs(uh->len);
		unsigned int offset = skb_transport_offset(skb);
		__wsum csum = skb_checksum(skb, offset, skb->len - offset, 0);

		uh->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
					    len, IPPROTO_UDP, csum);
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	}
}
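/* Illustrative note: IPv6 mandates a valid UDP checksum on locally generated
 * datagrams (a zero checksum is only tolerated in narrow tunnel cases), so
 * the checksum is recomputed here over the encrypted payload; a computed
 * value of zero is folded to CSUM_MANGLED_0 as usual for UDP.
 */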
static void esp_output_done(void *data, int err)
{
	struct sk_buff *skb = data;
	struct xfrm_offload *xo = xfrm_offload(skb);
	void *tmp;
	struct xfrm_state *x;

	if (xo && (xo->flags & XFRM_DEV_RESUME)) {
		struct sec_path *sp = skb_sec_path(skb);

		x = sp->xvec[sp->len - 1];
	} else {
		x = skb_dst(skb)->xfrm;
	}

	tmp = ESP_SKB_CB(skb)->tmp;
	esp_ssg_unref(x, tmp, skb);
	kfree(tmp);

	esp_output_encap_csum(skb);

	if (xo && (xo->flags & XFRM_DEV_RESUME)) {
		if (err) {
			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			kfree_skb(skb);
			return;
		}

		skb_push(skb, skb->data - skb_mac_header(skb));
		secpath_reset(skb);
		xfrm_dev_resume(skb);
	} else {
		if (!err &&
		    x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP)
			esp_output_tail_tcp(x, skb);
		else
			xfrm_output_resume(skb->sk, skb, err);
	}
}
/* Move ESP header back into place. */
static void esp_restore_header(struct sk_buff *skb, unsigned int offset)
{
	struct ip_esp_hdr *esph = (void *)(skb->data + offset);
	void *tmp = ESP_SKB_CB(skb)->tmp;
	__be32 *seqhi = esp_tmp_extra(tmp);

	esph->seq_no = esph->spi;
	esph->spi = *seqhi;
}
static void esp_output_restore_header(struct sk_buff *skb)
{
	void *tmp = ESP_SKB_CB(skb)->tmp;
	struct esp_output_extra *extra = esp_tmp_extra(tmp);

	esp_restore_header(skb, skb_transport_offset(skb) + extra->esphoff -
				sizeof(__be32));
}
static struct ip_esp_hdr *esp_output_set_esn(struct sk_buff *skb,
					     struct xfrm_state *x,
					     struct ip_esp_hdr *esph,
					     struct esp_output_extra *extra)
{
	/* For ESN we move the header forward by 4 bytes to
	 * accommodate the high bits.  We will move it back after
	 * encryption.
	 */
	if ((x->props.flags & XFRM_STATE_ESN)) {
		__u32 seqhi;
		struct xfrm_offload *xo = xfrm_offload(skb);

		if (xo)
			seqhi = xo->seq.hi;
		else
			seqhi = XFRM_SKB_CB(skb)->seq.output.hi;

		extra->esphoff = (unsigned char *)esph -
				 skb_transport_header(skb);
		esph = (struct ip_esp_hdr *)((unsigned char *)esph - 4);
		extra->seqhi = esph->spi;
		esph->seq_no = htonl(seqhi);
	}

	esph->spi = x->id.spi;

	return esph;
}
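/* Illustrative sketch of the ESN header shuffle above: the upper 32
 * sequence-number bits must be covered by the ICV but are never transmitted,
 * so the header is temporarily shifted 4 bytes towards the packet start and
 * the AAD then reads [ SPI | seq_hi | seq_lo ]; the 4 bytes the shift
 * overwrites are parked in extra->seqhi and put back by
 * esp_output_restore_header() once encryption has completed.
 */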
static void esp_output_done_esn(void *data, int err)
{
	struct sk_buff *skb = data;

	esp_output_restore_header(skb);
	esp_output_done(data, err);
}
static struct ip_esp_hdr *esp6_output_udp_encap(struct sk_buff *skb,
						int encap_type,
						struct esp_info *esp,
						__be16 sport,
						__be16 dport)
{
	struct udphdr *uh;
	unsigned int len;

	len = skb->len + esp->tailen - skb_transport_offset(skb);
	if (len > U16_MAX)
		return ERR_PTR(-EMSGSIZE);

	uh = (struct udphdr *)esp->esph;
	uh->source = sport;
	uh->dest = dport;
	uh->len = htons(len);
	uh->check = 0;

	*skb_mac_header(skb) = IPPROTO_UDP;

	return (struct ip_esp_hdr *)(uh + 1);
}
#ifdef CONFIG_INET6_ESPINTCP
static struct ip_esp_hdr *esp6_output_tcp_encap(struct xfrm_state *x,
						struct sk_buff *skb,
						struct esp_info *esp)
{
	__be16 *lenp = (void *)esp->esph;
	struct ip_esp_hdr *esph;
	unsigned int len;
	struct sock *sk;

	len = skb->len + esp->tailen - skb_transport_offset(skb);
	if (len > IP_MAX_MTU)
		return ERR_PTR(-EMSGSIZE);

	rcu_read_lock();
	sk = esp6_find_tcp_sk(x);
	rcu_read_unlock();

	if (IS_ERR(sk))
		return ERR_CAST(sk);

	*lenp = htons(len);
	esph = (struct ip_esp_hdr *)(lenp + 1);

	return esph;
}
#else
static struct ip_esp_hdr *esp6_output_tcp_encap(struct xfrm_state *x,
						struct sk_buff *skb,
						struct esp_info *esp)
{
	return ERR_PTR(-EOPNOTSUPP);
}
#endif
static int esp6_output_encap(struct xfrm_state *x, struct sk_buff *skb,
			     struct esp_info *esp)
{
	struct xfrm_encap_tmpl *encap = x->encap;
	struct ip_esp_hdr *esph;
	__be16 sport, dport;
	int encap_type;

	spin_lock_bh(&x->lock);
	sport = encap->encap_sport;
	dport = encap->encap_dport;
	encap_type = encap->encap_type;
	spin_unlock_bh(&x->lock);

	switch (encap_type) {
	default:
	case UDP_ENCAP_ESPINUDP:
		esph = esp6_output_udp_encap(skb, encap_type, esp, sport, dport);
		break;
	case TCP_ENCAP_ESPINTCP:
		esph = esp6_output_tcp_encap(x, skb, esp);
		break;
	}

	if (IS_ERR(esph))
		return PTR_ERR(esph);

	esp->esph = esph;

	return 0;
}
int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
	u8 *tail;
	int nfrags;
	int esph_offset;
	struct page *page;
	struct sk_buff *trailer;
	int tailen = esp->tailen;

	if (x->encap) {
		int err = esp6_output_encap(x, skb, esp);

		if (err < 0)
			return err;
	}

	if (ALIGN(tailen, L1_CACHE_BYTES) > PAGE_SIZE ||
	    ALIGN(skb->data_len, L1_CACHE_BYTES) > PAGE_SIZE)
		goto cow;

	if (!skb_cloned(skb)) {
		if (tailen <= skb_tailroom(skb)) {
			nfrags = 1;
			trailer = skb;
			tail = skb_tail_pointer(trailer);

			goto skip_cow;
		} else if ((skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS)
			   && !skb_has_frag_list(skb)) {
			int allocsize;
			struct sock *sk = skb->sk;
			struct page_frag *pfrag = &x->xfrag;

			esp->inplace = false;

			allocsize = ALIGN(tailen, L1_CACHE_BYTES);

			spin_lock_bh(&x->lock);

			if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
				spin_unlock_bh(&x->lock);
				goto cow;
			}

			page = pfrag->page;
			get_page(page);

			tail = page_address(page) + pfrag->offset;

			esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);

			nfrags = skb_shinfo(skb)->nr_frags;

			__skb_fill_page_desc(skb, nfrags, page, pfrag->offset,
					     tailen);
			skb_shinfo(skb)->nr_frags = ++nfrags;

			pfrag->offset = pfrag->offset + allocsize;

			spin_unlock_bh(&x->lock);

			nfrags++;

			skb->len += tailen;
			skb->data_len += tailen;
			skb->truesize += tailen;
			if (sk && sk_fullsock(sk))
				refcount_add(tailen, &sk->sk_wmem_alloc);

			goto out;
		}
	}

cow:
	esph_offset = (unsigned char *)esp->esph - skb_transport_header(skb);

	nfrags = skb_cow_data(skb, tailen, &trailer);
	if (nfrags < 0)
		goto out;
	tail = skb_tail_pointer(trailer);
	esp->esph = (struct ip_esp_hdr *)(skb_transport_header(skb) + esph_offset);

skip_cow:
	esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);
	pskb_put(skb, trailer, tailen);

out:
	return nfrags;
}
EXPORT_SYMBOL_GPL(esp6_output_head);
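/* Illustrative reference: the trailer filled in above follows the RFC 4303
 * layout
 *
 *	[ TFC padding | padding | pad length | next header | ICV ]
 *
 * where esp->tfclen, esp->plen and esp->proto provide the first four fields
 * and the ICV (alen bytes) is produced by the AEAD transform in
 * esp6_output_tail().
 */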
int esp6_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
	u8 *iv;
	int alen;
	void *tmp;
	int ivlen;
	int assoclen;
	int extralen;
	struct page *page;
	struct ip_esp_hdr *esph;
	struct aead_request *req;
	struct crypto_aead *aead;
	struct scatterlist *sg, *dsg;
	struct esp_output_extra *extra;
	int err = -ENOMEM;

	assoclen = sizeof(struct ip_esp_hdr);
	extralen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		extralen += sizeof(*extra);
		assoclen += sizeof(__be32);
	}

	aead = x->data;
	alen = crypto_aead_authsize(aead);
	ivlen = crypto_aead_ivsize(aead);

	tmp = esp_alloc_tmp(aead, esp->nfrags + 2, extralen);
	if (!tmp)
		goto error;

	extra = esp_tmp_extra(tmp);
	iv = esp_tmp_iv(aead, tmp, extralen);
	req = esp_tmp_req(aead, iv);
	sg = esp_req_sg(aead, req);

	if (esp->inplace)
		dsg = sg;
	else
		dsg = &sg[esp->nfrags];

	esph = esp_output_set_esn(skb, x, esp->esph, extra);
	esp->esph = esph;

	sg_init_table(sg, esp->nfrags);
	err = skb_to_sgvec(skb, sg,
			   (unsigned char *)esph - skb->data,
			   assoclen + ivlen + esp->clen + alen);
	if (unlikely(err < 0))
		goto error_free;

	if (!esp->inplace) {
		int allocsize;
		struct page_frag *pfrag = &x->xfrag;

		allocsize = ALIGN(skb->data_len, L1_CACHE_BYTES);

		spin_lock_bh(&x->lock);
		if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
			spin_unlock_bh(&x->lock);
			goto error_free;
		}

		skb_shinfo(skb)->nr_frags = 1;

		page = pfrag->page;
		get_page(page);
		/* replace page frags in skb with new page */
		__skb_fill_page_desc(skb, 0, page, pfrag->offset, skb->data_len);
		pfrag->offset = pfrag->offset + allocsize;
		spin_unlock_bh(&x->lock);

		sg_init_table(dsg, skb_shinfo(skb)->nr_frags + 1);
		err = skb_to_sgvec(skb, dsg,
				   (unsigned char *)esph - skb->data,
				   assoclen + ivlen + esp->clen + alen);
		if (unlikely(err < 0))
			goto error_free;
	}

	if ((x->props.flags & XFRM_STATE_ESN))
		aead_request_set_callback(req, 0, esp_output_done_esn, skb);
	else
		aead_request_set_callback(req, 0, esp_output_done, skb);

	aead_request_set_crypt(req, sg, dsg, ivlen + esp->clen, iv);
	aead_request_set_ad(req, assoclen);

	memset(iv, 0, ivlen);
	memcpy(iv + ivlen - min(ivlen, 8), (u8 *)&esp->seqno + 8 - min(ivlen, 8),
	       min(ivlen, 8));

	ESP_SKB_CB(skb)->tmp = tmp;
	err = crypto_aead_encrypt(req);

	switch (err) {
	case -EINPROGRESS:
		goto error;

	case -ENOSPC:
		err = NET_XMIT_DROP;
		break;

	case 0:
		if ((x->props.flags & XFRM_STATE_ESN))
			esp_output_restore_header(skb);
		esp_output_encap_csum(skb);
	}

	if (sg != dsg)
		esp_ssg_unref(x, tmp, skb);

	if (!err && x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP)
		err = esp_output_tail_tcp(x, skb);

error_free:
	kfree(tmp);
error:
	return err;
}
EXPORT_SYMBOL_GPL(esp6_output_tail);
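/* Illustrative note on the IV handling above: the IV area is zeroed and its
 * last min(ivlen, 8) bytes are seeded with the 64-bit sequence number from
 * esp->seqno, which is what sequence-number driven IV generators (such as
 * "seqiv") consume to derive the per-packet IV.
 */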
static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int alen;
	int blksize;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct esp_info esp;

	esp.inplace = true;

	esp.proto = *skb_mac_header(skb);
	*skb_mac_header(skb) = IPPROTO_ESP;

	/* skb is pure payload to encrypt */

	aead = x->data;
	alen = crypto_aead_authsize(aead);

	esp.tfclen = 0;
	if (x->tfcpad) {
		struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
		u32 padto;

		padto = min(x->tfcpad, xfrm_state_mtu(x, dst->child_mtu_cached));
		if (skb->len < padto)
			esp.tfclen = padto - skb->len;
	}
	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
	esp.plen = esp.clen - skb->len - esp.tfclen;
	esp.tailen = esp.tfclen + esp.plen + alen;

	esp.esph = ip_esp_hdr(skb);

	esp.nfrags = esp6_output_head(x, skb, &esp);
	if (esp.nfrags < 0)
		return esp.nfrags;

	esph = esp.esph;
	esph->spi = x->id.spi;

	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
	esp.seqno = cpu_to_be64(XFRM_SKB_CB(skb)->seq.output.low +
				((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32));

	skb_push(skb, -skb_network_offset(skb));

	return esp6_output_tail(x, skb, &esp);
}
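/* Worked example of the length computations above (hypothetical numbers,
 * added for illustration): with a 37 byte payload, no TFC padding and a
 * blksize of 16, clen = ALIGN(37 + 2, 16) = 48, so plen = 48 - 37 = 11 and
 * tailen = 11 + alen; the "+ 2" accounts for the pad-length and next-header
 * bytes of the ESP trailer.
 */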
static inline int esp_remove_trailer(struct sk_buff *skb)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	struct crypto_aead *aead = x->data;
	int alen, hlen, elen;
	int padlen, trimlen;
	__wsum csumdiff;
	u8 nexthdr[2];
	int ret;

	alen = crypto_aead_authsize(aead);
	hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
	elen = skb->len - hlen;

	ret = skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2);
	BUG_ON(ret);

	ret = -EINVAL;
	padlen = nexthdr[0];
	if (padlen + 2 + alen >= elen) {
		net_dbg_ratelimited("ipsec esp packet is garbage padlen=%d, elen=%d\n",
				    padlen + 2, elen - alen);
		goto out;
	}

	trimlen = alen + padlen + 2;
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		csumdiff = skb_checksum(skb, skb->len - trimlen, trimlen, 0);
		skb->csum = csum_block_sub(skb->csum, csumdiff,
					   skb->len - trimlen);
	}
	ret = pskb_trim(skb, skb->len - trimlen);
	if (unlikely(ret))
		return ret;
	ret = nexthdr[1];

out:
	return ret;
}
int esp6_input_done2(struct sk_buff *skb, int err)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct crypto_aead *aead = x->data;
	int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
	int hdr_len = skb_network_header_len(skb);

	if (!xo || !(xo->flags & CRYPTO_DONE))
		kfree(ESP_SKB_CB(skb)->tmp);

	if (unlikely(err))
		goto out;

	err = esp_remove_trailer(skb);
	if (unlikely(err < 0))
		goto out;

	if (x->encap) {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
		int offset = skb_network_offset(skb) + sizeof(*ip6h);
		struct xfrm_encap_tmpl *encap = x->encap;
		u8 nexthdr = ip6h->nexthdr;
		__be16 frag_off, source;
		struct udphdr *uh;
		struct tcphdr *th;

		offset = ipv6_skip_exthdr(skb, offset, &nexthdr, &frag_off);
		if (offset == -1) {
			err = -EINVAL;
			goto out;
		}

		uh = (void *)(skb->data + offset);
		th = (void *)(skb->data + offset);

		switch (x->encap->encap_type) {
		case TCP_ENCAP_ESPINTCP:
			source = th->source;
			break;
		case UDP_ENCAP_ESPINUDP:
			source = uh->source;
			break;
		default:
			WARN_ON_ONCE(1);
			err = -EINVAL;
			goto out;
		}

		/*
		 * 1) if the NAT-T peer's IP or port changed then
		 *    advertise the change to the keying daemon.
		 *    This is an inbound SA, so just compare
		 *    SRC ports.
		 */
		if (!ipv6_addr_equal(&ip6h->saddr, &x->props.saddr.in6) ||
		    source != encap->encap_sport) {
			xfrm_address_t ipaddr;

			memcpy(&ipaddr.a6, &ip6h->saddr.s6_addr, sizeof(ipaddr.a6));
			km_new_mapping(x, &ipaddr, source);

			/* XXX: perhaps add an extra
			 * policy check here, to see
			 * if we should allow or
			 * reject a packet from a
			 * different source
			 * address/port.
			 */
		}

		/*
		 * 2) ignore UDP/TCP checksums in case
		 *    of NAT-T in Transport Mode, or
		 *    perform other post-processing fixes
		 *    as per draft-ietf-ipsec-udp-encaps-06,
		 *    section 3.1.2
		 */
		if (x->props.mode == XFRM_MODE_TRANSPORT)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	skb_postpull_rcsum(skb, skb_network_header(skb),
			   skb_network_header_len(skb));
	skb_pull_rcsum(skb, hlen);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		skb_reset_transport_header(skb);
	else
		skb_set_transport_header(skb, -hdr_len);

	/* RFC4303: Drop dummy packets without any error */
	if (err == IPPROTO_NONE)
		err = -EINVAL;

out:
	return err;
}
EXPORT_SYMBOL_GPL(esp6_input_done2);
static void esp_input_done(void *data, int err)
{
	struct sk_buff *skb = data;

	xfrm_input_resume(skb, esp6_input_done2(skb, err));
}
static void esp_input_restore_header(struct sk_buff *skb)
{
	esp_restore_header(skb, 0);
	__skb_pull(skb, 4);
}
static void esp_input_set_header(struct sk_buff *skb, __be32 *seqhi)
{
	struct xfrm_state *x = xfrm_input_state(skb);

	/* For ESN we move the header forward by 4 bytes to
	 * accommodate the high bits.  We will move it back after
	 * decryption.
	 */
	if ((x->props.flags & XFRM_STATE_ESN)) {
		struct ip_esp_hdr *esph = skb_push(skb, 4);

		*seqhi = esph->spi;
		esph->spi = esph->seq_no;
		esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi;
	}
}
static void esp_input_done_esn(void *data, int err)
{
	struct sk_buff *skb = data;

	esp_input_restore_header(skb);
	esp_input_done(data, err);
}
static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
{
	struct crypto_aead *aead = x->data;
	struct aead_request *req;
	struct sk_buff *trailer;
	int ivlen = crypto_aead_ivsize(aead);
	int elen = skb->len - sizeof(struct ip_esp_hdr) - ivlen;
	int nfrags;
	int assoclen;
	int seqhilen;
	int ret = 0;
	void *tmp;
	__be32 *seqhi;
	u8 *iv;
	struct scatterlist *sg;

	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + ivlen)) {
		ret = -EINVAL;
		goto out;
	}

	if (elen <= 0) {
		ret = -EINVAL;
		goto out;
	}

	assoclen = sizeof(struct ip_esp_hdr);
	seqhilen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		seqhilen += sizeof(__be32);
		assoclen += seqhilen;
	}

	if (!skb_cloned(skb)) {
		if (!skb_is_nonlinear(skb)) {
			nfrags = 1;

			goto skip_cow;
		} else if (!skb_has_frag_list(skb)) {
			nfrags = skb_shinfo(skb)->nr_frags;
			nfrags++;

			goto skip_cow;
		}
	}

	nfrags = skb_cow_data(skb, 0, &trailer);
	if (nfrags < 0) {
		ret = -EINVAL;
		goto out;
	}

skip_cow:
	ret = -ENOMEM;
	tmp = esp_alloc_tmp(aead, nfrags, seqhilen);
	if (!tmp)
		goto out;

	ESP_SKB_CB(skb)->tmp = tmp;
	seqhi = esp_tmp_extra(tmp);
	iv = esp_tmp_iv(aead, tmp, seqhilen);
	req = esp_tmp_req(aead, iv);
	sg = esp_req_sg(aead, req);

	esp_input_set_header(skb, seqhi);

	sg_init_table(sg, nfrags);
	ret = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(ret < 0)) {
		kfree(tmp);
		goto out;
	}

	skb->ip_summed = CHECKSUM_NONE;

	if ((x->props.flags & XFRM_STATE_ESN))
		aead_request_set_callback(req, 0, esp_input_done_esn, skb);
	else
		aead_request_set_callback(req, 0, esp_input_done, skb);

	aead_request_set_crypt(req, sg, sg, elen + ivlen, iv);
	aead_request_set_ad(req, assoclen);

	ret = crypto_aead_decrypt(req);
	if (ret == -EINPROGRESS)
		goto out;

	if ((x->props.flags & XFRM_STATE_ESN))
		esp_input_restore_header(skb);

	ret = esp6_input_done2(skb, ret);

out:
	return ret;
}
static int esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		    u8 type, u8 code, int offset, __be32 info)
{
	struct net *net = dev_net(skb->dev);
	const struct ipv6hdr *iph = (const struct ipv6hdr *)skb->data;
	struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + offset);
	struct xfrm_state *x;

	if (type != ICMPV6_PKT_TOOBIG &&
	    type != NDISC_REDIRECT)
		return 0;

	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
			      esph->spi, IPPROTO_ESP, AF_INET6);
	if (!x)
		return 0;

	if (type == NDISC_REDIRECT)
		ip6_redirect(skb, net, skb->dev->ifindex, 0,
			     sock_net_uid(net, NULL));
	else
		ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL));
	xfrm_state_put(x);

	return 0;
}
static void esp6_destroy(struct xfrm_state *x)
{
	struct crypto_aead *aead = x->data;

	if (!aead)
		return;

	crypto_free_aead(aead);
}
static int esp_init_aead(struct xfrm_state *x, struct netlink_ext_ack *extack)
{
	char aead_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_aead *aead;
	int err;

	if (snprintf(aead_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
		     x->geniv, x->aead->alg_name) >= CRYPTO_MAX_ALG_NAME) {
		NL_SET_ERR_MSG(extack, "Algorithm name is too long");
		return -ENAMETOOLONG;
	}

	aead = crypto_alloc_aead(aead_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;

	x->data = aead;

	err = crypto_aead_setkey(aead, x->aead->alg_key,
				 (x->aead->alg_key_len + 7) / 8);
	if (err)
		goto error;

	err = crypto_aead_setauthsize(aead, x->aead->alg_icv_len / 8);
	if (err)
		goto error;

	return 0;

error:
	NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
	return err;
}
static int esp_init_authenc(struct xfrm_state *x,
			    struct netlink_ext_ack *extack)
{
	struct crypto_aead *aead;
	struct crypto_authenc_key_param *param;
	struct rtattr *rta;
	char *key;
	char *p;
	char authenc_name[CRYPTO_MAX_ALG_NAME];
	unsigned int keylen;
	int err;

	err = -ENAMETOOLONG;

	if ((x->props.flags & XFRM_STATE_ESN)) {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "%s%sauthencesn(%s,%s)%s",
			     x->geniv ?: "", x->geniv ? "(" : "",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name,
			     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME) {
			NL_SET_ERR_MSG(extack, "Algorithm name is too long");
			goto error;
		}
	} else {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "%s%sauthenc(%s,%s)%s",
			     x->geniv ?: "", x->geniv ? "(" : "",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name,
			     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME) {
			NL_SET_ERR_MSG(extack, "Algorithm name is too long");
			goto error;
		}
	}

	aead = crypto_alloc_aead(authenc_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead)) {
		NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
		goto error;
	}

	x->data = aead;

	keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
		 (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
	err = -ENOMEM;
	key = kmalloc(keylen, GFP_KERNEL);
	if (!key)
		goto error;

	p = key;
	rta = (void *)p;
	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	p += RTA_SPACE(sizeof(*param));

	if (x->aalg) {
		struct xfrm_algo_desc *aalg_desc;

		memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8);
		p += (x->aalg->alg_key_len + 7) / 8;

		aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
		BUG_ON(!aalg_desc);

		err = -EINVAL;
		if (aalg_desc->uinfo.auth.icv_fullbits / 8 !=
		    crypto_aead_authsize(aead)) {
			NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
			goto free_key;
		}

		err = crypto_aead_setauthsize(
			aead, x->aalg->alg_trunc_len / 8);
		if (err) {
			NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
			goto free_key;
		}
	}

	param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
	memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);

	err = crypto_aead_setkey(aead, key, keylen);

free_key:
	kfree(key);

error:
	return err;
}
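/* Illustration of the authenc key blob assembled above (hedged example): for
 * hypothetical hmac(sha256) + cbc(aes) keys the buffer passed to
 * crypto_aead_setkey() looks like
 *
 *	[ rtattr: CRYPTO_AUTHENC_KEYA_PARAM { enckeylen } | auth key | enc key ]
 *
 * and the requested transform name would be "authenc(hmac(sha256),cbc(aes))",
 * or the "authencesn(...)" variant when the SA uses extended sequence numbers.
 */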
static int esp6_init_state(struct xfrm_state *x, struct netlink_ext_ack *extack)
{
	struct crypto_aead *aead;
	u32 align;
	int err;

	x->data = NULL;

	if (x->aead) {
		err = esp_init_aead(x, extack);
	} else if (x->ealg) {
		err = esp_init_authenc(x, extack);
	} else {
		NL_SET_ERR_MSG(extack, "ESP: AEAD or CRYPT must be provided");
		err = -EINVAL;
	}

	if (err)
		goto error;

	aead = x->data;

	x->props.header_len = sizeof(struct ip_esp_hdr) +
			      crypto_aead_ivsize(aead);
	switch (x->props.mode) {
	case XFRM_MODE_BEET:
		if (x->sel.family != AF_INET6)
			x->props.header_len += IPV4_BEET_PHMAXLEN +
					       (sizeof(struct ipv6hdr) - sizeof(struct iphdr));
		break;
	default:
	case XFRM_MODE_TRANSPORT:
		break;
	case XFRM_MODE_TUNNEL:
		x->props.header_len += sizeof(struct ipv6hdr);
		break;
	}

	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;

		switch (encap->encap_type) {
		default:
			NL_SET_ERR_MSG(extack, "Unsupported encapsulation type for ESP");
			err = -EINVAL;
			goto error;
		case UDP_ENCAP_ESPINUDP:
			x->props.header_len += sizeof(struct udphdr);
			break;
#ifdef CONFIG_INET6_ESPINTCP
		case TCP_ENCAP_ESPINTCP:
			/* only the length field, TCP encap is done by
			 * the socket
			 */
			x->props.header_len += 2;
			break;
#endif
		}
	}

	align = ALIGN(crypto_aead_blocksize(aead), 4);
	x->props.trailer_len = align + 1 + crypto_aead_authsize(aead);

error:
	return err;
}
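/* Worked example for the sizing above (hypothetical cipher parameters): an
 * AEAD with an 8 byte IV in tunnel mode gives header_len = 8 (ESP header) +
 * 8 (IV) + 40 (outer IPv6 header); with a block size of 16 and a 12 byte ICV
 * the trailer reserve is trailer_len = 16 + 1 + 12 = 29 bytes.
 */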
static int esp6_rcv_cb(struct sk_buff *skb, int err)
{
	return 0;
}
static const struct xfrm_type esp6_type = {
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_ESP,
	.flags		= XFRM_TYPE_REPLAY_PROT,
	.init_state	= esp6_init_state,
	.destructor	= esp6_destroy,
	.input		= esp6_input,
	.output		= esp6_output,
};
static struct xfrm6_protocol esp6_protocol = {
	.handler	= xfrm6_rcv,
	.input_handler	= xfrm_input,
	.cb_handler	= esp6_rcv_cb,
	.err_handler	= esp6_err,
};
static int __init esp6_init(void)
{
	if (xfrm_register_type(&esp6_type, AF_INET6) < 0) {
		pr_info("%s: can't add xfrm type\n", __func__);
		return -EAGAIN;
	}
	if (xfrm6_protocol_register(&esp6_protocol, IPPROTO_ESP) < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		xfrm_unregister_type(&esp6_type, AF_INET6);
		return -EAGAIN;
	}

	return 0;
}
static void __exit esp6_fini(void)
{
	if (xfrm6_protocol_deregister(&esp6_protocol, IPPROTO_ESP) < 0)
		pr_info("%s: can't remove protocol\n", __func__);
	xfrm_unregister_type(&esp6_type, AF_INET6);
}
module_init(esp6_init);
module_exit(esp6_fini);

MODULE_DESCRIPTION("IPv6 ESP transformation helpers");
MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_ESP);
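/* Usage note (illustrative): thanks to MODULE_ALIAS_XFRM_TYPE above the
 * module is normally auto-loaded when an IPv6 ESP SA is installed, e.g. via
 * "ip xfrm state add ... proto esp ..."; loading it manually with
 * "modprobe esp6" works as well.
 */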