// SPDX-License-Identifier: GPL-2.0-only
#define pr_fmt(fmt) "IPsec: " fmt

#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/pfkeyv2.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in6.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/udp.h>
#include <net/tcp.h>
#include <net/espintcp.h>
#include <linux/skbuff_ref.h>

#include <linux/highmem.h>
struct esp_skb_cb {
	struct xfrm_skb_cb xfrm;
	void *tmp;
};

struct esp_output_extra {
	__be32 seqhi;
	u32 esphoff;
};

#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))
/*
 * Allocate an AEAD request structure with extra space for SG and IV.
 *
 * For alignment considerations the IV is placed at the front, followed
 * by the request and finally the SG list.
 *
 * TODO: Use spare space in skb for this where possible.
 */
static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int extralen)
{
	unsigned int len;

	len = extralen;

	len += crypto_aead_ivsize(aead);

	if (len) {
		len += crypto_aead_alignmask(aead) &
		       ~(crypto_tfm_ctx_alignment() - 1);
		len = ALIGN(len, crypto_tfm_ctx_alignment());
	}

	len += sizeof(struct aead_request) + crypto_aead_reqsize(aead);
	len = ALIGN(len, __alignof__(struct scatterlist));

	len += sizeof(struct scatterlist) * nfrags;

	return kmalloc(len, GFP_ATOMIC);
}
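/* The esp_tmp_*() helpers below recover the individual pieces of the
 * buffer allocated above.  E.g. for an AEAD with an 8-byte IV and
 * extralen = 8, the layout is roughly:
 *
 *	tmp -> [extra][IV][struct aead_request + request ctx][sg[nfrags]]
 *
 * Each helper rederives its pointer from tmp and the alignment rules,
 * so no offsets have to be stored next to the buffer.
 */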
static inline void *esp_tmp_extra(void *tmp)
{
	return PTR_ALIGN(tmp, __alignof__(struct esp_output_extra));
}
static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int extralen)
{
	return crypto_aead_ivsize(aead) ?
	       PTR_ALIGN((u8 *)tmp + extralen,
			 crypto_aead_alignmask(aead) + 1) : tmp + extralen;
}
static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
{
	struct aead_request *req;

	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
				crypto_tfm_ctx_alignment());
	aead_request_set_tfm(req, aead);
	return req;
}
static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
					     struct aead_request *req)
{
	return (void *)ALIGN((unsigned long)(req + 1) +
			     crypto_aead_reqsize(aead),
			     __alignof__(struct scatterlist));
}
static void esp_ssg_unref(struct xfrm_state *x, void *tmp, struct sk_buff *skb)
{
	struct crypto_aead *aead = x->data;
	int extralen = 0;
	u8 *iv;
	struct aead_request *req;
	struct scatterlist *sg;

	if (x->props.flags & XFRM_STATE_ESN)
		extralen += sizeof(struct esp_output_extra);

	iv = esp_tmp_iv(aead, tmp, extralen);
	req = esp_tmp_req(aead, iv);

	/* Unref skb_frag_pages in the src scatterlist if necessary.
	 * Skip the first sg which comes from skb->data.
	 */
	if (req->src != req->dst)
		for (sg = sg_next(req->src); sg; sg = sg_next(sg))
			skb_page_unref(page_to_netmem(sg_page(sg)),
				       skb->pp_recycle);
}
#ifdef CONFIG_INET_ESPINTCP
struct esp_tcp_sk {
	struct sock *sk;
	struct rcu_head rcu;
};

static void esp_free_tcp_sk(struct rcu_head *head)
{
	struct esp_tcp_sk *esk = container_of(head, struct esp_tcp_sk, rcu);

	sock_put(esk->sk);
	kfree(esk);
}
static struct sock *esp_find_tcp_sk(struct xfrm_state *x)
{
	struct xfrm_encap_tmpl *encap = x->encap;
	struct net *net = xs_net(x);
	struct esp_tcp_sk *esk;
	__be16 sport, dport;
	struct sock *nsk;
	struct sock *sk;

	sk = rcu_dereference(x->encap_sk);
	if (sk && sk->sk_state == TCP_ESTABLISHED)
		return sk;

	spin_lock_bh(&x->lock);
	sport = encap->encap_sport;
	dport = encap->encap_dport;
	nsk = rcu_dereference_protected(x->encap_sk,
					lockdep_is_held(&x->lock));
	if (sk && sk == nsk) {
		esk = kmalloc(sizeof(*esk), GFP_ATOMIC);
		if (!esk) {
			spin_unlock_bh(&x->lock);
			return ERR_PTR(-ENOMEM);
		}
		RCU_INIT_POINTER(x->encap_sk, NULL);
		esk->sk = sk;
		call_rcu(&esk->rcu, esp_free_tcp_sk);
	}
	spin_unlock_bh(&x->lock);

	sk = inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo, x->id.daddr.a4,
				     dport, x->props.saddr.a4, sport, 0);
	if (!sk)
		return ERR_PTR(-ENOENT);

	if (!tcp_is_ulp_esp(sk)) {
		sock_put(sk);
		return ERR_PTR(-EINVAL);
	}

	spin_lock_bh(&x->lock);
	nsk = rcu_dereference_protected(x->encap_sk,
					lockdep_is_held(&x->lock));
	if (encap->encap_sport != sport ||
	    encap->encap_dport != dport) {
		sock_put(sk);
		sk = nsk ?: ERR_PTR(-EREMCHG);
	} else if (sk == nsk) {
		sock_put(sk);
	} else {
		rcu_assign_pointer(x->encap_sk, sk);
	}
	spin_unlock_bh(&x->lock);

	return sk;
}
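/* The socket found above is cached in x->encap_sk, so the hash lookup is
 * normally paid only once per state; the cached entry is dropped via
 * call_rcu() and looked up again whenever it is no longer ESTABLISHED or
 * the encap ports on the state change.
 */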
static int esp_output_tcp_finish(struct xfrm_state *x, struct sk_buff *skb)
{
	struct sock *sk;
	int err;

	rcu_read_lock();

	sk = esp_find_tcp_sk(x);
	err = PTR_ERR_OR_ZERO(sk);
	if (err)
		goto out;

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))
		err = espintcp_queue_out(sk, skb);
	else
		err = espintcp_push_skb(sk, skb);
	bh_unlock_sock(sk);

out:
	rcu_read_unlock();
	return err;
}
static int esp_output_tcp_encap_cb(struct net *net, struct sock *sk,
				   struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct xfrm_state *x = dst->xfrm;

	return esp_output_tcp_finish(x, skb);
}
static int esp_output_tail_tcp(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;

	local_bh_disable();
	err = xfrm_trans_queue_net(xs_net(x), skb, esp_output_tcp_encap_cb);
	local_bh_enable();

	/* EINPROGRESS just happens to do the right thing.  It
	 * actually means that the skb has been consumed and
	 * isn't coming back.
	 */
	return err ?: -EINPROGRESS;
}
#else
static int esp_output_tail_tcp(struct xfrm_state *x, struct sk_buff *skb)
{
	WARN_ON(1);
	return -EOPNOTSUPP;
}
#endif
static void esp_output_done(void *data, int err)
{
	struct sk_buff *skb = data;
	struct xfrm_offload *xo = xfrm_offload(skb);
	void *tmp;
	struct xfrm_state *x;

	if (xo && (xo->flags & XFRM_DEV_RESUME)) {
		struct sec_path *sp = skb_sec_path(skb);

		x = sp->xvec[sp->len - 1];
	} else {
		x = skb_dst(skb)->xfrm;
	}

	tmp = ESP_SKB_CB(skb)->tmp;
	esp_ssg_unref(x, tmp, skb);
	kfree(tmp);

	if (xo && (xo->flags & XFRM_DEV_RESUME)) {
		if (err) {
			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			kfree_skb(skb);
			return;
		}

		skb_push(skb, skb->data - skb_mac_header(skb));
		secpath_reset(skb);
		xfrm_dev_resume(skb);
	} else {
		if (!err &&
		    x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP)
			esp_output_tail_tcp(x, skb);
		else
			xfrm_output_resume(skb->sk, skb, err);
	}
}
/* Move ESP header back into place. */
static void esp_restore_header(struct sk_buff *skb, unsigned int offset)
{
	struct ip_esp_hdr *esph = (void *)(skb->data + offset);
	void *tmp = ESP_SKB_CB(skb)->tmp;
	__be32 *seqhi = esp_tmp_extra(tmp);

	esph->seq_no = esph->spi;
	esph->spi = *seqhi;
}
static void esp_output_restore_header(struct sk_buff *skb)
{
	void *tmp = ESP_SKB_CB(skb)->tmp;
	struct esp_output_extra *extra = esp_tmp_extra(tmp);

	esp_restore_header(skb, skb_transport_offset(skb) + extra->esphoff -
				sizeof(__be32));
}
static struct ip_esp_hdr *esp_output_set_extra(struct sk_buff *skb,
					       struct xfrm_state *x,
					       struct ip_esp_hdr *esph,
					       struct esp_output_extra *extra)
{
	/* For ESN we move the header forward by 4 bytes to
	 * accommodate the high bits.  We will move it back after
	 * encryption.
	 */
	if ((x->props.flags & XFRM_STATE_ESN)) {
		__u32 seqhi;
		struct xfrm_offload *xo = xfrm_offload(skb);

		if (xo)
			seqhi = xo->seq.hi;
		else
			seqhi = XFRM_SKB_CB(skb)->seq.output.hi;

		extra->esphoff = (unsigned char *)esph -
				 skb_transport_header(skb);
		esph = (struct ip_esp_hdr *)((unsigned char *)esph - 4);
		extra->seqhi = esph->spi;
		esph->seq_no = htonl(seqhi);
	}

	esph->spi = x->id.spi;

	return esph;
}
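/* Net effect of the ESN shuffle above: while the ICV is computed the
 * packet carries [ spi ][ seq.hi ][ seq.lo ], so the high sequence bits
 * are authenticated as part of the associated data, but the final packet
 * on the wire carries only [ spi ][ seq.lo ].
 * esp_output_restore_header() puts the borrowed four bytes back once the
 * crypto operation completes.
 */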
static void esp_output_done_esn(void *data, int err)
{
	struct sk_buff *skb = data;

	esp_output_restore_header(skb);
	esp_output_done(data, err);
}
static struct ip_esp_hdr *esp_output_udp_encap(struct sk_buff *skb,
					       int encap_type,
					       struct esp_info *esp,
					       __be16 sport,
					       __be16 dport)
{
	struct udphdr *uh;
	unsigned int len;
	struct xfrm_offload *xo = xfrm_offload(skb);

	len = skb->len + esp->tailen - skb_transport_offset(skb);
	if (len + sizeof(struct iphdr) > IP_MAX_MTU)
		return ERR_PTR(-EMSGSIZE);

	uh = (struct udphdr *)esp->esph;
	uh->source = sport;
	uh->dest = dport;
	uh->len = htons(len);
	uh->check = 0;

	/* For IPv4 ESP with UDP encapsulation, if xo is not null, the skb is in the crypto offload
	 * data path, which means that esp_output_udp_encap is called outside of the XFRM stack.
	 * In this case, the mac header doesn't point to the IPv4 protocol field, so don't set it.
	 */
	if (!xo || encap_type != UDP_ENCAP_ESPINUDP)
		*skb_mac_header(skb) = IPPROTO_UDP;

	return (struct ip_esp_hdr *)(uh + 1);
}
#ifdef CONFIG_INET_ESPINTCP
static struct ip_esp_hdr *esp_output_tcp_encap(struct xfrm_state *x,
					       struct sk_buff *skb,
					       struct esp_info *esp)
{
	__be16 *lenp = (void *)esp->esph;
	struct ip_esp_hdr *esph;
	unsigned int len;
	struct sock *sk;

	len = skb->len + esp->tailen - skb_transport_offset(skb);
	if (len > IP_MAX_MTU)
		return ERR_PTR(-EMSGSIZE);

	rcu_read_lock();
	sk = esp_find_tcp_sk(x);
	rcu_read_unlock();

	if (IS_ERR(sk))
		return ERR_CAST(sk);

	*lenp = htons(len);
	esph = (struct ip_esp_hdr *)(lenp + 1);

	return esph;
}
#else
static struct ip_esp_hdr *esp_output_tcp_encap(struct xfrm_state *x,
					       struct sk_buff *skb,
					       struct esp_info *esp)
{
	return ERR_PTR(-EOPNOTSUPP);
}
#endif
static int esp_output_encap(struct xfrm_state *x, struct sk_buff *skb,
			    struct esp_info *esp)
{
	struct xfrm_encap_tmpl *encap = x->encap;
	struct ip_esp_hdr *esph;
	__be16 sport, dport;
	int encap_type;

	spin_lock_bh(&x->lock);
	sport = encap->encap_sport;
	dport = encap->encap_dport;
	encap_type = encap->encap_type;
	spin_unlock_bh(&x->lock);

	switch (encap_type) {
	default:
	case UDP_ENCAP_ESPINUDP:
		esph = esp_output_udp_encap(skb, encap_type, esp, sport, dport);
		break;
	case TCP_ENCAP_ESPINTCP:
		esph = esp_output_tcp_encap(x, skb, esp);
		break;
	}

	if (IS_ERR(esph))
		return PTR_ERR(esph);

	esp->esph = esph;

	return 0;
}
int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
	u8 *tail;
	int nfrags;
	int esph_offset;
	struct page *page;
	struct sk_buff *trailer;
	int tailen = esp->tailen;

	/* this is non-NULL only with TCP/UDP Encapsulation */
	if (x->encap) {
		int err = esp_output_encap(x, skb, esp);

		if (err < 0)
			return err;
	}

	if (ALIGN(tailen, L1_CACHE_BYTES) > PAGE_SIZE ||
	    ALIGN(skb->data_len, L1_CACHE_BYTES) > PAGE_SIZE)
		goto cow;

	if (!skb_cloned(skb)) {
		if (tailen <= skb_tailroom(skb)) {
			nfrags = 1;
			trailer = skb;
			tail = skb_tail_pointer(trailer);

			goto skip_cow;
		} else if ((skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS)
			   && !skb_has_frag_list(skb)) {
			int allocsize;
			struct sock *sk = skb->sk;
			struct page_frag *pfrag = &x->xfrag;

			esp->inplace = false;

			allocsize = ALIGN(tailen, L1_CACHE_BYTES);

			spin_lock_bh(&x->lock);

			if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
				spin_unlock_bh(&x->lock);
				goto cow;
			}

			page = pfrag->page;
			get_page(page);

			tail = page_address(page) + pfrag->offset;

			esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);

			nfrags = skb_shinfo(skb)->nr_frags;

			__skb_fill_page_desc(skb, nfrags, page, pfrag->offset,
					     tailen);
			skb_shinfo(skb)->nr_frags = ++nfrags;

			pfrag->offset = pfrag->offset + allocsize;

			spin_unlock_bh(&x->lock);

			nfrags++;

			skb_len_add(skb, tailen);
			if (sk && sk_fullsock(sk))
				refcount_add(tailen, &sk->sk_wmem_alloc);

			goto out;
		}
	}

cow:
	esph_offset = (unsigned char *)esp->esph - skb_transport_header(skb);

	nfrags = skb_cow_data(skb, tailen, &trailer);
	if (nfrags < 0)
		goto out;
	tail = skb_tail_pointer(trailer);
	esp->esph = (struct ip_esp_hdr *)(skb_transport_header(skb) + esph_offset);

skip_cow:
	esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);
	pskb_put(skb, trailer, tailen);

out:
	return nfrags;
}
EXPORT_SYMBOL_GPL(esp_output_head);
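/* esp_output_head() attaches the ESP trailer in one of three ways: in the
 * tailroom of an unshared skb, in a freshly refilled page fragment that is
 * appended as a new frag (setting esp->inplace = false), or, failing both,
 * via skb_cow_data().  In every case the return value is the number of
 * scatterlist entries that esp_output_tail() must reserve.
 */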
int esp_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
	u8 *iv;
	int alen;
	void *tmp;
	int ivlen;
	int assoclen;
	int extralen;
	struct page *page;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct aead_request *req;
	struct scatterlist *sg, *dsg;
	struct esp_output_extra *extra;
	int err = -ENOMEM;

	assoclen = sizeof(struct ip_esp_hdr);
	extralen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		extralen += sizeof(*extra);
		assoclen += sizeof(__be32);
	}

	aead = x->data;
	alen = crypto_aead_authsize(aead);
	ivlen = crypto_aead_ivsize(aead);

	tmp = esp_alloc_tmp(aead, esp->nfrags + 2, extralen);
	if (!tmp)
		goto error;

	extra = esp_tmp_extra(tmp);
	iv = esp_tmp_iv(aead, tmp, extralen);
	req = esp_tmp_req(aead, iv);
	sg = esp_req_sg(aead, req);

	if (esp->inplace)
		dsg = sg;
	else
		dsg = &sg[esp->nfrags];

	esph = esp_output_set_extra(skb, x, esp->esph, extra);
	esp->esph = esph;

	sg_init_table(sg, esp->nfrags);
	err = skb_to_sgvec(skb, sg,
			   (unsigned char *)esph - skb->data,
			   assoclen + ivlen + esp->clen + alen);
	if (unlikely(err < 0))
		goto error_free;

	if (!esp->inplace) {
		int allocsize;
		struct page_frag *pfrag = &x->xfrag;

		allocsize = ALIGN(skb->data_len, L1_CACHE_BYTES);

		spin_lock_bh(&x->lock);
		if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
			spin_unlock_bh(&x->lock);
			goto error_free;
		}

		skb_shinfo(skb)->nr_frags = 1;

		page = pfrag->page;
		get_page(page);
		/* replace page frags in skb with new page */
		__skb_fill_page_desc(skb, 0, page, pfrag->offset, skb->data_len);
		pfrag->offset = pfrag->offset + allocsize;
		spin_unlock_bh(&x->lock);

		sg_init_table(dsg, skb_shinfo(skb)->nr_frags + 1);
		err = skb_to_sgvec(skb, dsg,
				   (unsigned char *)esph - skb->data,
				   assoclen + ivlen + esp->clen + alen);
		if (unlikely(err < 0))
			goto error_free;
	}

	if ((x->props.flags & XFRM_STATE_ESN))
		aead_request_set_callback(req, 0, esp_output_done_esn, skb);
	else
		aead_request_set_callback(req, 0, esp_output_done, skb);

	aead_request_set_crypt(req, sg, dsg, ivlen + esp->clen, iv);
	aead_request_set_ad(req, assoclen);

	memset(iv, 0, ivlen);
	memcpy(iv + ivlen - min(ivlen, 8), (u8 *)&esp->seqno + 8 - min(ivlen, 8),
	       min(ivlen, 8));

	ESP_SKB_CB(skb)->tmp = tmp;
	err = crypto_aead_encrypt(req);

	switch (err) {
	case -EINPROGRESS:
		goto error;

	case -ENOSPC:
		err = NET_XMIT_DROP;
		break;

	case 0:
		if ((x->props.flags & XFRM_STATE_ESN))
			esp_output_restore_header(skb);
	}

	if (sg != dsg)
		esp_ssg_unref(x, tmp, skb);

	if (!err && x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP)
		err = esp_output_tail_tcp(x, skb);

error_free:
	kfree(tmp);
error:
	return err;
}
EXPORT_SYMBOL_GPL(esp_output_tail);
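/* Rough sketch of the scatterlists set up above: sg[] always describes the
 * whole ESP payload (header, IV, plaintext and room for the ICV).  When the
 * trailer lives in a separate page fragment (esp->inplace == false), dsg
 * starts at &sg[esp->nfrags] and describes the destination, so the cipher
 * reads from the original buffers and writes into the new fragments.
 */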
static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int alen;
	int blksize;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct esp_info esp;

	esp.inplace = true;

	esp.proto = *skb_mac_header(skb);
	*skb_mac_header(skb) = IPPROTO_ESP;

	/* skb is pure payload to encrypt */

	aead = x->data;
	alen = crypto_aead_authsize(aead);

	esp.tfclen = 0;
	if (x->tfcpad) {
		struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
		u32 padto;

		padto = min(x->tfcpad, xfrm_state_mtu(x, dst->child_mtu_cached));
		if (skb->len < padto)
			esp.tfclen = padto - skb->len;
	}
	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
	esp.plen = esp.clen - skb->len - esp.tfclen;
	esp.tailen = esp.tfclen + esp.plen + alen;

	esp.esph = ip_esp_hdr(skb);

	esp.nfrags = esp_output_head(x, skb, &esp);
	if (esp.nfrags < 0)
		return esp.nfrags;

	esph = esp.esph;
	esph->spi = x->id.spi;

	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
	esp.seqno = cpu_to_be64(XFRM_SKB_CB(skb)->seq.output.low +
				((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32));

	skb_push(skb, -skb_network_offset(skb));

	return esp_output_tail(x, skb, &esp);
}
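/* Worked example of the length arithmetic above, assuming a 16-byte block
 * cipher, no TFC padding and a 16-byte ICV: for skb->len = 100,
 * clen = ALIGN(100 + 2, 16) = 112 and plen = 112 - 100 = 12 (ten pad bytes
 * plus the pad-length and next-header octets), giving
 * tailen = 0 + 12 + 16 = 28 trailer bytes behind the payload.
 */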
static inline int esp_remove_trailer(struct sk_buff *skb)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	struct crypto_aead *aead = x->data;
	int alen, hlen, elen;
	int padlen, trimlen;
	__wsum csumdiff;
	u8 nexthdr[2];
	int ret;

	alen = crypto_aead_authsize(aead);
	hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
	elen = skb->len - hlen;

	if (skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2))
		BUG();

	ret = -EINVAL;
	padlen = nexthdr[0];
	if (padlen + 2 + alen >= elen) {
		net_dbg_ratelimited("ipsec esp packet is garbage padlen=%d, elen=%d\n",
				    padlen + 2, elen - alen);
		goto out;
	}

	trimlen = alen + padlen + 2;
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		csumdiff = skb_checksum(skb, skb->len - trimlen, trimlen, 0);
		skb->csum = csum_block_sub(skb->csum, csumdiff,
					   skb->len - trimlen);
	}
	ret = pskb_trim(skb, skb->len - trimlen);
	if (unlikely(ret))
		goto out;

	ret = nexthdr[1];

out:
	return ret;
}
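/* The trailer removed above has the RFC 4303 layout:
 *
 *	[ padding (padlen octets) ][ pad length ][ next header ][ ICV ]
 *
 * nexthdr[0] holds the pad length and nexthdr[1] the inner protocol,
 * which is what esp_remove_trailer() returns on success.
 */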
int esp_input_done2(struct sk_buff *skb, int err)
{
	const struct iphdr *iph;
	struct xfrm_state *x = xfrm_input_state(skb);
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct crypto_aead *aead = x->data;
	int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
	int ihl;

	if (!xo || !(xo->flags & CRYPTO_DONE))
		kfree(ESP_SKB_CB(skb)->tmp);

	if (unlikely(err))
		goto out;

	err = esp_remove_trailer(skb);
	if (unlikely(err < 0))
		goto out;

	iph = ip_hdr(skb);
	ihl = iph->ihl * 4;

	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;
		struct tcphdr *th = (void *)(skb_network_header(skb) + ihl);
		struct udphdr *uh = (void *)(skb_network_header(skb) + ihl);
		__be16 source;

		switch (x->encap->encap_type) {
		case TCP_ENCAP_ESPINTCP:
			source = th->source;
			break;
		case UDP_ENCAP_ESPINUDP:
			source = uh->source;
			break;
		default:
			WARN_ON_ONCE(1);
			err = -EINVAL;
			goto out;
		}

		/*
		 * 1) if the NAT-T peer's IP or port changed then
		 *    advertise the change to the keying daemon.
		 *    This is an inbound SA, so just compare
		 *    SRC ports.
		 */
		if (iph->saddr != x->props.saddr.a4 ||
		    source != encap->encap_sport) {
			xfrm_address_t ipaddr;

			ipaddr.a4 = iph->saddr;
			km_new_mapping(x, &ipaddr, source);

			/* XXX: perhaps add an extra
			 * policy check here, to see
			 * if we should allow or
			 * reject a packet from a
			 * different source
			 * address/port.
			 */
		}

		/*
		 * 2) ignore UDP/TCP checksums in case
		 *    of NAT-T in Transport Mode, or
		 *    perform other post-processing fixes
		 *    as per draft-ietf-ipsec-udp-encaps-06,
		 *    section 3.1.2
		 */
		if (x->props.mode == XFRM_MODE_TRANSPORT)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	skb_pull_rcsum(skb, hlen);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		skb_reset_transport_header(skb);
	else
		skb_set_transport_header(skb, -ihl);

	/* RFC4303: Drop dummy packets without any error */
	if (err == IPPROTO_NONE)
		err = -EINVAL;

out:
	return err;
}
EXPORT_SYMBOL_GPL(esp_input_done2);
static void esp_input_done(void *data, int err)
{
	struct sk_buff *skb = data;

	xfrm_input_resume(skb, esp_input_done2(skb, err));
}

static void esp_input_restore_header(struct sk_buff *skb)
{
	esp_restore_header(skb, 0);
	__skb_pull(skb, 4);
}
static void esp_input_set_header(struct sk_buff *skb, __be32 *seqhi)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	struct ip_esp_hdr *esph;

	/* For ESN we move the header forward by 4 bytes to
	 * accommodate the high bits.  We will move it back after
	 * decryption.
	 */
	if ((x->props.flags & XFRM_STATE_ESN)) {
		esph = skb_push(skb, 4);
		*seqhi = esph->spi;
		esph->spi = esph->seq_no;
		esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi;
	}
}
static void esp_input_done_esn(void *data, int err)
{
	struct sk_buff *skb = data;

	esp_input_restore_header(skb);
	esp_input_done(data, err);
}
/*
 * Note: detecting truncated vs. non-truncated authentication data is very
 * expensive, so we only support truncated data, which is the recommended
 * and common case.
 */
static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
{
	struct crypto_aead *aead = x->data;
	struct aead_request *req;
	struct sk_buff *trailer;
	int ivlen = crypto_aead_ivsize(aead);
	int elen = skb->len - sizeof(struct ip_esp_hdr) - ivlen;
	int nfrags;
	int assoclen;
	int seqhilen;
	__be32 *seqhi;
	void *tmp;
	u8 *iv;
	struct scatterlist *sg;
	int err = -EINVAL;

	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + ivlen))
		goto out;

	if (elen <= 0)
		goto out;

	assoclen = sizeof(struct ip_esp_hdr);
	seqhilen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		seqhilen += sizeof(__be32);
		assoclen += seqhilen;
	}

	if (!skb_cloned(skb)) {
		if (!skb_is_nonlinear(skb)) {
			nfrags = 1;

			goto skip_cow;
		} else if (!skb_has_frag_list(skb)) {
			nfrags = skb_shinfo(skb)->nr_frags;
			nfrags++;

			goto skip_cow;
		}
	}

	err = skb_cow_data(skb, 0, &trailer);
	if (err < 0)
		goto out;

	nfrags = err;

skip_cow:
	err = -ENOMEM;
	tmp = esp_alloc_tmp(aead, nfrags, seqhilen);
	if (!tmp)
		goto out;

	ESP_SKB_CB(skb)->tmp = tmp;
	seqhi = esp_tmp_extra(tmp);
	iv = esp_tmp_iv(aead, tmp, seqhilen);
	req = esp_tmp_req(aead, iv);
	sg = esp_req_sg(aead, req);

	esp_input_set_header(skb, seqhi);

	sg_init_table(sg, nfrags);
	err = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(err < 0)) {
		kfree(tmp);
		goto out;
	}

	skb->ip_summed = CHECKSUM_NONE;

	if ((x->props.flags & XFRM_STATE_ESN))
		aead_request_set_callback(req, 0, esp_input_done_esn, skb);
	else
		aead_request_set_callback(req, 0, esp_input_done, skb);

	aead_request_set_crypt(req, sg, sg, elen + ivlen, iv);
	aead_request_set_ad(req, assoclen);

	err = crypto_aead_decrypt(req);
	if (err == -EINPROGRESS)
		goto out;

	if ((x->props.flags & XFRM_STATE_ESN))
		esp_input_restore_header(skb);

	err = esp_input_done2(skb, err);

out:
	return err;
}
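/* Unlike the output path, decryption always runs in place: sg serves as
 * both source and destination, so no separate destination scatterlist or
 * page-fragment handling is needed here.
 */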
static int esp4_err(struct sk_buff *skb, u32 info)
{
	struct net *net = dev_net(skb->dev);
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + (iph->ihl << 2));
	struct xfrm_state *x;

	switch (icmp_hdr(skb)->type) {
	case ICMP_DEST_UNREACH:
		if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
			return 0;
		break;
	case ICMP_REDIRECT:
		break;
	default:
		return 0;
	}

	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
			      esph->spi, IPPROTO_ESP, AF_INET);
	if (!x)
		return 0;

	if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
		ipv4_update_pmtu(skb, net, info, 0, IPPROTO_ESP);
	else
		ipv4_redirect(skb, net, 0, IPPROTO_ESP);
	xfrm_state_put(x);

	return 0;
}
static void esp_destroy(struct xfrm_state *x)
{
	struct crypto_aead *aead = x->data;

	if (!aead)
		return;

	crypto_free_aead(aead);
}
static int esp_init_aead(struct xfrm_state *x, struct netlink_ext_ack *extack)
{
	char aead_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_aead *aead;
	int err;

	if (snprintf(aead_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
		     x->geniv, x->aead->alg_name) >= CRYPTO_MAX_ALG_NAME) {
		NL_SET_ERR_MSG(extack, "Algorithm name is too long");
		return -ENAMETOOLONG;
	}

	aead = crypto_alloc_aead(aead_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;

	x->data = aead;

	err = crypto_aead_setkey(aead, x->aead->alg_key,
				 (x->aead->alg_key_len + 7) / 8);
	if (err)
		goto error;

	err = crypto_aead_setauthsize(aead, x->aead->alg_icv_len / 8);
	if (err)
		goto error;

	return 0;

error:
	NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
	return err;
}
static int esp_init_authenc(struct xfrm_state *x,
			    struct netlink_ext_ack *extack)
{
	struct crypto_aead *aead;
	struct crypto_authenc_key_param *param;
	struct rtattr *rta;
	char *key;
	char *p;
	char authenc_name[CRYPTO_MAX_ALG_NAME];
	unsigned int keylen;
	int err;

	err = -ENAMETOOLONG;

	if ((x->props.flags & XFRM_STATE_ESN)) {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "%s%sauthencesn(%s,%s)%s",
			     x->geniv ?: "", x->geniv ? "(" : "",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name,
			     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME) {
			NL_SET_ERR_MSG(extack, "Algorithm name is too long");
			goto error;
		}
	} else {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "%s%sauthenc(%s,%s)%s",
			     x->geniv ?: "", x->geniv ? "(" : "",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name,
			     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME) {
			NL_SET_ERR_MSG(extack, "Algorithm name is too long");
			goto error;
		}
	}

	aead = crypto_alloc_aead(authenc_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead)) {
		NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
		goto error;
	}

	x->data = aead;

	keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
		 (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
	err = -ENOMEM;
	key = kmalloc(keylen, GFP_KERNEL);
	if (!key)
		goto error;

	p = key;
	rta = (void *)p;
	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	p += RTA_SPACE(sizeof(*param));

	if (x->aalg) {
		struct xfrm_algo_desc *aalg_desc;

		memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8);
		p += (x->aalg->alg_key_len + 7) / 8;

		aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
		BUG_ON(!aalg_desc);

		err = -EINVAL;
		if (aalg_desc->uinfo.auth.icv_fullbits / 8 !=
		    crypto_aead_authsize(aead)) {
			NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
			goto free_key;
		}

		err = crypto_aead_setauthsize(
			aead, x->aalg->alg_trunc_len / 8);
		if (err) {
			NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
			goto free_key;
		}
	}

	param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
	memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);

	err = crypto_aead_setkey(aead, key, keylen);

free_key:
	kfree_sensitive(key);

error:
	return err;
}
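/* The key blob assembled above follows the crypto_authenc layout:
 *
 *	[ rtattr CRYPTO_AUTHENC_KEYA_PARAM { enckeylen } ]
 *	[ authentication key ][ encryption key ]
 *
 * crypto_aead_setkey() on an "authenc(...)" transform parses this blob
 * to distribute the keys to the inner hash and cipher.
 */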
static int esp_init_state(struct xfrm_state *x, struct netlink_ext_ack *extack)
{
	struct crypto_aead *aead;
	u32 align;
	int err;

	x->data = NULL;

	if (x->aead) {
		err = esp_init_aead(x, extack);
	} else if (x->ealg) {
		err = esp_init_authenc(x, extack);
	} else {
		NL_SET_ERR_MSG(extack, "ESP: AEAD or CRYPT must be provided");
		err = -EINVAL;
	}

	if (err)
		goto error;

	aead = x->data;

	x->props.header_len = sizeof(struct ip_esp_hdr) +
			      crypto_aead_ivsize(aead);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		x->props.header_len += sizeof(struct iphdr);
	else if (x->props.mode == XFRM_MODE_BEET && x->sel.family != AF_INET6)
		x->props.header_len += IPV4_BEET_PHMAXLEN;
	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;

		switch (encap->encap_type) {
		default:
			NL_SET_ERR_MSG(extack, "Unsupported encapsulation type for ESP");
			err = -EINVAL;
			goto error;
		case UDP_ENCAP_ESPINUDP:
			x->props.header_len += sizeof(struct udphdr);
			break;
#ifdef CONFIG_INET_ESPINTCP
		case TCP_ENCAP_ESPINTCP:
			/* only the length field, TCP encap is done by
			 * the socket
			 */
			x->props.header_len += 2;
			break;
#endif
		}
	}

	align = ALIGN(crypto_aead_blocksize(aead), 4);
	x->props.trailer_len = align + 1 + crypto_aead_authsize(aead);

error:
	return err;
}
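/* Example of the budgeting above, assuming tunnel mode ESP-in-UDP with an
 * 8-byte IV and 16-byte cipher blocks: header_len = 8 (ESP) + 8 (IV) +
 * 20 (outer IPv4) + 8 (UDP) = 44 bytes, and trailer_len = 16 + 1 + authsize,
 * a worst-case bound used for MTU accounting rather than the exact
 * per-packet trailer size.
 */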
static int esp4_rcv_cb(struct sk_buff *skb, int err)
{
	return 0;
}
static const struct xfrm_type esp_type =
{
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_ESP,
	.flags		= XFRM_TYPE_REPLAY_PROT,
	.init_state	= esp_init_state,
	.destructor	= esp_destroy,
	.input		= esp_input,
	.output		= esp_output,
};
static struct xfrm4_protocol esp4_protocol = {
	.handler	= xfrm4_rcv,
	.input_handler	= xfrm_input,
	.cb_handler	= esp4_rcv_cb,
	.err_handler	= esp4_err,
	.priority	= 0,
};
static int __init esp4_init(void)
{
	if (xfrm_register_type(&esp_type, AF_INET) < 0) {
		pr_info("%s: can't add xfrm type\n", __func__);
		return -EAGAIN;
	}
	if (xfrm4_protocol_register(&esp4_protocol, IPPROTO_ESP) < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		xfrm_unregister_type(&esp_type, AF_INET);
		return -EAGAIN;
	}

	return 0;
}

static void __exit esp4_fini(void)
{
	if (xfrm4_protocol_deregister(&esp4_protocol, IPPROTO_ESP) < 0)
		pr_info("%s: can't remove protocol\n", __func__);
	xfrm_unregister_type(&esp_type, AF_INET);
}
module_init(esp4_init);
module_exit(esp4_fini);
MODULE_DESCRIPTION("IPv4 ESP transformation library");
MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET, XFRM_PROTO_ESP);