#define pr_fmt(fmt) "IPsec: " fmt

#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/pfkeyv2.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in6.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/udp.h>
#include <linux/highmem.h>
struct esp_skb_cb {
	struct xfrm_skb_cb xfrm;
	void *tmp;
};

struct esp_output_extra {
	__be32 seqhi;
	u32 esphoff;
};

#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))
static u32 esp4_get_mtu(struct xfrm_state *x, int mtu);
/*
 * Allocate an AEAD request structure with extra space for SG and IV.
 *
 * For alignment considerations the IV is placed at the front, followed
 * by the request and finally the SG list.
 *
 * TODO: Use spare space in skb for this where possible.
 */
static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int extralen)
{
	unsigned int len;

	len = extralen;

	len += crypto_aead_ivsize(aead);

	if (len) {
		len += crypto_aead_alignmask(aead) &
		       ~(crypto_tfm_ctx_alignment() - 1);
		len = ALIGN(len, crypto_tfm_ctx_alignment());
	}

	len += sizeof(struct aead_request) + crypto_aead_reqsize(aead);
	len = ALIGN(len, __alignof__(struct scatterlist));

	len += sizeof(struct scatterlist) * nfrags;

	return kmalloc(len, GFP_ATOMIC);
}
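
/* The buffer returned above is carved up by the esp_tmp_*() helpers below:
 * extra/seqhi space first, then the IV, then the aead_request (plus the
 * transform's request context), and finally the scatterlist entries, each
 * region aligned as required.
 */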
static inline void *esp_tmp_extra(void *tmp)
{
	return PTR_ALIGN(tmp, __alignof__(struct esp_output_extra));
}
static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int extralen)
{
	return crypto_aead_ivsize(aead) ?
	       PTR_ALIGN((u8 *)tmp + extralen,
			 crypto_aead_alignmask(aead) + 1) : tmp + extralen;
}
static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
{
	struct aead_request *req;

	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
				crypto_tfm_ctx_alignment());
	aead_request_set_tfm(req, aead);
	return req;
}
static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
					     struct aead_request *req)
{
	return (void *)ALIGN((unsigned long)(req + 1) +
			     crypto_aead_reqsize(aead),
			     __alignof__(struct scatterlist));
}
static void esp_ssg_unref(struct xfrm_state *x, void *tmp)
{
	struct esp_output_extra *extra = esp_tmp_extra(tmp);
	struct crypto_aead *aead = x->data;
	int extralen = 0;
	u8 *iv;
	struct aead_request *req;
	struct scatterlist *sg;

	if (x->props.flags & XFRM_STATE_ESN)
		extralen += sizeof(*extra);

	extra = esp_tmp_extra(tmp);
	iv = esp_tmp_iv(aead, tmp, extralen);
	req = esp_tmp_req(aead, iv);

	/* Unref skb_frag_pages in the src scatterlist if necessary.
	 * Skip the first sg which comes from skb->data.
	 */
	if (req->src != req->dst)
		for (sg = sg_next(req->src); sg; sg = sg_next(sg))
			put_page(sg_page(sg));
}
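
/* Completion callback for asynchronous encryption: drop the temporary
 * buffer and either hand the skb back to the offloading device
 * (XFRM_DEV_RESUME) or resume the normal xfrm output path.
 */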
static void esp_output_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct xfrm_offload *xo = xfrm_offload(skb);
	void *tmp;
	struct xfrm_state *x;

	if (xo && (xo->flags & XFRM_DEV_RESUME))
		x = skb->sp->xvec[skb->sp->len - 1];
	else
		x = skb_dst(skb)->xfrm;

	tmp = ESP_SKB_CB(skb)->tmp;
	esp_ssg_unref(x, tmp);
	kfree(tmp);

	if (xo && (xo->flags & XFRM_DEV_RESUME)) {
		if (err) {
			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			kfree_skb(skb);
			return;
		}

		skb_push(skb, skb->data - skb_mac_header(skb));
		secpath_reset(skb);
		xfrm_dev_resume(skb);
	} else {
		xfrm_output_resume(skb, err);
	}
}
/* Move ESP header back into place. */
static void esp_restore_header(struct sk_buff *skb, unsigned int offset)
{
	struct ip_esp_hdr *esph = (void *)(skb->data + offset);
	void *tmp = ESP_SKB_CB(skb)->tmp;
	__be32 *seqhi = esp_tmp_extra(tmp);

	esph->seq_no = esph->spi;
	esph->spi = *seqhi;
}
static void esp_output_restore_header(struct sk_buff *skb)
{
	void *tmp = ESP_SKB_CB(skb)->tmp;
	struct esp_output_extra *extra = esp_tmp_extra(tmp);

	esp_restore_header(skb, skb_transport_offset(skb) + extra->esphoff -
				sizeof(__be32));
}
static struct ip_esp_hdr *esp_output_set_extra(struct sk_buff *skb,
					       struct xfrm_state *x,
					       struct ip_esp_hdr *esph,
					       struct esp_output_extra *extra)
{
	/* For ESN we move the header forward by 4 bytes to
	 * accommodate the high bits.  We will move it back after
	 * encryption.
	 */
	if ((x->props.flags & XFRM_STATE_ESN)) {
		__u32 seqhi;
		struct xfrm_offload *xo = xfrm_offload(skb);

		if (xo)
			seqhi = xo->seq.hi;
		else
			seqhi = XFRM_SKB_CB(skb)->seq.output.hi;

		extra->esphoff = (unsigned char *)esph -
				 skb_transport_header(skb);
		esph = (struct ip_esp_hdr *)((unsigned char *)esph - 4);
		extra->seqhi = esph->spi;
		esph->seq_no = htonl(seqhi);
	}

	esph->spi = x->id.spi;

	return esph;
}
static void esp_output_done_esn(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	esp_output_restore_header(skb);
	esp_output_done(base, err);
}
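
/* Build the ESP trailer as defined by RFC 4303: optional TFC padding,
 * self-describing pad bytes 1, 2, 3, ..., the pad length byte and
 * finally the next header field.
 */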
static void esp_output_fill_trailer(u8 *tail, int tfclen, int plen, __u8 proto)
{
	/* Fill padding... */
	if (tfclen) {
		memset(tail, 0, tfclen);
		tail += tfclen;
	}
	do {
		int i;
		for (i = 0; i < plen - 2; i++)
			tail[i] = i + 1;
	} while (0);
	tail[plen - 2] = plen - 2;
	tail[plen - 1] = proto;
}
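
/* NAT traversal: place a UDP header (and, for the non-IKE variant, an
 * eight byte zero marker) in front of the ESP header, using the ports
 * negotiated in the encapsulation template.
 */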
static void esp_output_udp_encap(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
	int encap_type;
	struct udphdr *uh;
	__be32 *udpdata32;
	__be16 sport, dport;
	struct xfrm_encap_tmpl *encap = x->encap;
	struct ip_esp_hdr *esph = esp->esph;

	spin_lock_bh(&x->lock);
	sport = encap->encap_sport;
	dport = encap->encap_dport;
	encap_type = encap->encap_type;
	spin_unlock_bh(&x->lock);

	uh = (struct udphdr *)esph;
	uh->source = sport;
	uh->dest = dport;
	uh->len = htons(skb->len + esp->tailen
			- skb_transport_offset(skb));
	uh->check = 0;

	switch (encap_type) {
	default:
	case UDP_ENCAP_ESPINUDP:
		esph = (struct ip_esp_hdr *)(uh + 1);
		break;
	case UDP_ENCAP_ESPINUDP_NON_IKE:
		udpdata32 = (__be32 *)(uh + 1);
		udpdata32[0] = udpdata32[1] = 0;
		esph = (struct ip_esp_hdr *)(udpdata32 + 2);
		break;
	}

	*skb_mac_header(skb) = IPPROTO_UDP;
	esp->esph = esph;
}
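
/* Append the ESP trailer to the packet.  Use existing tailroom when
 * possible, otherwise attach a page fragment or fall back to
 * copy-on-write of the skb data.  Returns the number of fragments
 * needed for the scatterlist, or a negative error.
 */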
int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
	u8 *tail;
	u8 *vaddr;
	int nfrags;
	int esph_offset;
	struct page *page;
	struct sk_buff *trailer;
	int tailen = esp->tailen;

	/* this is non-NULL only with UDP Encapsulation */
	if (x->encap)
		esp_output_udp_encap(x, skb, esp);

	if (!skb_cloned(skb)) {
		if (tailen <= skb_tailroom(skb)) {
			nfrags = 1;
			trailer = skb;
			tail = skb_tail_pointer(trailer);

			goto skip_cow;
		} else if ((skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS)
			   && !skb_has_frag_list(skb)) {
			int allocsize;
			struct sock *sk = skb->sk;
			struct page_frag *pfrag = &x->xfrag;

			esp->inplace = false;

			allocsize = ALIGN(tailen, L1_CACHE_BYTES);

			spin_lock_bh(&x->lock);

			if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
				spin_unlock_bh(&x->lock);
				goto cow;
			}

			page = pfrag->page;
			get_page(page);

			vaddr = kmap_atomic(page);

			tail = vaddr + pfrag->offset;

			esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);

			kunmap_atomic(vaddr);

			nfrags = skb_shinfo(skb)->nr_frags;

			__skb_fill_page_desc(skb, nfrags, page, pfrag->offset,
					     tailen);
			skb_shinfo(skb)->nr_frags = ++nfrags;

			pfrag->offset = pfrag->offset + allocsize;

			spin_unlock_bh(&x->lock);

			nfrags++;

			skb->len += tailen;
			skb->data_len += tailen;
			skb->truesize += tailen;
			if (sk)
				refcount_add(tailen, &sk->sk_wmem_alloc);

			goto out;
		}
	}

cow:
	esph_offset = (unsigned char *)esp->esph - skb_transport_header(skb);

	nfrags = skb_cow_data(skb, tailen, &trailer);
	if (nfrags < 0)
		goto out;
	tail = skb_tail_pointer(trailer);
	esp->esph = (struct ip_esp_hdr *)(skb_transport_header(skb) + esph_offset);

skip_cow:
	esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);
	pskb_put(skb, trailer, tailen);

out:
	return nfrags;
}
EXPORT_SYMBOL_GPL(esp_output_head);
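
/* Perform the actual encryption: build the source and destination
 * scatterlists, derive the IV from the sequence number and hand the
 * request to the AEAD transform.
 */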
int esp_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
	u8 *iv;
	int alen;
	void *tmp;
	int ivlen;
	int assoclen;
	int extralen;
	struct page *page;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct aead_request *req;
	struct scatterlist *sg, *dsg;
	struct esp_output_extra *extra;
	int err = -ENOMEM;

	assoclen = sizeof(struct ip_esp_hdr);
	extralen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		extralen += sizeof(*extra);
		assoclen += sizeof(__be32);
	}

	aead = x->data;
	alen = crypto_aead_authsize(aead);
	ivlen = crypto_aead_ivsize(aead);

	tmp = esp_alloc_tmp(aead, esp->nfrags + 2, extralen);
	if (!tmp)
		goto error;

	extra = esp_tmp_extra(tmp);
	iv = esp_tmp_iv(aead, tmp, extralen);
	req = esp_tmp_req(aead, iv);
	sg = esp_req_sg(aead, req);

	if (esp->inplace)
		dsg = sg;
	else
		dsg = &sg[esp->nfrags];

	esph = esp_output_set_extra(skb, x, esp->esph, extra);
	esp->esph = esph;

	sg_init_table(sg, esp->nfrags);
	err = skb_to_sgvec(skb, sg,
			   (unsigned char *)esph - skb->data,
			   assoclen + ivlen + esp->clen + alen);
	if (unlikely(err < 0))
		goto error_free;

	if (!esp->inplace) {
		int allocsize;
		struct page_frag *pfrag = &x->xfrag;

		allocsize = ALIGN(skb->data_len, L1_CACHE_BYTES);

		spin_lock_bh(&x->lock);
		if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
			spin_unlock_bh(&x->lock);
			goto error_free;
		}

		skb_shinfo(skb)->nr_frags = 1;

		page = pfrag->page;
		get_page(page);
		/* replace page frags in skb with new page */
		__skb_fill_page_desc(skb, 0, page, pfrag->offset, skb->data_len);
		pfrag->offset = pfrag->offset + allocsize;
		spin_unlock_bh(&x->lock);

		sg_init_table(dsg, skb_shinfo(skb)->nr_frags + 1);
		err = skb_to_sgvec(skb, dsg,
				   (unsigned char *)esph - skb->data,
				   assoclen + ivlen + esp->clen + alen);
		if (unlikely(err < 0))
			goto error_free;
	}

	if ((x->props.flags & XFRM_STATE_ESN))
		aead_request_set_callback(req, 0, esp_output_done_esn, skb);
	else
		aead_request_set_callback(req, 0, esp_output_done, skb);

	aead_request_set_crypt(req, sg, dsg, ivlen + esp->clen, iv);
	aead_request_set_ad(req, assoclen);

	memset(iv, 0, ivlen);
	memcpy(iv + ivlen - min(ivlen, 8), (u8 *)&esp->seqno + 8 - min(ivlen, 8),
	       min(ivlen, 8));

	ESP_SKB_CB(skb)->tmp = tmp;
	err = crypto_aead_encrypt(req);

	if (err == -EINPROGRESS)
		goto error;

	if ((x->props.flags & XFRM_STATE_ESN))
		esp_output_restore_header(skb);

	if (sg != dsg)
		esp_ssg_unref(x, tmp);

error_free:
	kfree(tmp);
error:
	return err;
}
EXPORT_SYMBOL_GPL(esp_output_tail);
static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int alen;
	int blksize;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct esp_info esp;

	esp.inplace = true;

	esp.proto = *skb_mac_header(skb);
	*skb_mac_header(skb) = IPPROTO_ESP;

	/* skb is pure payload to encrypt */

	aead = x->data;
	alen = crypto_aead_authsize(aead);

	esp.tfclen = 0;
	if (x->tfcpad) {
		struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
		u32 padto;

		padto = min(x->tfcpad, esp4_get_mtu(x, dst->child_mtu_cached));
		if (skb->len < padto)
			esp.tfclen = padto - skb->len;
	}
	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
	esp.plen = esp.clen - skb->len - esp.tfclen;
	esp.tailen = esp.tfclen + esp.plen + alen;

	esp.esph = ip_esp_hdr(skb);

	esp.nfrags = esp_output_head(x, skb, &esp);
	if (esp.nfrags < 0)
		return esp.nfrags;

	esph = esp.esph;
	esph->spi = x->id.spi;

	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
	esp.seqno = cpu_to_be64(XFRM_SKB_CB(skb)->seq.output.low +
				((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32));

	skb_push(skb, -skb_network_offset(skb));

	return esp_output_tail(x, skb, &esp);
}
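
/* Strip the ESP trailer (padding, pad length, next header and ICV) from
 * a decrypted packet and return the next header value.
 */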
static inline int esp_remove_trailer(struct sk_buff *skb)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct crypto_aead *aead = x->data;
	int alen, hlen, elen;
	int padlen, trimlen;
	__wsum csumdiff;
	u8 nexthdr[2];
	int ret;

	alen = crypto_aead_authsize(aead);
	hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
	elen = skb->len - hlen;

	if (xo && (xo->flags & XFRM_ESP_NO_TRAILER)) {
		ret = xo->proto;
		goto out;
	}

	if (skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2))
		BUG();

	ret = -EINVAL;
	padlen = nexthdr[0];
	if (padlen + 2 + alen >= elen) {
		net_dbg_ratelimited("ipsec esp packet is garbage padlen=%d, elen=%d\n",
				    padlen + 2, elen - alen);
		goto out;
	}

	trimlen = alen + padlen + 2;
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		csumdiff = skb_checksum(skb, skb->len - trimlen, trimlen, 0);
		skb->csum = csum_block_sub(skb->csum, csumdiff,
					   skb->len - trimlen);
	}
	pskb_trim(skb, skb->len - trimlen);

	ret = nexthdr[1];

out:
	return ret;
}
int esp_input_done2(struct sk_buff *skb, int err)
{
	const struct iphdr *iph;
	struct xfrm_state *x = xfrm_input_state(skb);
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct crypto_aead *aead = x->data;
	int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
	int ihl;

	if (!xo || (xo && !(xo->flags & CRYPTO_DONE)))
		kfree(ESP_SKB_CB(skb)->tmp);

	if (unlikely(err))
		goto out;

	err = esp_remove_trailer(skb);
	if (unlikely(err < 0))
		goto out;

	iph = ip_hdr(skb);
	ihl = iph->ihl * 4;

	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;
		struct udphdr *uh = (void *)(skb_network_header(skb) + ihl);

		/*
		 * 1) if the NAT-T peer's IP or port changed then
		 *    advertise the change to the keying daemon.
		 *    This is an inbound SA, so just compare
		 *    SRC ports.
		 */
		if (iph->saddr != x->props.saddr.a4 ||
		    uh->source != encap->encap_sport) {
			xfrm_address_t ipaddr;

			ipaddr.a4 = iph->saddr;
			km_new_mapping(x, &ipaddr, uh->source);

			/* XXX: perhaps add an extra
			 * policy check here, to see
			 * if we should allow or
			 * reject a packet from a
			 * different source
			 * address/port.
			 */
		}

		/*
		 * 2) ignore UDP/TCP checksums in case
		 *    of NAT-T in Transport Mode, or
		 *    perform other post-processing fixes
		 *    as per draft-ietf-ipsec-udp-encaps-06,
		 *    section 3.1.2
		 */
		if (x->props.mode == XFRM_MODE_TRANSPORT)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	skb_pull_rcsum(skb, hlen);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		skb_reset_transport_header(skb);
	else
		skb_set_transport_header(skb, -ihl);

	/* RFC4303: Drop dummy packets without any error */
	if (err == IPPROTO_NONE)
		err = -EINVAL;

out:
	return err;
}
EXPORT_SYMBOL_GPL(esp_input_done2);
static void esp_input_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	xfrm_input_resume(skb, esp_input_done2(skb, err));
}
static void esp_input_restore_header(struct sk_buff *skb)
{
	esp_restore_header(skb, 0);
	__skb_pull(skb, 4);
}
static void esp_input_set_header(struct sk_buff *skb, __be32 *seqhi)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	struct ip_esp_hdr *esph = (struct ip_esp_hdr *)skb->data;

	/* For ESN we move the header forward by 4 bytes to
	 * accommodate the high bits.  We will move it back after
	 * decryption.
	 */
	if ((x->props.flags & XFRM_STATE_ESN)) {
		esph = skb_push(skb, 4);
		*seqhi = esph->spi;
		esph->spi = esph->seq_no;
		esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi;
	}
}
static void esp_input_done_esn(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	esp_input_restore_header(skb);
	esp_input_done(base, err);
}
/*
 * Note: detecting truncated vs. non-truncated authentication data is very
 * expensive, so we only support truncated data, which is the recommended
 * and common case.
 */
static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead = x->data;
	struct aead_request *req;
	struct sk_buff *trailer;
	int ivlen = crypto_aead_ivsize(aead);
	int elen = skb->len - sizeof(*esph) - ivlen;
	int nfrags;
	int assoclen;
	int seqhilen;
	__be32 *seqhi;
	void *tmp;
	u8 *iv;
	struct scatterlist *sg;
	int err = -EINVAL;

	if (!pskb_may_pull(skb, sizeof(*esph) + ivlen))
		goto out;

	if (elen <= 0)
		goto out;

	assoclen = sizeof(*esph);
	seqhilen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		seqhilen += sizeof(__be32);
		assoclen += seqhilen;
	}

	if (!skb_cloned(skb)) {
		if (!skb_is_nonlinear(skb)) {
			nfrags = 1;

			goto skip_cow;
		} else if (!skb_has_frag_list(skb)) {
			nfrags = skb_shinfo(skb)->nr_frags;
			nfrags++;

			goto skip_cow;
		}
	}

	err = skb_cow_data(skb, 0, &trailer);
	if (err < 0)
		goto out;

	nfrags = err;

skip_cow:
	err = -ENOMEM;
	tmp = esp_alloc_tmp(aead, nfrags, seqhilen);
	if (!tmp)
		goto out;

	ESP_SKB_CB(skb)->tmp = tmp;
	seqhi = esp_tmp_extra(tmp);
	iv = esp_tmp_iv(aead, tmp, seqhilen);
	req = esp_tmp_req(aead, iv);
	sg = esp_req_sg(aead, req);

	esp_input_set_header(skb, seqhi);

	sg_init_table(sg, nfrags);
	err = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(err < 0)) {
		kfree(tmp);
		goto out;
	}

	skb->ip_summed = CHECKSUM_NONE;

	if ((x->props.flags & XFRM_STATE_ESN))
		aead_request_set_callback(req, 0, esp_input_done_esn, skb);
	else
		aead_request_set_callback(req, 0, esp_input_done, skb);

	aead_request_set_crypt(req, sg, sg, elen + ivlen, iv);
	aead_request_set_ad(req, assoclen);

	err = crypto_aead_decrypt(req);
	if (err == -EINPROGRESS)
		goto out;

	if ((x->props.flags & XFRM_STATE_ESN))
		esp_input_restore_header(skb);

	err = esp_input_done2(skb, err);

out:
	return err;
}
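
/* Maximum payload that fits into @mtu once the ESP header, ICV and
 * padding to the cipher block size are accounted for.
 */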
static u32 esp4_get_mtu(struct xfrm_state *x, int mtu)
{
	struct crypto_aead *aead = x->data;
	u32 blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	unsigned int net_adj;

	switch (x->props.mode) {
	case XFRM_MODE_TRANSPORT:
	case XFRM_MODE_BEET:
		net_adj = sizeof(struct iphdr);
		break;
	case XFRM_MODE_TUNNEL:
		net_adj = 0;
		break;
	default:
		BUG();
	}

	return ((mtu - x->props.header_len - crypto_aead_authsize(aead) -
		 net_adj) & ~(blksize - 1)) + net_adj - 2;
}
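
/* ICMP error handler: update the path MTU or the route of the matching
 * state for fragmentation-needed and redirect messages.
 */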
static int esp4_err(struct sk_buff *skb, u32 info)
{
	struct net *net = dev_net(skb->dev);
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + (iph->ihl << 2));
	struct xfrm_state *x;

	switch (icmp_hdr(skb)->type) {
	case ICMP_DEST_UNREACH:
		if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
			return 0;
	case ICMP_REDIRECT:
		break;
	default:
		return 0;
	}

	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
			      esph->spi, IPPROTO_ESP, AF_INET);
	if (!x)
		return 0;

	if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
		ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_ESP, 0);
	else
		ipv4_redirect(skb, net, 0, 0, IPPROTO_ESP, 0);
	xfrm_state_put(x);

	return 0;
}
static void esp_destroy(struct xfrm_state *x)
{
	struct crypto_aead *aead = x->data;

	if (!aead)
		return;

	crypto_free_aead(aead);
}
static int esp_init_aead(struct xfrm_state *x)
{
	char aead_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_aead *aead;
	int err;

	err = -ENAMETOOLONG;
	if (snprintf(aead_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
		     x->geniv, x->aead->alg_name) >= CRYPTO_MAX_ALG_NAME)
		goto error;

	aead = crypto_alloc_aead(aead_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;

	x->data = aead;

	err = crypto_aead_setkey(aead, x->aead->alg_key,
				 (x->aead->alg_key_len + 7) / 8);
	if (err)
		goto error;

	err = crypto_aead_setauthsize(aead, x->aead->alg_icv_len / 8);
	if (err)
		goto error;

error:
	return err;
}
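
/* Non-AEAD algorithm pairs are wrapped in an authenc() transform, whose
 * key blob is an rtattr carrying the encryption key length followed by
 * the raw authentication and encryption keys.
 */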
static int esp_init_authenc(struct xfrm_state *x)
{
	struct crypto_aead *aead;
	struct crypto_authenc_key_param *param;
	struct rtattr *rta;
	char *key;
	char *p;
	char authenc_name[CRYPTO_MAX_ALG_NAME];
	unsigned int keylen;
	int err;

	err = -EINVAL;
	if (!x->ealg)
		goto error;

	err = -ENAMETOOLONG;

	if ((x->props.flags & XFRM_STATE_ESN)) {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "%s%sauthencesn(%s,%s)%s",
			     x->geniv ?: "", x->geniv ? "(" : "",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name,
			     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME)
			goto error;
	} else {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "%s%sauthenc(%s,%s)%s",
			     x->geniv ?: "", x->geniv ? "(" : "",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name,
			     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME)
			goto error;
	}

	aead = crypto_alloc_aead(authenc_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;

	x->data = aead;

	keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
		 (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
	err = -ENOMEM;
	key = kmalloc(keylen, GFP_KERNEL);
	if (!key)
		goto error;

	p = key;
	rta = (void *)p;
	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	p += RTA_SPACE(sizeof(*param));

	if (x->aalg) {
		struct xfrm_algo_desc *aalg_desc;

		memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8);
		p += (x->aalg->alg_key_len + 7) / 8;

		aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
		BUG_ON(!aalg_desc);

		err = -EINVAL;
		if (aalg_desc->uinfo.auth.icv_fullbits / 8 !=
		    crypto_aead_authsize(aead)) {
			pr_info("ESP: %s digestsize %u != %hu\n",
				x->aalg->alg_name,
				crypto_aead_authsize(aead),
				aalg_desc->uinfo.auth.icv_fullbits / 8);
			goto free_key;
		}

		err = crypto_aead_setauthsize(
			aead, x->aalg->alg_trunc_len / 8);
		if (err)
			goto free_key;
	}

	param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
	memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);

	err = crypto_aead_setkey(aead, key, keylen);

free_key:
	kfree(key);

error:
	return err;
}
static int esp_init_state(struct xfrm_state *x)
{
	struct crypto_aead *aead;
	u32 align;
	int err;

	x->data = NULL;

	if (x->aead)
		err = esp_init_aead(x);
	else
		err = esp_init_authenc(x);

	if (err)
		goto error;

	aead = x->data;

	x->props.header_len = sizeof(struct ip_esp_hdr) +
			      crypto_aead_ivsize(aead);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		x->props.header_len += sizeof(struct iphdr);
	else if (x->props.mode == XFRM_MODE_BEET && x->sel.family != AF_INET6)
		x->props.header_len += IPV4_BEET_PHMAXLEN;
	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;

		switch (encap->encap_type) {
		default:
			err = -EINVAL;
			goto error;
		case UDP_ENCAP_ESPINUDP:
			x->props.header_len += sizeof(struct udphdr);
			break;
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			x->props.header_len += sizeof(struct udphdr) + 2 * sizeof(u32);
			break;
		}
	}

	align = ALIGN(crypto_aead_blocksize(aead), 4);
	x->props.trailer_len = align + 1 + crypto_aead_authsize(aead);

error:
	return err;
}
static int esp4_rcv_cb(struct sk_buff *skb, int err)
{
	return 0;
}
static const struct xfrm_type esp_type =
{
	.description	= "ESP4",
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_ESP,
	.flags		= XFRM_TYPE_REPLAY_PROT,
	.init_state	= esp_init_state,
	.destructor	= esp_destroy,
	.get_mtu	= esp4_get_mtu,
	.input		= esp_input,
	.output		= esp_output,
};
static struct xfrm4_protocol esp4_protocol = {
	.handler	=	xfrm4_rcv,
	.input_handler	=	xfrm_input,
	.cb_handler	=	esp4_rcv_cb,
	.err_handler	=	esp4_err,
	.priority	=	0,
};
static int __init esp4_init(void)
{
	if (xfrm_register_type(&esp_type, AF_INET) < 0) {
		pr_info("%s: can't add xfrm type\n", __func__);
		return -EAGAIN;
	}
	if (xfrm4_protocol_register(&esp4_protocol, IPPROTO_ESP) < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		xfrm_unregister_type(&esp_type, AF_INET);
		return -EAGAIN;
	}

	return 0;
}
static void __exit esp4_fini(void)
{
	if (xfrm4_protocol_deregister(&esp4_protocol, IPPROTO_ESP) < 0)
		pr_info("%s: can't remove protocol\n", __func__);
	if (xfrm_unregister_type(&esp_type, AF_INET) < 0)
		pr_info("%s: can't remove xfrm type\n", __func__);
}
module_init(esp4_init);
module_exit(esp4_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET, XFRM_PROTO_ESP);