#define pr_fmt(fmt) "IPsec: " fmt

#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/pfkeyv2.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in6.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/udp.h>
struct esp_skb_cb {
	struct xfrm_skb_cb xfrm;
	void *tmp;
};

struct esp_output_extra {
	__be32 seqhi;
	u32 esphoff;
};

#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))
static u32 esp4_get_mtu(struct xfrm_state *x, int mtu);
/*
 * Allocate an AEAD request structure with extra space for SG and IV.
 *
 * For alignment considerations the IV is placed at the front, followed
 * by the request and finally the SG list.
 *
 * TODO: Use spare space in skb for this where possible.
 */
static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int extralen)
{
	unsigned int len;

	len = extralen;

	len += crypto_aead_ivsize(aead);

	if (crypto_aead_ivsize(aead)) {
		len += crypto_aead_alignmask(aead) &
		       ~(crypto_tfm_ctx_alignment() - 1);
		len = ALIGN(len, crypto_tfm_ctx_alignment());
	}

	len += sizeof(struct aead_request) + crypto_aead_reqsize(aead);
	len = ALIGN(len, __alignof__(struct scatterlist));

	len += sizeof(struct scatterlist) * nfrags;

	return kmalloc(len, GFP_ATOMIC);
}
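
/* The esp_tmp_*() helpers below carve the scratch buffer returned by
 * esp_alloc_tmp() into its extra/IV/request/SG regions, repeating the
 * same alignment arithmetic that was used to size it.
 */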
static inline void *esp_tmp_extra(void *tmp)
{
	return PTR_ALIGN(tmp, __alignof__(struct esp_output_extra));
}
static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int extralen)
{
	return crypto_aead_ivsize(aead) ?
	       PTR_ALIGN((u8 *)tmp + extralen,
			 crypto_aead_alignmask(aead) + 1) : tmp + extralen;
}
static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
{
	struct aead_request *req;

	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
				crypto_tfm_ctx_alignment());
	aead_request_set_tfm(req, aead);
	return req;
}
static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
					     struct aead_request *req)
{
	return (void *)ALIGN((unsigned long)(req + 1) +
			     crypto_aead_reqsize(aead),
			     __alignof__(struct scatterlist));
}
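
/* Completion callback for asynchronous encryption: free the scratch
 * buffer and hand the skb back to the xfrm output path.
 */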
static void esp_output_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	kfree(ESP_SKB_CB(skb)->tmp);
	xfrm_output_resume(skb, err);
}
/* Move ESP header back into place. */
static void esp_restore_header(struct sk_buff *skb, unsigned int offset)
{
	struct ip_esp_hdr *esph = (void *)(skb->data + offset);
	void *tmp = ESP_SKB_CB(skb)->tmp;
	__be32 *seqhi = esp_tmp_extra(tmp);

	esph->seq_no = esph->spi;
	esph->spi = *seqhi;
}
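
/* On the output path the header was shifted forward by sizeof(__be32)
 * to make room for the high sequence bits; extra->esphoff records where
 * it originally sat relative to the transport header.
 */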
static void esp_output_restore_header(struct sk_buff *skb)
{
	void *tmp = ESP_SKB_CB(skb)->tmp;
	struct esp_output_extra *extra = esp_tmp_extra(tmp);

	esp_restore_header(skb, skb_transport_offset(skb) + extra->esphoff -
				sizeof(__be32));
}
static void esp_output_done_esn(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	esp_output_restore_header(skb);
	esp_output_done(base, err);
}
static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;
	struct esp_output_extra *extra;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct aead_request *req;
	struct scatterlist *sg;
	struct sk_buff *trailer;
	void *tmp;
	u8 *iv;
	u8 *tail;
	int blksize;
	int clen;
	int alen;
	int plen;
	int ivlen;
	int tfclen;
	int nfrags;
	int assoclen;
	int extralen;
	__be64 seqno;

	/* skb is pure payload to encrypt */

	aead = x->data;
	alen = crypto_aead_authsize(aead);
	ivlen = crypto_aead_ivsize(aead);
	tfclen = 0;
	if (x->tfcpad) {
		struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
		u32 padto;

		padto = min(x->tfcpad, esp4_get_mtu(x, dst->child_mtu_cached));
		if (skb->len < padto)
			tfclen = padto - skb->len;
	}
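	/* Pad the payload (plus the two trailer bytes) out to a whole
	 * number of cipher blocks; blksize is forced to a multiple of 4
	 * so the trailer stays 4-byte aligned as RFC 4303 requires.
	 */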
	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	clen = ALIGN(skb->len + 2 + tfclen, blksize);
	plen = clen - skb->len - tfclen;

	err = skb_cow_data(skb, tfclen + plen + alen, &trailer);
	if (err < 0)
		goto error;
	nfrags = err;
	assoclen = sizeof(*esph);
	extralen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		extralen += sizeof(*extra);
		assoclen += sizeof(__be32);
	}

	tmp = esp_alloc_tmp(aead, nfrags, extralen);
	if (!tmp) {
		err = -ENOMEM;
		goto error;
	}
	extra = esp_tmp_extra(tmp);
	iv = esp_tmp_iv(aead, tmp, extralen);
	req = esp_tmp_req(aead, iv);
	sg = esp_req_sg(aead, req);
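
	/* The ESP trailer is monotonically increasing pad bytes (1, 2,
	 * ...), the pad-length octet, and a next-header octet taken from
	 * the byte stashed at the mac header, with alen bytes of ICV to
	 * follow.
	 */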
	/* Fill padding... */
	tail = skb_tail_pointer(trailer);
	if (tfclen) {
		memset(tail, 0, tfclen);
		tail += tfclen;
	}
	do {
		int i;
		for (i = 0; i < plen - 2; i++)
			tail[i] = i + 1;
	} while (0);
	tail[plen - 2] = plen - 2;
	tail[plen - 1] = *skb_mac_header(skb);
	pskb_put(skb, trailer, clen - skb->len + alen);

	skb_push(skb, -skb_network_offset(skb));
	esph = ip_esp_hdr(skb);
	*skb_mac_header(skb) = IPPROTO_ESP;
	/* this is non-NULL only with UDP Encapsulation */
	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;
		struct udphdr *uh;
		__be32 *udpdata32;
		__be16 sport, dport;
		int encap_type;

		spin_lock_bh(&x->lock);
		sport = encap->encap_sport;
		dport = encap->encap_dport;
		encap_type = encap->encap_type;
		spin_unlock_bh(&x->lock);

		uh = (struct udphdr *)esph;
		uh->source = sport;
		uh->dest = dport;
		uh->len = htons(skb->len - skb_transport_offset(skb));
		uh->check = 0;

		switch (encap_type) {
		default:
		case UDP_ENCAP_ESPINUDP:
			esph = (struct ip_esp_hdr *)(uh + 1);
			break;
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			udpdata32 = (__be32 *)(uh + 1);
			udpdata32[0] = udpdata32[1] = 0;
			esph = (struct ip_esp_hdr *)(udpdata32 + 2);
			break;
		}

		*skb_mac_header(skb) = IPPROTO_UDP;
	}
	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

	aead_request_set_callback(req, 0, esp_output_done, skb);
	/* For ESN we move the header forward by 4 bytes to
	 * accommodate the high bits.  We will move it back after
	 * encryption.
	 */
	if ((x->props.flags & XFRM_STATE_ESN)) {
		extra->esphoff = (unsigned char *)esph -
				 skb_transport_header(skb);
		esph = (struct ip_esp_hdr *)((unsigned char *)esph - 4);
		extra->seqhi = esph->spi;
		esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
		aead_request_set_callback(req, 0, esp_output_done_esn, skb);
	}
	esph->spi = x->id.spi;

	sg_init_table(sg, nfrags);
	err = skb_to_sgvec(skb, sg,
			   (unsigned char *)esph - skb->data,
			   assoclen + ivlen + clen + alen);
	if (unlikely(err < 0))
		goto error;

	aead_request_set_crypt(req, sg, sg, ivlen + clen, iv);
	aead_request_set_ad(req, assoclen);
	seqno = cpu_to_be64(XFRM_SKB_CB(skb)->seq.output.low +
			    ((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32));
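
	/* Seed the IV with the 64-bit sequence number: zero the buffer,
	 * then place the big-endian value in its trailing min(ivlen, 8)
	 * bytes for the IV generator to consume.
	 */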
	memset(iv, 0, ivlen);
	memcpy(iv + ivlen - min(ivlen, 8), (u8 *)&seqno + 8 - min(ivlen, 8),
	       min(ivlen, 8));
	ESP_SKB_CB(skb)->tmp = tmp;
	err = crypto_aead_encrypt(req);

	switch (err) {
	case -EINPROGRESS:
		goto error;

	case -EBUSY:
		err = NET_XMIT_DROP;
		break;

	case 0:
		if ((x->props.flags & XFRM_STATE_ESN))
			esp_output_restore_header(skb);
	}

	kfree(tmp);

error:
	return err;
}
static int esp_input_done2(struct sk_buff *skb, int err)
{
	const struct iphdr *iph;
	struct xfrm_state *x = xfrm_input_state(skb);
	struct crypto_aead *aead = x->data;
	int alen = crypto_aead_authsize(aead);
	int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
	int elen = skb->len - hlen;
	int ihl;
	u8 nexthdr[2];
	int padlen;

	kfree(ESP_SKB_CB(skb)->tmp);

	if (unlikely(err))
		goto out;
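
	/* The ICV has already been verified by the AEAD transform; read
	 * the pad-length and next-header bytes that sit just before it.
	 */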
	if (skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2))
		BUG();

	err = -EINVAL;
	padlen = nexthdr[0];
	if (padlen + 2 + alen >= elen)
		goto out;

	/* ... check padding bits here. Silly. :-) */
	iph = ip_hdr(skb);
	ihl = iph->ihl * 4;

	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;
		struct udphdr *uh = (void *)(skb_network_header(skb) + ihl);

		/*
		 * 1) if the NAT-T peer's IP or port changed then
		 *    advertise the change to the keying daemon.
		 *    This is an inbound SA, so just compare
		 *    SRC ports.
		 */
		if (iph->saddr != x->props.saddr.a4 ||
		    uh->source != encap->encap_sport) {
			xfrm_address_t ipaddr;

			ipaddr.a4 = iph->saddr;
			km_new_mapping(x, &ipaddr, uh->source);

			/* XXX: perhaps add an extra
			 * policy check here, to see
			 * if we should allow or
			 * reject a packet from a
			 * different source
			 * address/port.
			 */
		}

		/*
		 * 2) ignore UDP/TCP checksums in case
		 *    of NAT-T in Transport Mode, or
		 *    perform other post-processing fixes
		 *    as per draft-ietf-ipsec-udp-encaps-06,
		 *    section 3.1.2
		 */
		if (x->props.mode == XFRM_MODE_TRANSPORT)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
	pskb_trim(skb, skb->len - alen - padlen - 2);
	__skb_pull(skb, hlen);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		skb_reset_transport_header(skb);
	else
		skb_set_transport_header(skb, -ihl);

	err = nexthdr[1];

	/* RFC4303: Drop dummy packets without any error */
	if (err == IPPROTO_NONE)
		err = -EINVAL;

out:
	return err;
}
static void esp_input_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	xfrm_input_resume(skb, esp_input_done2(skb, err));
}
static void esp_input_restore_header(struct sk_buff *skb)
{
	esp_restore_header(skb, 0);
	__skb_pull(skb, 4);
}
static void esp_input_done_esn(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	esp_input_restore_header(skb);
	esp_input_done(base, err);
}
/*
 * Note: detecting truncated vs. non-truncated authentication data is very
 * expensive, so we only support truncated data, which is the recommended
 * and common case.
 */
static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead = x->data;
	struct aead_request *req;
	struct sk_buff *trailer;
	int ivlen = crypto_aead_ivsize(aead);
	int elen = skb->len - sizeof(*esph) - ivlen;
	int nfrags;
	int assoclen;
	int seqhilen;
	__be32 *seqhi;
	void *tmp;
	u8 *iv;
	struct scatterlist *sg;
	int err = -EINVAL;
	if (!pskb_may_pull(skb, sizeof(*esph) + ivlen))
		goto out;

	if (elen <= 0)
		goto out;

	err = skb_cow_data(skb, 0, &trailer);
	if (err < 0)
		goto out;

	nfrags = err;

	assoclen = sizeof(*esph);
	seqhilen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		seqhilen += sizeof(__be32);
		assoclen += seqhilen;
	}

	err = -ENOMEM;
	tmp = esp_alloc_tmp(aead, nfrags, seqhilen);
	if (!tmp)
		goto out;
	ESP_SKB_CB(skb)->tmp = tmp;
	seqhi = esp_tmp_extra(tmp);
	iv = esp_tmp_iv(aead, tmp, seqhilen);
	req = esp_tmp_req(aead, iv);
	sg = esp_req_sg(aead, req);
	skb->ip_summed = CHECKSUM_NONE;

	esph = (struct ip_esp_hdr *)skb->data;

	aead_request_set_callback(req, 0, esp_input_done, skb);
	/* For ESN we move the header forward by 4 bytes to
	 * accommodate the high bits.  We will move it back after
	 * decryption.
	 */
	if ((x->props.flags & XFRM_STATE_ESN)) {
		esph = (void *)skb_push(skb, 4);
		*seqhi = esph->spi;
		esph->spi = esph->seq_no;
		esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi;
		aead_request_set_callback(req, 0, esp_input_done_esn, skb);
	}
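
	/* Map the entire packet: the associated data (ESP header plus
	 * the pushed-in high sequence bits for ESN), IV, ciphertext and
	 * ICV share the one scatterlist.
	 */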
	sg_init_table(sg, nfrags);
	err = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(err < 0))
		goto out;

	aead_request_set_crypt(req, sg, sg, elen + ivlen, iv);
	aead_request_set_ad(req, assoclen);

	err = crypto_aead_decrypt(req);
	if (err == -EINPROGRESS)
		goto out;

	if ((x->props.flags & XFRM_STATE_ESN))
		esp_input_restore_header(skb);

	err = esp_input_done2(skb, err);

out:
	return err;
}
static u32 esp4_get_mtu(struct xfrm_state *x, int mtu)
{
	struct crypto_aead *aead = x->data;
	u32 blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	unsigned int net_adj;

	switch (x->props.mode) {
	case XFRM_MODE_TRANSPORT:
	case XFRM_MODE_BEET:
		net_adj = sizeof(struct iphdr);
		break;
	case XFRM_MODE_TUNNEL:
		net_adj = 0;
		break;
	default:
		BUG();
	}
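
	/* Round the usable payload down to a whole number of cipher
	 * blocks, then take off the two ESP trailer bytes (pad length
	 * and next header).
	 */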
	return ((mtu - x->props.header_len - crypto_aead_authsize(aead) -
		 net_adj) & ~(blksize - 1)) + net_adj - 2;
}
static int esp4_err(struct sk_buff *skb, u32 info)
{
	struct net *net = dev_net(skb->dev);
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + (iph->ihl << 2));
	struct xfrm_state *x;

	switch (icmp_hdr(skb)->type) {
	case ICMP_DEST_UNREACH:
		if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
			return 0;
	case ICMP_REDIRECT:
		break;
	default:
		return 0;
	}

	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
			      esph->spi, IPPROTO_ESP, AF_INET);
	if (!x)
		return 0;

	if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
		ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_ESP, 0);
	else
		ipv4_redirect(skb, net, 0, 0, IPPROTO_ESP, 0);
	xfrm_state_put(x);

	return 0;
}
static void esp_destroy(struct xfrm_state *x)
{
	struct crypto_aead *aead = x->data;

	if (!aead)
		return;

	crypto_free_aead(aead);
}
static int esp_init_aead(struct xfrm_state *x)
{
	char aead_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_aead *aead;
	int err;

	err = -ENAMETOOLONG;
	if (snprintf(aead_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
		     x->geniv, x->aead->alg_name) >= CRYPTO_MAX_ALG_NAME)
		goto error;

	aead = crypto_alloc_aead(aead_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;

	x->data = aead;

	err = crypto_aead_setkey(aead, x->aead->alg_key,
				 (x->aead->alg_key_len + 7) / 8);
	if (err)
		goto error;

	err = crypto_aead_setauthsize(aead, x->aead->alg_icv_len / 8);
	if (err)
		goto error;

error:
	return err;
}
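
/* Separate encryption and authentication algorithms are composed into
 * a single AEAD via the crypto layer's authenc() (or authencesn())
 * template, so the data path can treat every SA as an AEAD.
 */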
static int esp_init_authenc(struct xfrm_state *x)
{
	struct crypto_aead *aead;
	struct crypto_authenc_key_param *param;
	struct rtattr *rta;
	char *key;
	char *p;
	char authenc_name[CRYPTO_MAX_ALG_NAME];
	unsigned int keylen;
	int err;
	err = -EINVAL;
	if (!x->ealg)
		goto error;

	err = -ENAMETOOLONG;

	if ((x->props.flags & XFRM_STATE_ESN)) {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "%s%sauthencesn(%s,%s)%s",
			     x->geniv ?: "", x->geniv ? "(" : "",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name,
			     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME)
			goto error;
	} else {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "%s%sauthenc(%s,%s)%s",
			     x->geniv ?: "", x->geniv ? "(" : "",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name,
			     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME)
			goto error;
	}
	aead = crypto_alloc_aead(authenc_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;

	x->data = aead;

	keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
		 (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
	err = -ENOMEM;
	key = kmalloc(keylen, GFP_KERNEL);
	if (!key)
		goto error;

	p = key;
	rta = (void *)p;
	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	p += RTA_SPACE(sizeof(*param));
	if (x->aalg) {
		struct xfrm_algo_desc *aalg_desc;

		memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8);
		p += (x->aalg->alg_key_len + 7) / 8;

		aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
		BUG_ON(!aalg_desc);

		err = -EINVAL;
		if (aalg_desc->uinfo.auth.icv_fullbits / 8 !=
		    crypto_aead_authsize(aead)) {
			pr_info("ESP: %s digestsize %u != %hu\n",
				x->aalg->alg_name,
				crypto_aead_authsize(aead),
				aalg_desc->uinfo.auth.icv_fullbits / 8);
			goto free_key;
		}

		err = crypto_aead_setauthsize(
			aead, x->aalg->alg_trunc_len / 8);
		if (err)
			goto free_key;
	}
	param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
	memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);

	err = crypto_aead_setkey(aead, key, keylen);

free_key:
	kfree(key);

error:
	return err;
}
static int esp_init_state(struct xfrm_state *x)
{
	struct crypto_aead *aead;
	u32 align;
	int err;

	x->data = NULL;

	if (x->aead)
		err = esp_init_aead(x);
	else
		err = esp_init_authenc(x);

	if (err)
		goto error;

	aead = x->data;

	x->props.header_len = sizeof(struct ip_esp_hdr) +
			      crypto_aead_ivsize(aead);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		x->props.header_len += sizeof(struct iphdr);
	else if (x->props.mode == XFRM_MODE_BEET && x->sel.family != AF_INET6)
		x->props.header_len += IPV4_BEET_PHMAXLEN;
	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;

		switch (encap->encap_type) {
		default:
			goto error;
		case UDP_ENCAP_ESPINUDP:
			x->props.header_len += sizeof(struct udphdr);
			break;
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			x->props.header_len += sizeof(struct udphdr) + 2 * sizeof(u32);
			break;
		}
	}

	align = ALIGN(crypto_aead_blocksize(aead), 4);
	x->props.trailer_len = align + 1 + crypto_aead_authsize(aead);

error:
	return err;
}
static int esp4_rcv_cb(struct sk_buff *skb, int err)
{
	return 0;
}
static const struct xfrm_type esp_type =
{
	.description	= "ESP4",
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_ESP,
	.flags		= XFRM_TYPE_REPLAY_PROT,
	.init_state	= esp_init_state,
	.destructor	= esp_destroy,
	.get_mtu	= esp4_get_mtu,
	.input		= esp_input,
	.output		= esp_output
};
static struct xfrm4_protocol esp4_protocol = {
	.handler	=	xfrm4_rcv,
	.input_handler	=	xfrm_input,
	.cb_handler	=	esp4_rcv_cb,
	.err_handler	=	esp4_err,
	.priority	=	0,
};
static int __init esp4_init(void)
{
	if (xfrm_register_type(&esp_type, AF_INET) < 0) {
		pr_info("%s: can't add xfrm type\n", __func__);
		return -EAGAIN;
	}
	if (xfrm4_protocol_register(&esp4_protocol, IPPROTO_ESP) < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		xfrm_unregister_type(&esp_type, AF_INET);
		return -EAGAIN;
	}
	return 0;
}
static void __exit esp4_fini(void)
{
	if (xfrm4_protocol_deregister(&esp4_protocol, IPPROTO_ESP) < 0)
		pr_info("%s: can't remove protocol\n", __func__);
	if (xfrm_unregister_type(&esp_type, AF_INET) < 0)
		pr_info("%s: can't remove xfrm type\n", __func__);
}
module_init(esp4_init);
module_exit(esp4_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET, XFRM_PROTO_ESP);