#define pr_fmt(fmt) "IPsec: " fmt

#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/pfkeyv2.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in6.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/udp.h>

struct esp_skb_cb {
        struct xfrm_skb_cb xfrm;
        void *tmp;
};

#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))

static u32 esp4_get_mtu(struct xfrm_state *x, int mtu);

/*
 * Allocate an AEAD request structure with extra space for SG and IV.
 *
 * For alignment considerations the IV is placed at the front, followed
 * by the request and finally the SG list.
 *
 * TODO: Use spare space in skb for this where possible.
 */
static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqhilen)
{
        unsigned int len;

        len = seqhilen;

        len += crypto_aead_ivsize(aead);

        if (len) {
                len += crypto_aead_alignmask(aead) &
                       ~(crypto_tfm_ctx_alignment() - 1);
                len = ALIGN(len, crypto_tfm_ctx_alignment());
        }

        len += sizeof(struct aead_givcrypt_request) + crypto_aead_reqsize(aead);
        len = ALIGN(len, __alignof__(struct scatterlist));

        len += sizeof(struct scatterlist) * nfrags;

        return kmalloc(len, GFP_ATOMIC);
}

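/*
 * The esp_tmp_* helpers below recover the individual regions of the
 * buffer sized by esp_alloc_tmp().  Roughly:
 *
 *   tmp -> [ seqhi (ESN only) ][ IV ][ aead request + tfm ctx ][ SG array ]
 *
 * Each helper re-applies the alignment rules used when the buffer was
 * sized, so the two sides must be kept in sync.
 */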
static inline __be32 *esp_tmp_seqhi(void *tmp)
{
        return PTR_ALIGN((__be32 *)tmp, __alignof__(__be32));
}

static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int seqhilen)
{
        return crypto_aead_ivsize(aead) ?
               PTR_ALIGN((u8 *)tmp + seqhilen,
                         crypto_aead_alignmask(aead) + 1) : tmp + seqhilen;
}

static inline struct aead_givcrypt_request *esp_tmp_givreq(
        struct crypto_aead *aead, u8 *iv)
{
        struct aead_givcrypt_request *req;

        req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
                                crypto_tfm_ctx_alignment());
        aead_givcrypt_set_tfm(req, aead);
        return req;
}

static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
{
        struct aead_request *req;

        req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
                                crypto_tfm_ctx_alignment());
        aead_request_set_tfm(req, aead);
        return req;
}

static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
                                             struct aead_request *req)
{
        return (void *)ALIGN((unsigned long)(req + 1) +
                             crypto_aead_reqsize(aead),
                             __alignof__(struct scatterlist));
}

static inline struct scatterlist *esp_givreq_sg(
        struct crypto_aead *aead, struct aead_givcrypt_request *req)
{
        return (void *)ALIGN((unsigned long)(req + 1) +
                             crypto_aead_reqsize(aead),
                             __alignof__(struct scatterlist));
}

static void esp_output_done(struct crypto_async_request *base, int err)
{
        struct sk_buff *skb = base->data;

        kfree(ESP_SKB_CB(skb)->tmp);
        xfrm_output_resume(skb, err);
}

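/*
 * esp_output() turns a plaintext skb into an ESP packet:
 *
 *   IP | [UDP encap] | ESP(spi, seq) | IV | payload | TFC pad |
 *   pad (1,2,3,...) | pad len | next hdr | ICV
 *
 * Padding and ICV space are grown onto the trailer, then the whole
 * packet is handed to the AEAD transform as a single givcrypt request.
 */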
static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
{
        int err;
        struct ip_esp_hdr *esph;
        struct crypto_aead *aead;
        struct aead_givcrypt_request *req;
        struct scatterlist *sg;
        struct scatterlist *asg;
        struct esp_data *esp;
        struct sk_buff *trailer;
        void *tmp;
        u8 *iv;
        u8 *tail;
        int blksize;
        int clen;
        int alen;
        int plen;
        int tfclen;
        int nfrags;
        int assoclen;
        int sglists;
        int seqhilen;
        __be32 *seqhi;

        /* skb is pure payload to encrypt */

        err = -ENOMEM;

        esp = x->data;
        aead = esp->aead;
        alen = crypto_aead_authsize(aead);

        tfclen = 0;
        if (x->tfcpad) {
                struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
                u32 padto;

                padto = min(x->tfcpad, esp4_get_mtu(x, dst->child_mtu_cached));
                if (skb->len < padto)
                        tfclen = padto - skb->len;
        }
        blksize = ALIGN(crypto_aead_blocksize(aead), 4);
        clen = ALIGN(skb->len + 2 + tfclen, blksize);
        if (esp->padlen)
                clen = ALIGN(clen, esp->padlen);
        plen = clen - skb->len - tfclen;

        err = skb_cow_data(skb, tfclen + plen + alen, &trailer);
        if (err < 0)
                goto error;
        nfrags = err;

        assoclen = sizeof(*esph);
        sglists = 1;
        seqhilen = 0;

        if (x->props.flags & XFRM_STATE_ESN) {
                sglists += 2;
                seqhilen += sizeof(__be32);
                assoclen += seqhilen;
        }

        tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen);
        if (!tmp)
                goto error;

        seqhi = esp_tmp_seqhi(tmp);
        iv = esp_tmp_iv(aead, tmp, seqhilen);
        req = esp_tmp_givreq(aead, iv);
        asg = esp_givreq_sg(aead, req);
        sg = asg + sglists;

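        /*
         * RFC 4303 padding is self-describing: the pad bytes count up
         * 1, 2, 3, ..., the next-to-last trailer byte holds the pad
         * length, and the last byte holds the next-header protocol
         * (stashed in the mac header slot by the xfrm output path).
         * TFC padding, if requested, is zero-filled ahead of it.
         */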
        /* Fill padding... */
        tail = skb_tail_pointer(trailer);
        if (tfclen) {
                memset(tail, 0, tfclen);
                tail += tfclen;
        }
        do {
                int i;
                for (i = 0; i < plen - 2; i++)
                        tail[i] = i + 1;
        } while (0);
        tail[plen - 2] = plen - 2;
        tail[plen - 1] = *skb_mac_header(skb);
        pskb_put(skb, trailer, clen - skb->len + alen);

        skb_push(skb, -skb_network_offset(skb));
        esph = ip_esp_hdr(skb);
        *skb_mac_header(skb) = IPPROTO_ESP;

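        /*
         * For NAT traversal (RFC 3948) a UDP header is written where the
         * ESP header would go; the real ESP header then follows it (after
         * eight extra zero bytes in the legacy non-IKE format), using room
         * reserved via x->props.header_len at state setup.
         */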
        /* this is non-NULL only with UDP Encapsulation */
        if (x->encap) {
                struct xfrm_encap_tmpl *encap = x->encap;
                struct udphdr *uh;
                __be32 *udpdata32;
                __be16 sport, dport;
                int encap_type;

                spin_lock_bh(&x->lock);
                sport = encap->encap_sport;
                dport = encap->encap_dport;
                encap_type = encap->encap_type;
                spin_unlock_bh(&x->lock);

                uh = (struct udphdr *)esph;
                uh->source = sport;
                uh->dest = dport;
                uh->len = htons(skb->len - skb_transport_offset(skb));
                uh->check = 0;

                switch (encap_type) {
                default:
                case UDP_ENCAP_ESPINUDP:
                        esph = (struct ip_esp_hdr *)(uh + 1);
                        break;
                case UDP_ENCAP_ESPINUDP_NON_IKE:
                        udpdata32 = (__be32 *)(uh + 1);
                        udpdata32[0] = udpdata32[1] = 0;
                        esph = (struct ip_esp_hdr *)(udpdata32 + 2);
                        break;
                }

                *skb_mac_header(skb) = IPPROTO_UDP;
        }

        esph->spi = x->id.spi;
        esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

        sg_init_table(sg, nfrags);
        skb_to_sgvec(skb, sg,
                     esph->enc_data + crypto_aead_ivsize(aead) - skb->data,
                     clen + alen);

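        /*
         * With extended sequence numbers the high 32 bits are never sent
         * on the wire but must still be authenticated, so the associated
         * data becomes the triple (spi, seq_hi, seq_lo) rather than the
         * plain ESP header.
         */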
        if ((x->props.flags & XFRM_STATE_ESN)) {
                sg_init_table(asg, 3);
                sg_set_buf(asg, &esph->spi, sizeof(__be32));
                *seqhi = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
                sg_set_buf(asg + 1, seqhi, seqhilen);
                sg_set_buf(asg + 2, &esph->seq_no, sizeof(__be32));
        } else
                sg_init_one(asg, esph, sizeof(*esph));

        aead_givcrypt_set_callback(req, 0, esp_output_done, skb);
        aead_givcrypt_set_crypt(req, sg, sg, clen, iv);
        aead_givcrypt_set_assoc(req, asg, assoclen);
        aead_givcrypt_set_giv(req, esph->enc_data,
                              XFRM_SKB_CB(skb)->seq.output.low);

        ESP_SKB_CB(skb)->tmp = tmp;
        err = crypto_aead_givencrypt(req);
        if (err == -EINPROGRESS)
                goto error;

        if (err == -EBUSY)
                err = NET_XMIT_DROP;

        kfree(tmp);

error:
        return err;
}

static int esp_input_done2(struct sk_buff *skb, int err)
{
        const struct iphdr *iph;
        struct xfrm_state *x = xfrm_input_state(skb);
        struct esp_data *esp = x->data;
        struct crypto_aead *aead = esp->aead;
        int alen = crypto_aead_authsize(aead);
        int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
        int elen = skb->len - hlen;
        int ihl;
        u8 nexthdr[2];
        int padlen;

        kfree(ESP_SKB_CB(skb)->tmp);

        if (unlikely(err))
                goto out;

        if (skb_copy_bits(skb, skb->len-alen-2, nexthdr, 2))
                BUG();

        err = -EINVAL;
        padlen = nexthdr[0];
        if (padlen + 2 + alen >= elen)
                goto out;

        /* ... check padding bits here. Silly. :-) */

        iph = ip_hdr(skb);
        ihl = iph->ihl * 4;

        if (x->encap) {
                struct xfrm_encap_tmpl *encap = x->encap;
                struct udphdr *uh = (void *)(skb_network_header(skb) + ihl);

                /*
                 * 1) if the NAT-T peer's IP or port changed then
                 *    advertize the change to the keying daemon.
                 *    This is an inbound SA, so just compare
                 *    SRC ports.
                 */
                if (iph->saddr != x->props.saddr.a4 ||
                    uh->source != encap->encap_sport) {
                        xfrm_address_t ipaddr;

                        ipaddr.a4 = iph->saddr;
                        km_new_mapping(x, &ipaddr, uh->source);

                        /* XXX: perhaps add an extra
                         * policy check here, to see
                         * if we should allow or
                         * reject a packet from a
                         * different source
                         * address/port.
                         */
                }

                /*
                 * 2) ignore UDP/TCP checksums in case
                 *    of NAT-T in Transport Mode, or
                 *    perform other post-processing fixes
                 *    as per draft-ietf-ipsec-udp-encaps-06,
                 *    section 3.1.2
                 */
                if (x->props.mode == XFRM_MODE_TRANSPORT)
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
        }

        pskb_trim(skb, skb->len - alen - padlen - 2);
        __skb_pull(skb, hlen);
        if (x->props.mode == XFRM_MODE_TUNNEL)
                skb_reset_transport_header(skb);
        else
                skb_set_transport_header(skb, -ihl);

        err = nexthdr[1];

        /* RFC4303: Drop dummy packets without any error */
        if (err == IPPROTO_NONE)
                err = -EINVAL;

out:
        return err;
}

static void esp_input_done(struct crypto_async_request *base, int err)
{
        struct sk_buff *skb = base->data;

        xfrm_input_resume(skb, esp_input_done2(skb, err));
}

/*
 * Note: detecting truncated vs. non-truncated authentication data is very
 * expensive, so we only support truncated data, which is the recommended
 * and common case.
 */
static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
{
        struct ip_esp_hdr *esph;
        struct esp_data *esp = x->data;
        struct crypto_aead *aead = esp->aead;
        struct aead_request *req;
        struct sk_buff *trailer;
        int elen = skb->len - sizeof(*esph) - crypto_aead_ivsize(aead);
        int nfrags;
        int assoclen;
        int sglists;
        int seqhilen;
        __be32 *seqhi;
        void *tmp;
        u8 *iv;
        struct scatterlist *sg;
        struct scatterlist *asg;
        int err = -EINVAL;

        if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
                goto out;

        if (elen <= 0)
                goto out;

        if ((err = skb_cow_data(skb, 0, &trailer)) < 0)
                goto out;

        nfrags = err;

        assoclen = sizeof(*esph);
        sglists = 1;
        seqhilen = 0;

        if (x->props.flags & XFRM_STATE_ESN) {
                sglists += 2;
                seqhilen += sizeof(__be32);
                assoclen += seqhilen;
        }

        err = -ENOMEM;
        tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen);
        if (!tmp)
                goto out;

        ESP_SKB_CB(skb)->tmp = tmp;
        seqhi = esp_tmp_seqhi(tmp);
        iv = esp_tmp_iv(aead, tmp, seqhilen);
        req = esp_tmp_req(aead, iv);
        asg = esp_req_sg(aead, req);
        sg = asg + sglists;

        skb->ip_summed = CHECKSUM_NONE;

        esph = (struct ip_esp_hdr *)skb->data;

        /* Get ivec. This can be wrong, check against another impls. */
        iv = esph->enc_data;

        sg_init_table(sg, nfrags);
        skb_to_sgvec(skb, sg, sizeof(*esph) + crypto_aead_ivsize(aead), elen);

        if ((x->props.flags & XFRM_STATE_ESN)) {
                sg_init_table(asg, 3);
                sg_set_buf(asg, &esph->spi, sizeof(__be32));
                *seqhi = XFRM_SKB_CB(skb)->seq.input.hi;
                sg_set_buf(asg + 1, seqhi, seqhilen);
                sg_set_buf(asg + 2, &esph->seq_no, sizeof(__be32));
        } else
                sg_init_one(asg, esph, sizeof(*esph));

        aead_request_set_callback(req, 0, esp_input_done, skb);
        aead_request_set_crypt(req, sg, sg, elen, iv);
        aead_request_set_assoc(req, asg, assoclen);

        err = crypto_aead_decrypt(req);
        if (err == -EINPROGRESS)
                goto out;

        err = esp_input_done2(skb, err);

out:
        return err;
}

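/*
 * esp4_get_mtu() answers: given an MTU for the ESP packet, how large may
 * the inner payload be?  It strips the ESP header + IV (header_len) and
 * the ICV, rounds down to the (4-byte-aligned) cipher block size to leave
 * room for padding, and reserves 2 bytes for the pad-length/next-header
 * trailer; net_adj compensates for the inner IP header that transport
 * and BEET modes do not re-add.
 */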
static u32 esp4_get_mtu(struct xfrm_state *x, int mtu)
{
        struct esp_data *esp = x->data;
        u32 blksize = ALIGN(crypto_aead_blocksize(esp->aead), 4);
        u32 align = max_t(u32, blksize, esp->padlen);
        unsigned int net_adj;

        switch (x->props.mode) {
        case XFRM_MODE_TRANSPORT:
        case XFRM_MODE_BEET:
                net_adj = sizeof(struct iphdr);
                break;
        case XFRM_MODE_TUNNEL:
                net_adj = 0;
                break;
        default:
                BUG();
        }

        return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) -
                 net_adj) & ~(align - 1)) + (net_adj - 2);
}

static void esp4_err(struct sk_buff *skb, u32 info)
{
        struct net *net = dev_net(skb->dev);
        const struct iphdr *iph = (const struct iphdr *)skb->data;
        struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data+(iph->ihl<<2));
        struct xfrm_state *x;

        switch (icmp_hdr(skb)->type) {
        case ICMP_DEST_UNREACH:
                if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
                        return;
        case ICMP_REDIRECT:
                break;
        default:
                return;
        }

        x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
                              esph->spi, IPPROTO_ESP, AF_INET);
        if (!x)
                return;

        if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) {
                atomic_inc(&flow_cache_genid);
                rt_genid_bump(net);

                ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_ESP, 0);
        } else
                ipv4_redirect(skb, net, 0, 0, IPPROTO_ESP, 0);
        xfrm_state_put(x);
}

static void esp_destroy(struct xfrm_state *x)
{
        struct esp_data *esp = x->data;

        if (!esp)
                return;

        crypto_free_aead(esp->aead);
        kfree(esp);
}

static int esp_init_aead(struct xfrm_state *x)
{
        struct esp_data *esp = x->data;
        struct crypto_aead *aead;
        int err;

        aead = crypto_alloc_aead(x->aead->alg_name, 0, 0);
        err = PTR_ERR(aead);
        if (IS_ERR(aead))
                goto error;

        esp->aead = aead;

        err = crypto_aead_setkey(aead, x->aead->alg_key,
                                 (x->aead->alg_key_len + 7) / 8);
        if (err)
                goto error;

        err = crypto_aead_setauthsize(aead, x->aead->alg_icv_len / 8);
        if (err)
                goto error;

error:
        return err;
}

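/*
 * For classic auth + encryption algorithm pairs the two keys are wrapped
 * into a single "authenc" AEAD key blob:
 *
 *   [ rtattr: CRYPTO_AUTHENC_KEYA_PARAM, enckeylen ][ auth key ][ enc key ]
 *
 * which crypto_aead_setkey() then unpacks on the algorithm side.
 */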
static int esp_init_authenc(struct xfrm_state *x)
{
        struct esp_data *esp = x->data;
        struct crypto_aead *aead;
        struct crypto_authenc_key_param *param;
        struct rtattr *rta;
        char *key;
        char *p;
        char authenc_name[CRYPTO_MAX_ALG_NAME];
        unsigned int keylen;
        int err;

        err = -EINVAL;
        if (x->ealg == NULL)
                goto error;

        err = -ENAMETOOLONG;

        if ((x->props.flags & XFRM_STATE_ESN)) {
                if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
                             "authencesn(%s,%s)",
                             x->aalg ? x->aalg->alg_name : "digest_null",
                             x->ealg->alg_name) >= CRYPTO_MAX_ALG_NAME)
                        goto error;
        } else {
                if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
                             "authenc(%s,%s)",
                             x->aalg ? x->aalg->alg_name : "digest_null",
                             x->ealg->alg_name) >= CRYPTO_MAX_ALG_NAME)
                        goto error;
        }

        aead = crypto_alloc_aead(authenc_name, 0, 0);
        err = PTR_ERR(aead);
        if (IS_ERR(aead))
                goto error;

        esp->aead = aead;

        keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
                 (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
        err = -ENOMEM;
        key = kmalloc(keylen, GFP_KERNEL);
        if (!key)
                goto error;

        p = key;
        rta = (void *)p;
        rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
        rta->rta_len = RTA_LENGTH(sizeof(*param));
        param = RTA_DATA(rta);
        p += RTA_SPACE(sizeof(*param));

        if (x->aalg) {
                struct xfrm_algo_desc *aalg_desc;

                memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8);
                p += (x->aalg->alg_key_len + 7) / 8;

                aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
                BUG_ON(!aalg_desc);

                err = -EINVAL;
                if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
                    crypto_aead_authsize(aead)) {
                        NETDEBUG(KERN_INFO "ESP: %s digestsize %u != %hu\n",
                                 x->aalg->alg_name,
                                 crypto_aead_authsize(aead),
                                 aalg_desc->uinfo.auth.icv_fullbits/8);
                        goto free_key;
                }

                err = crypto_aead_setauthsize(
                        aead, x->aalg->alg_trunc_len / 8);
                if (err)
                        goto free_key;
        }

        param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
        memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);

        err = crypto_aead_setkey(aead, key, keylen);

free_key:
        kfree(key);

error:
        return err;
}

static int esp_init_state(struct xfrm_state *x)
{
        struct esp_data *esp;
        struct crypto_aead *aead;
        u32 align;
        int err;

        esp = kzalloc(sizeof(*esp), GFP_KERNEL);
        if (esp == NULL)
                return -ENOMEM;

        x->data = esp;

        if (x->aead)
                err = esp_init_aead(x);
        else
                err = esp_init_authenc(x);

        if (err)
                goto error;

        aead = esp->aead;

        esp->padlen = 0;

        x->props.header_len = sizeof(struct ip_esp_hdr) +
                              crypto_aead_ivsize(aead);
        if (x->props.mode == XFRM_MODE_TUNNEL)
                x->props.header_len += sizeof(struct iphdr);
        else if (x->props.mode == XFRM_MODE_BEET && x->sel.family != AF_INET6)
                x->props.header_len += IPV4_BEET_PHMAXLEN;
        if (x->encap) {
                struct xfrm_encap_tmpl *encap = x->encap;

                switch (encap->encap_type) {
                default:
                        goto error;
                case UDP_ENCAP_ESPINUDP:
                        x->props.header_len += sizeof(struct udphdr);
                        break;
                case UDP_ENCAP_ESPINUDP_NON_IKE:
                        x->props.header_len += sizeof(struct udphdr) + 2 * sizeof(u32);
                        break;
                }
        }

        align = ALIGN(crypto_aead_blocksize(aead), 4);
        if (esp->padlen)
                align = max_t(u32, align, esp->padlen);
        x->props.trailer_len = align + 1 + crypto_aead_authsize(esp->aead);

error:
        return err;
}

static const struct xfrm_type esp_type =
{
        .description    = "ESP4",
        .owner          = THIS_MODULE,
        .proto          = IPPROTO_ESP,
        .flags          = XFRM_TYPE_REPLAY_PROT,
        .init_state     = esp_init_state,
        .destructor     = esp_destroy,
        .get_mtu        = esp4_get_mtu,
        .input          = esp_input,
        .output         = esp_output
};

static const struct net_protocol esp4_protocol = {
        .handler        = xfrm4_rcv,
        .err_handler    = esp4_err,
        .no_policy      = 1,
        .netns_ok       = 1,
};

static int __init esp4_init(void)
{
        if (xfrm_register_type(&esp_type, AF_INET) < 0) {
                pr_info("%s: can't add xfrm type\n", __func__);
                return -EAGAIN;
        }

        if (inet_add_protocol(&esp4_protocol, IPPROTO_ESP) < 0) {
                pr_info("%s: can't add protocol\n", __func__);
                xfrm_unregister_type(&esp_type, AF_INET);
                return -EAGAIN;
        }

        return 0;
}

static void __exit esp4_fini(void)
{
        if (inet_del_protocol(&esp4_protocol, IPPROTO_ESP) < 0)
                pr_info("%s: can't remove protocol\n", __func__);
        if (xfrm_unregister_type(&esp_type, AF_INET) < 0)
                pr_info("%s: can't remove xfrm type\n", __func__);
}

module_init(esp4_init);
module_exit(esp4_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET, XFRM_PROTO_ESP);