/* net/ipv4/esp4.c */

#define pr_fmt(fmt) "IPsec: " fmt

#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/pfkeyv2.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in6.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/udp.h>

struct esp_skb_cb {
        struct xfrm_skb_cb xfrm;
        void *tmp;
};

#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))
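
/*
 * The scratch buffer allocated by esp_alloc_tmp() is stashed in the skb
 * control block so that the asynchronous completion handlers
 * (esp_output_done(), esp_input_done2()) can find and free it.
 */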

static u32 esp4_get_mtu(struct xfrm_state *x, int mtu);

/*
 * Allocate an AEAD request structure with extra space for SG and IV.
 *
 * For alignment considerations the IV is placed at the front, followed
 * by the request and finally the SG list.
 *
 * TODO: Use spare space in skb for this where possible.
 */
static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqhilen)
{
        unsigned int len;

        len = seqhilen;

        len += crypto_aead_ivsize(aead);

        if (len) {
                len += crypto_aead_alignmask(aead) &
                       ~(crypto_tfm_ctx_alignment() - 1);
                len = ALIGN(len, crypto_tfm_ctx_alignment());
        }

        len += sizeof(struct aead_givcrypt_request) + crypto_aead_reqsize(aead);
        len = ALIGN(len, __alignof__(struct scatterlist));

        len += sizeof(struct scatterlist) * nfrags;

        return kmalloc(len, GFP_ATOMIC);
}
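
/*
 * Illustrative layout of the scratch buffer (a sketch, not normative; the
 * exact padding depends on the transform's alignment mask):
 *
 *   [seqhi][pad][IV][pad][aead_givcrypt_request + ctx][SG entries ...]
 *
 * The esp_tmp_*() helpers below recover each region by reapplying the
 * same alignment rules to the base pointer.
 */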

static inline __be32 *esp_tmp_seqhi(void *tmp)
{
        return PTR_ALIGN((__be32 *)tmp, __alignof__(__be32));
}

static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int seqhilen)
{
        return crypto_aead_ivsize(aead) ?
               PTR_ALIGN((u8 *)tmp + seqhilen,
                         crypto_aead_alignmask(aead) + 1) : tmp + seqhilen;
}

static inline struct aead_givcrypt_request *esp_tmp_givreq(
        struct crypto_aead *aead, u8 *iv)
{
        struct aead_givcrypt_request *req;

        req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
                                crypto_tfm_ctx_alignment());
        aead_givcrypt_set_tfm(req, aead);
        return req;
}

static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
{
        struct aead_request *req;

        req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
                                crypto_tfm_ctx_alignment());
        aead_request_set_tfm(req, aead);
        return req;
}

static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
                                             struct aead_request *req)
{
        return (void *)ALIGN((unsigned long)(req + 1) +
                             crypto_aead_reqsize(aead),
                             __alignof__(struct scatterlist));
}

static inline struct scatterlist *esp_givreq_sg(
        struct crypto_aead *aead, struct aead_givcrypt_request *req)
{
        return (void *)ALIGN((unsigned long)(req + 1) +
                             crypto_aead_reqsize(aead),
                             __alignof__(struct scatterlist));
}
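
/*
 * Completion callback for asynchronous encryption: free the scratch
 * buffer and resume the packet in the xfrm output path.
 */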
static void esp_output_done(struct crypto_async_request *base, int err)
{
        struct sk_buff *skb = base->data;

        kfree(ESP_SKB_CB(skb)->tmp);
        xfrm_output_resume(skb, err);
}

static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
{
        int err;
        struct ip_esp_hdr *esph;
        struct crypto_aead *aead;
        struct aead_givcrypt_request *req;
        struct scatterlist *sg;
        struct scatterlist *asg;
        struct esp_data *esp;
        struct sk_buff *trailer;
        void *tmp;
        u8 *iv;
        u8 *tail;
        int blksize;
        int clen;
        int alen;
        int plen;
        int tfclen;
        int nfrags;
        int assoclen;
        int sglists;
        int seqhilen;
        __be32 *seqhi;

        /* skb is pure payload to encrypt */

        esp = x->data;
        aead = esp->aead;
        alen = crypto_aead_authsize(aead);

        tfclen = 0;
        if (x->tfcpad) {
                struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
                u32 padto;

                padto = min(x->tfcpad, esp4_get_mtu(x, dst->child_mtu_cached));
                if (skb->len < padto)
                        tfclen = padto - skb->len;
        }
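
        /*
         * clen is the ciphertext length: the payload plus TFC padding plus
         * the two trailer bytes (pad length, next header), rounded up to
         * the cipher block size and at least 4-byte aligned per RFC 4303.
         * plen is how much padding (including those two bytes) that
         * requires.
         */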
        blksize = ALIGN(crypto_aead_blocksize(aead), 4);
        clen = ALIGN(skb->len + 2 + tfclen, blksize);
        if (esp->padlen)
                clen = ALIGN(clen, esp->padlen);
        plen = clen - skb->len - tfclen;

        err = skb_cow_data(skb, tfclen + plen + alen, &trailer);
        if (err < 0)
                goto error;
        nfrags = err;

        assoclen = sizeof(*esph);
        sglists = 1;
        seqhilen = 0;
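
        /*
         * With extended sequence numbers the upper 32 bits are not carried
         * in the packet but must still be covered by the ICV, so they are
         * folded into the associated data (SPI, seq-hi, seq-lo over three
         * scatterlist entries).
         */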
        if (x->props.flags & XFRM_STATE_ESN) {
                sglists += 2;
                seqhilen += sizeof(__be32);
                assoclen += seqhilen;
        }

        tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen);
        if (!tmp) {
                err = -ENOMEM;
                goto error;
        }

        seqhi = esp_tmp_seqhi(tmp);
        iv = esp_tmp_iv(aead, tmp, seqhilen);
        req = esp_tmp_givreq(aead, iv);
        asg = esp_givreq_sg(aead, req);
        sg = asg + sglists;

        /* Fill padding... */
        tail = skb_tail_pointer(trailer);
        if (tfclen) {
                memset(tail, 0, tfclen);
                tail += tfclen;
        }
        do {
                int i;
                for (i = 0; i < plen - 2; i++)
                        tail[i] = i + 1;
        } while (0);
        tail[plen - 2] = plen - 2;
        tail[plen - 1] = *skb_mac_header(skb);
        pskb_put(skb, trailer, clen - skb->len + alen);
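
        /*
         * The ESP trailer is now complete: optional TFC padding, the
         * self-describing pad bytes (1, 2, 3, ...), the pad-length byte,
         * and the next-header byte that the xfrm layer stashed at the MAC
         * header offset.
         */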

        skb_push(skb, -skb_network_offset(skb));
        esph = ip_esp_hdr(skb);
        *skb_mac_header(skb) = IPPROTO_ESP;

        /* this is non-NULL only with UDP Encapsulation */
        if (x->encap) {
                struct xfrm_encap_tmpl *encap = x->encap;
                struct udphdr *uh;
                __be32 *udpdata32;
                __be16 sport, dport;
                int encap_type;

                spin_lock_bh(&x->lock);
                sport = encap->encap_sport;
                dport = encap->encap_dport;
                encap_type = encap->encap_type;
                spin_unlock_bh(&x->lock);

                uh = (struct udphdr *)esph;
                uh->source = sport;
                uh->dest = dport;
                uh->len = htons(skb->len - skb_transport_offset(skb));
                uh->check = 0;
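
                /*
                 * RFC 3948-style encapsulation: for ESPINUDP the ESP
                 * header follows the UDP header directly; the older
                 * NON-IKE draft format inserts two zero 32-bit words
                 * (a non-IKE marker) in between.
                 */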
                switch (encap_type) {
                default:
                case UDP_ENCAP_ESPINUDP:
                        esph = (struct ip_esp_hdr *)(uh + 1);
                        break;
                case UDP_ENCAP_ESPINUDP_NON_IKE:
                        udpdata32 = (__be32 *)(uh + 1);
                        udpdata32[0] = udpdata32[1] = 0;
                        esph = (struct ip_esp_hdr *)(udpdata32 + 2);
                        break;
                }

                *skb_mac_header(skb) = IPPROTO_UDP;
        }

        esph->spi = x->id.spi;
        esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

        sg_init_table(sg, nfrags);
        skb_to_sgvec(skb, sg,
                     esph->enc_data + crypto_aead_ivsize(aead) - skb->data,
                     clen + alen);

        if ((x->props.flags & XFRM_STATE_ESN)) {
                sg_init_table(asg, 3);
                sg_set_buf(asg, &esph->spi, sizeof(__be32));
                *seqhi = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
                sg_set_buf(asg + 1, seqhi, seqhilen);
                sg_set_buf(asg + 2, &esph->seq_no, sizeof(__be32));
        } else
                sg_init_one(asg, esph, sizeof(*esph));
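
        /*
         * The IV generator (givencrypt) writes a fresh IV directly into
         * the packet at esph->enc_data, seeded with the low 32 bits of
         * the output sequence number.
         */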
        aead_givcrypt_set_callback(req, 0, esp_output_done, skb);
        aead_givcrypt_set_crypt(req, sg, sg, clen, iv);
        aead_givcrypt_set_assoc(req, asg, assoclen);
        aead_givcrypt_set_giv(req, esph->enc_data,
                              XFRM_SKB_CB(skb)->seq.output.low);

        ESP_SKB_CB(skb)->tmp = tmp;
        err = crypto_aead_givencrypt(req);
        if (err == -EINPROGRESS)
                goto error;

        if (err == -EBUSY)
                err = NET_XMIT_DROP;

        kfree(tmp);

error:
        return err;
}
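
/*
 * Post-decryption processing, shared by the synchronous and asynchronous
 * paths: validate and strip the ESP trailer, handle NAT-T peer changes,
 * and return the inner protocol number.
 */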
static int esp_input_done2(struct sk_buff *skb, int err)
{
        const struct iphdr *iph;
        struct xfrm_state *x = xfrm_input_state(skb);
        struct esp_data *esp = x->data;
        struct crypto_aead *aead = esp->aead;
        int alen = crypto_aead_authsize(aead);
        int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
        int elen = skb->len - hlen;
        int ihl;
        u8 nexthdr[2];
        int padlen;

        kfree(ESP_SKB_CB(skb)->tmp);

        if (unlikely(err))
                goto out;

        if (skb_copy_bits(skb, skb->len-alen-2, nexthdr, 2))
                BUG();

        err = -EINVAL;
        padlen = nexthdr[0];
        if (padlen + 2 + alen >= elen)
                goto out;

        /* ... check padding bits here. Silly. :-) */

        iph = ip_hdr(skb);
        ihl = iph->ihl * 4;

        if (x->encap) {
                struct xfrm_encap_tmpl *encap = x->encap;
                struct udphdr *uh = (void *)(skb_network_header(skb) + ihl);

                /*
                 * 1) if the NAT-T peer's IP or port changed then
                 *    advertise the change to the keying daemon.
                 *    This is an inbound SA, so just compare
                 *    SRC ports.
                 */
                if (iph->saddr != x->props.saddr.a4 ||
                    uh->source != encap->encap_sport) {
                        xfrm_address_t ipaddr;

                        ipaddr.a4 = iph->saddr;
                        km_new_mapping(x, &ipaddr, uh->source);

                        /* XXX: perhaps add an extra
                         * policy check here, to see
                         * if we should allow or
                         * reject a packet from a
                         * different source
                         * address/port.
                         */
                }

                /*
                 * 2) ignore UDP/TCP checksums in case
                 *    of NAT-T in Transport Mode, or
                 *    perform other post-processing fixes
                 *    as per draft-ietf-ipsec-udp-encaps-06,
                 *    section 3.1.2
                 */
                if (x->props.mode == XFRM_MODE_TRANSPORT)
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
        }

        pskb_trim(skb, skb->len - alen - padlen - 2);
        __skb_pull(skb, hlen);
        if (x->props.mode == XFRM_MODE_TUNNEL)
                skb_reset_transport_header(skb);
        else
                skb_set_transport_header(skb, -ihl);

        err = nexthdr[1];

        /* RFC4303: Drop dummy packets without any error */
        if (err == IPPROTO_NONE)
                err = -EINVAL;

out:
        return err;
}

static void esp_input_done(struct crypto_async_request *base, int err)
{
        struct sk_buff *skb = base->data;

        xfrm_input_resume(skb, esp_input_done2(skb, err));
}

/*
 * Note: detecting truncated vs. non-truncated authentication data is very
 * expensive, so we only support truncated data, which is the recommended
 * and common case.
 */
static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
{
        struct ip_esp_hdr *esph;
        struct esp_data *esp = x->data;
        struct crypto_aead *aead = esp->aead;
        struct aead_request *req;
        struct sk_buff *trailer;
        int elen = skb->len - sizeof(*esph) - crypto_aead_ivsize(aead);
        int nfrags;
        int assoclen;
        int sglists;
        int seqhilen;
        __be32 *seqhi;
        void *tmp;
        u8 *iv;
        struct scatterlist *sg;
        struct scatterlist *asg;
        int err = -EINVAL;

        if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
                goto out;

        if (elen <= 0)
                goto out;

        if ((err = skb_cow_data(skb, 0, &trailer)) < 0)
                goto out;
        nfrags = err;

        assoclen = sizeof(*esph);
        sglists = 1;
        seqhilen = 0;

        if (x->props.flags & XFRM_STATE_ESN) {
                sglists += 2;
                seqhilen += sizeof(__be32);
                assoclen += seqhilen;
        }

        err = -ENOMEM;
        tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen);
        if (!tmp)
                goto out;

        ESP_SKB_CB(skb)->tmp = tmp;
        seqhi = esp_tmp_seqhi(tmp);
        iv = esp_tmp_iv(aead, tmp, seqhilen);
        req = esp_tmp_req(aead, iv);
        asg = esp_req_sg(aead, req);
        sg = asg + sglists;

        skb->ip_summed = CHECKSUM_NONE;

        esph = (struct ip_esp_hdr *)skb->data;

        /* Get ivec. This can be wrong, check against other implementations. */
        iv = esph->enc_data;

        sg_init_table(sg, nfrags);
        skb_to_sgvec(skb, sg, sizeof(*esph) + crypto_aead_ivsize(aead), elen);

        if ((x->props.flags & XFRM_STATE_ESN)) {
                sg_init_table(asg, 3);
                sg_set_buf(asg, &esph->spi, sizeof(__be32));
                *seqhi = XFRM_SKB_CB(skb)->seq.input.hi;
                sg_set_buf(asg + 1, seqhi, seqhilen);
                sg_set_buf(asg + 2, &esph->seq_no, sizeof(__be32));
        } else
                sg_init_one(asg, esph, sizeof(*esph));

        aead_request_set_callback(req, 0, esp_input_done, skb);
        aead_request_set_crypt(req, sg, sg, elen, iv);
        aead_request_set_assoc(req, asg, assoclen);

        err = crypto_aead_decrypt(req);
        if (err == -EINPROGRESS)
                goto out;

        err = esp_input_done2(skb, err);

out:
        return err;
}

static u32 esp4_get_mtu(struct xfrm_state *x, int mtu)
{
        struct esp_data *esp = x->data;
        u32 blksize = ALIGN(crypto_aead_blocksize(esp->aead), 4);
        u32 align = max_t(u32, blksize, esp->padlen);
        unsigned int net_adj;

        switch (x->props.mode) {
        case XFRM_MODE_TRANSPORT:
        case XFRM_MODE_BEET:
                net_adj = sizeof(struct iphdr);
                break;
        case XFRM_MODE_TUNNEL:
                net_adj = 0;
                break;
        default:
                BUG();
        }

        return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) -
                 net_adj) & ~(align - 1)) + (net_adj - 2);
}
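
/*
 * Worked example (illustrative, assuming transport mode and AES-CBC with
 * a 16-byte block size and IV plus a 96-bit HMAC ICV): header_len is
 * 8 + 16 = 24, so for mtu = 1500 the usable payload is
 * ((1500 - 24 - 12 - 20) & ~15) + (20 - 2) = 1440 + 18 = 1458 bytes.
 */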

static void esp4_err(struct sk_buff *skb, u32 info)
{
        struct net *net = dev_net(skb->dev);
        const struct iphdr *iph = (const struct iphdr *)skb->data;
        struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data+(iph->ihl<<2));
        struct xfrm_state *x;

        switch (icmp_hdr(skb)->type) {
        case ICMP_DEST_UNREACH:
                if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
                        return;
        case ICMP_REDIRECT:
                break;
        default:
                return;
        }

        x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
                              esph->spi, IPPROTO_ESP, AF_INET);
        if (!x)
                return;

        if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
                ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_ESP, 0);
        else
                ipv4_redirect(skb, net, 0, 0, IPPROTO_ESP, 0);
        xfrm_state_put(x);
}

static void esp_destroy(struct xfrm_state *x)
{
        struct esp_data *esp = x->data;

        if (!esp)
                return;

        crypto_free_aead(esp->aead);
        kfree(esp);
}

static int esp_init_aead(struct xfrm_state *x)
{
        struct esp_data *esp = x->data;
        struct crypto_aead *aead;
        int err;

        aead = crypto_alloc_aead(x->aead->alg_name, 0, 0);
        err = PTR_ERR(aead);
        if (IS_ERR(aead))
                goto error;

        esp->aead = aead;

        err = crypto_aead_setkey(aead, x->aead->alg_key,
                                 (x->aead->alg_key_len + 7) / 8);
        if (err)
                goto error;

        err = crypto_aead_setauthsize(aead, x->aead->alg_icv_len / 8);
        if (err)
                goto error;

error:
        return err;
}
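
/*
 * esp_init_aead() above covers combined-mode algorithms negotiated as a
 * single transform (e.g. rfc4106(gcm(aes))); esp_init_authenc() below
 * composes an authenc() instance from separate authentication and
 * encryption algorithms.
 */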

static int esp_init_authenc(struct xfrm_state *x)
{
        struct esp_data *esp = x->data;
        struct crypto_aead *aead;
        struct crypto_authenc_key_param *param;
        struct rtattr *rta;
        char *key;
        char *p;
        char authenc_name[CRYPTO_MAX_ALG_NAME];
        unsigned int keylen;
        int err;

        err = -EINVAL;
        if (x->ealg == NULL)
                goto error;

        err = -ENAMETOOLONG;

        if ((x->props.flags & XFRM_STATE_ESN)) {
                if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
                             "authencesn(%s,%s)",
                             x->aalg ? x->aalg->alg_name : "digest_null",
                             x->ealg->alg_name) >= CRYPTO_MAX_ALG_NAME)
                        goto error;
        } else {
                if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
                             "authenc(%s,%s)",
                             x->aalg ? x->aalg->alg_name : "digest_null",
                             x->ealg->alg_name) >= CRYPTO_MAX_ALG_NAME)
                        goto error;
        }

        aead = crypto_alloc_aead(authenc_name, 0, 0);
        err = PTR_ERR(aead);
        if (IS_ERR(aead))
                goto error;

        esp->aead = aead;

        keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
                 (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
        err = -ENOMEM;
        key = kmalloc(keylen, GFP_KERNEL);
        if (!key)
                goto error;

        p = key;
        rta = (void *)p;
        rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
        rta->rta_len = RTA_LENGTH(sizeof(*param));
        param = RTA_DATA(rta);
        p += RTA_SPACE(sizeof(*param));
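
        /*
         * The key blob built here follows the format the authenc()
         * template expects: [rtattr carrying enckeylen][auth key][enc key].
         */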

        if (x->aalg) {
                struct xfrm_algo_desc *aalg_desc;

                memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8);
                p += (x->aalg->alg_key_len + 7) / 8;

                aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
                BUG_ON(!aalg_desc);

                err = -EINVAL;
                if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
                    crypto_aead_authsize(aead)) {
                        NETDEBUG(KERN_INFO "ESP: %s digestsize %u != %hu\n",
                                 x->aalg->alg_name,
                                 crypto_aead_authsize(aead),
                                 aalg_desc->uinfo.auth.icv_fullbits/8);
                        goto free_key;
                }

                err = crypto_aead_setauthsize(
                        aead, x->aalg->alg_trunc_len / 8);
                if (err)
                        goto free_key;
        }

        param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
        memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);

        err = crypto_aead_setkey(aead, key, keylen);

free_key:
        kfree(key);

error:
        return err;
}

static int esp_init_state(struct xfrm_state *x)
{
        struct esp_data *esp;
        struct crypto_aead *aead;
        u32 align;
        int err;

        esp = kzalloc(sizeof(*esp), GFP_KERNEL);
        if (esp == NULL)
                return -ENOMEM;

        x->data = esp;

        if (x->aead)
                err = esp_init_aead(x);
        else
                err = esp_init_authenc(x);

        if (err)
                goto error;

        aead = esp->aead;

        esp->padlen = 0;

        x->props.header_len = sizeof(struct ip_esp_hdr) +
                              crypto_aead_ivsize(aead);
        if (x->props.mode == XFRM_MODE_TUNNEL)
                x->props.header_len += sizeof(struct iphdr);
        else if (x->props.mode == XFRM_MODE_BEET && x->sel.family != AF_INET6)
                x->props.header_len += IPV4_BEET_PHMAXLEN;
        if (x->encap) {
                struct xfrm_encap_tmpl *encap = x->encap;

                switch (encap->encap_type) {
                default:
                        goto error;
                case UDP_ENCAP_ESPINUDP:
                        x->props.header_len += sizeof(struct udphdr);
                        break;
                case UDP_ENCAP_ESPINUDP_NON_IKE:
                        x->props.header_len += sizeof(struct udphdr) + 2 * sizeof(u32);
                        break;
                }
        }

        align = ALIGN(crypto_aead_blocksize(aead), 4);
        if (esp->padlen)
                align = max_t(u32, align, esp->padlen);
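
        /*
         * Worst-case trailer: up to align-1 bytes of padding plus the
         * two pad-length/next-header bytes (together at most align + 1),
         * followed by the ICV.
         */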
        x->props.trailer_len = align + 1 + crypto_aead_authsize(esp->aead);

error:
        return err;
}

static const struct xfrm_type esp_type =
{
        .description    = "ESP4",
        .owner          = THIS_MODULE,
        .proto          = IPPROTO_ESP,
        .flags          = XFRM_TYPE_REPLAY_PROT,
        .init_state     = esp_init_state,
        .destructor     = esp_destroy,
        .get_mtu        = esp4_get_mtu,
        .input          = esp_input,
        .output         = esp_output
};

static const struct net_protocol esp4_protocol = {
        .handler        = xfrm4_rcv,
        .err_handler    = esp4_err,
        .no_policy      = 1,
        .netns_ok       = 1,
};
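
/*
 * The xfrm ESP type is registered before the IPPROTO_ESP handler is
 * hooked into the inet stack; module teardown reverses the order.
 */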
static int __init esp4_init(void)
{
        if (xfrm_register_type(&esp_type, AF_INET) < 0) {
                pr_info("%s: can't add xfrm type\n", __func__);
                return -EAGAIN;
        }

        if (inet_add_protocol(&esp4_protocol, IPPROTO_ESP) < 0) {
                pr_info("%s: can't add protocol\n", __func__);
                xfrm_unregister_type(&esp_type, AF_INET);
                return -EAGAIN;
        }

        return 0;
}

static void __exit esp4_fini(void)
{
        if (inet_del_protocol(&esp4_protocol, IPPROTO_ESP) < 0)
                pr_info("%s: can't remove protocol\n", __func__);
        if (xfrm_unregister_type(&esp_type, AF_INET) < 0)
                pr_info("%s: can't remove xfrm type\n", __func__);
}

module_init(esp4_init);
module_exit(esp4_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET, XFRM_PROTO_ESP);