/*
 * net/ipv6/esp6.c
 *
 * Copyright (C)2002 USAGI/WIDE Project
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Authors
 *
 *      Mitsuru KANDA @USAGI       : IPv6 Support
 *      Kazunori MIYAZAWA @USAGI   :
 *      Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *
 *      This file is derived from net/ipv4/esp.c
 */

#define pr_fmt(fmt) "IPv6: " fmt
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/pfkeyv2.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/ip6_route.h>
#include <net/icmp.h>
#include <net/ipv6.h>
#include <net/protocol.h>
#include <linux/icmpv6.h>
struct esp_skb_cb {
        struct xfrm_skb_cb xfrm;
        void *tmp;
};

#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))

static u32 esp6_get_mtu(struct xfrm_state *x, int mtu);
/*
 * Allocate an AEAD request structure with extra space for SG and IV.
 *
 * For alignment considerations the upper 32 bits of the sequence number are
 * placed at the front, if present. Followed by the IV, the request and finally
 * the SG list.
 *
 * TODO: Use spare space in skb for this where possible.
 */
static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqihlen)
{
        unsigned int len;

        len = seqihlen;

        len += crypto_aead_ivsize(aead);

        if (len) {
                len += crypto_aead_alignmask(aead) &
                       ~(crypto_tfm_ctx_alignment() - 1);
                len = ALIGN(len, crypto_tfm_ctx_alignment());
        }

        len += sizeof(struct aead_request) + crypto_aead_reqsize(aead);
        len = ALIGN(len, __alignof__(struct scatterlist));

        len += sizeof(struct scatterlist) * nfrags;

        return kmalloc(len, GFP_ATOMIC);
}
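/*
 * The helpers below carve the esp_alloc_tmp() buffer back into its parts.
 * With ESN enabled and a non-empty IV the layout is:
 *
 *      [ seqhi (__be32) ][ IV ][ aead_request + tfm ctx ][ scatterlist[nfrags] ]
 */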
static inline __be32 *esp_tmp_seqhi(void *tmp)
{
        return PTR_ALIGN((__be32 *)tmp, __alignof__(__be32));
}

static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int seqhilen)
{
        return crypto_aead_ivsize(aead) ?
               PTR_ALIGN((u8 *)tmp + seqhilen,
                         crypto_aead_alignmask(aead) + 1) : tmp + seqhilen;
}

static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
{
        struct aead_request *req;

        req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
                                crypto_tfm_ctx_alignment());
        aead_request_set_tfm(req, aead);
        return req;
}

static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
                                             struct aead_request *req)
{
        return (void *)ALIGN((unsigned long)(req + 1) +
                             crypto_aead_reqsize(aead),
                             __alignof__(struct scatterlist));
}
static void esp_output_done(struct crypto_async_request *base, int err)
{
        struct sk_buff *skb = base->data;

        kfree(ESP_SKB_CB(skb)->tmp);
        xfrm_output_resume(skb, err);
}

/* Move ESP header back into place. */
static void esp_restore_header(struct sk_buff *skb, unsigned int offset)
{
        struct ip_esp_hdr *esph = (void *)(skb->data + offset);
        void *tmp = ESP_SKB_CB(skb)->tmp;
        __be32 *seqhi = esp_tmp_seqhi(tmp);

        esph->seq_no = esph->spi;
        esph->spi = *seqhi;
}

static void esp_output_restore_header(struct sk_buff *skb)
{
        esp_restore_header(skb, skb_transport_offset(skb) - sizeof(__be32));
}

static void esp_output_done_esn(struct crypto_async_request *base, int err)
{
        struct sk_buff *skb = base->data;

        esp_output_restore_header(skb);
        esp_output_done(base, err);
}
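/*
 * Transmit path: grow and fill the trailer (TFC pad, self-describing
 * padding, pad length, next header), prepend the ESP header, build one
 * scatterlist over header + IV + payload + ICV and hand it to the AEAD
 * transform.  -EINPROGRESS means the crypto layer will complete
 * asynchronously via esp_output_done()/esp_output_done_esn().
 */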
static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
{
        int err;
        struct ip_esp_hdr *esph;
        struct crypto_aead *aead;
        struct aead_request *req;
        struct scatterlist *sg;
        struct sk_buff *trailer;
        void *tmp;
        int blksize;
        int clen;
        int alen;
        int plen;
        int ivlen;
        int tfclen;
        int nfrags;
        int assoclen;
        int seqhilen;
        u8 *iv;
        u8 *tail;
        __be32 *seqhi;
        __be64 seqno;

        /* skb is pure payload to encrypt */
        aead = x->data;
        alen = crypto_aead_authsize(aead);
        ivlen = crypto_aead_ivsize(aead);

        tfclen = 0;
        if (x->tfcpad) {
                struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
                u32 padto;

                padto = min(x->tfcpad, esp6_get_mtu(x, dst->child_mtu_cached));
                if (skb->len < padto)
                        tfclen = padto - skb->len;
        }
        blksize = ALIGN(crypto_aead_blocksize(aead), 4);
        clen = ALIGN(skb->len + 2 + tfclen, blksize);
        plen = clen - skb->len - tfclen;

        err = skb_cow_data(skb, tfclen + plen + alen, &trailer);
        if (err < 0)
                goto error;
        nfrags = err;

        assoclen = sizeof(*esph);
        seqhilen = 0;

        if (x->props.flags & XFRM_STATE_ESN) {
                seqhilen += sizeof(__be32);
                assoclen += seqhilen;
        }

        tmp = esp_alloc_tmp(aead, nfrags, seqhilen);
        if (!tmp) {
                err = -ENOMEM;
                goto error;
        }

        seqhi = esp_tmp_seqhi(tmp);
        iv = esp_tmp_iv(aead, tmp, seqhilen);
        req = esp_tmp_req(aead, iv);
        sg = esp_req_sg(aead, req);

        /* Fill padding... */
        tail = skb_tail_pointer(trailer);
        if (tfclen) {
                memset(tail, 0, tfclen);
                tail += tfclen;
        }
        do {
                int i;
                for (i = 0; i < plen - 2; i++)
                        tail[i] = i + 1;
        } while (0);
        tail[plen - 2] = plen - 2;
        tail[plen - 1] = *skb_mac_header(skb);
        pskb_put(skb, trailer, clen - skb->len + alen);

        skb_push(skb, -skb_network_offset(skb));
        esph = ip_esp_hdr(skb);
        *skb_mac_header(skb) = IPPROTO_ESP;

        esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

        aead_request_set_callback(req, 0, esp_output_done, skb);

        /* For ESN we move the header forward by 4 bytes to
         * accommodate the high bits.  We will move it back after
         * encryption.
         */
        if ((x->props.flags & XFRM_STATE_ESN)) {
                esph = (void *)(skb_transport_header(skb) - sizeof(__be32));
                *seqhi = esph->spi;
                esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
                aead_request_set_callback(req, 0, esp_output_done_esn, skb);
        }

        esph->spi = x->id.spi;

        sg_init_table(sg, nfrags);
        skb_to_sgvec(skb, sg,
                     (unsigned char *)esph - skb->data,
                     assoclen + ivlen + clen + alen);

        aead_request_set_crypt(req, sg, sg, ivlen + clen, iv);
        aead_request_set_ad(req, assoclen);

        seqno = cpu_to_be64(XFRM_SKB_CB(skb)->seq.output.low +
                            ((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32));

        memset(iv, 0, ivlen);
        memcpy(iv + ivlen - min(ivlen, 8), (u8 *)&seqno + 8 - min(ivlen, 8),
               min(ivlen, 8));

        ESP_SKB_CB(skb)->tmp = tmp;
        err = crypto_aead_encrypt(req);

        switch (err) {
        case -EINPROGRESS:
                goto error;

        case -EBUSY:
                err = NET_XMIT_DROP;
                break;

        case 0:
                if ((x->props.flags & XFRM_STATE_ESN))
                        esp_output_restore_header(skb);
        }

        kfree(tmp);

error:
        return err;
}
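/*
 * Second half of the receive path, run once the AEAD transform has
 * authenticated and decrypted the packet: strip the ESP header, IV,
 * padding and ICV, and return the inner next-header value (or a
 * negative error).
 */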
static int esp_input_done2(struct sk_buff *skb, int err)
{
        struct xfrm_state *x = xfrm_input_state(skb);
        struct crypto_aead *aead = x->data;
        int alen = crypto_aead_authsize(aead);
        int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
        int elen = skb->len - hlen;
        int hdr_len = skb_network_header_len(skb);
        int padlen;
        u8 nexthdr[2];

        kfree(ESP_SKB_CB(skb)->tmp);

        if (unlikely(err))
                goto out;

        if (skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2))
                BUG();

        err = -EINVAL;
        padlen = nexthdr[0];
        if (padlen + 2 + alen >= elen) {
                net_dbg_ratelimited("ipsec esp packet is garbage padlen=%d, elen=%d\n",
                                    padlen + 2, elen - alen);
                goto out;
        }

        /* ... check padding bits here. Silly. :-) */

        pskb_trim(skb, skb->len - alen - padlen - 2);
        __skb_pull(skb, hlen);
        if (x->props.mode == XFRM_MODE_TUNNEL)
                skb_reset_transport_header(skb);
        else
                skb_set_transport_header(skb, -hdr_len);

        err = nexthdr[1];

        /* RFC4303: Drop dummy packets without any error */
        if (err == IPPROTO_NONE)
                err = -EINVAL;

out:
        return err;
}
static void esp_input_done(struct crypto_async_request *base, int err)
{
        struct sk_buff *skb = base->data;

        xfrm_input_resume(skb, esp_input_done2(skb, err));
}

static void esp_input_restore_header(struct sk_buff *skb)
{
        esp_restore_header(skb, 0);
        __skb_pull(skb, 4);
}

static void esp_input_done_esn(struct crypto_async_request *base, int err)
{
        struct sk_buff *skb = base->data;

        esp_input_restore_header(skb);
        esp_input_done(base, err);
}
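/*
 * Receive path: make the skb data writable where needed, build a
 * scatterlist over ESP header + IV + ciphertext + ICV and ask the AEAD
 * transform to verify and decrypt it.  Asynchronous completions arrive
 * through esp_input_done() or, with extended sequence numbers,
 * esp_input_done_esn().
 */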
static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
{
        struct ip_esp_hdr *esph;
        struct crypto_aead *aead = x->data;
        struct aead_request *req;
        struct sk_buff *trailer;
        int ivlen = crypto_aead_ivsize(aead);
        int elen = skb->len - sizeof(*esph) - ivlen;
        int nfrags;
        int assoclen;
        int seqhilen;
        int ret = 0;
        void *tmp;
        __be32 *seqhi;
        u8 *iv;
        struct scatterlist *sg;

        if (!pskb_may_pull(skb, sizeof(*esph) + ivlen)) {
                ret = -EINVAL;
                goto out;
        }

        if (elen <= 0) {
                ret = -EINVAL;
                goto out;
        }

        nfrags = skb_cow_data(skb, 0, &trailer);
        if (nfrags < 0) {
                ret = -EINVAL;
                goto out;
        }

        ret = -ENOMEM;

        assoclen = sizeof(*esph);
        seqhilen = 0;

        if (x->props.flags & XFRM_STATE_ESN) {
                seqhilen += sizeof(__be32);
                assoclen += seqhilen;
        }

        tmp = esp_alloc_tmp(aead, nfrags, seqhilen);
        if (!tmp)
                goto out;

        ESP_SKB_CB(skb)->tmp = tmp;
        seqhi = esp_tmp_seqhi(tmp);
        iv = esp_tmp_iv(aead, tmp, seqhilen);
        req = esp_tmp_req(aead, iv);
        sg = esp_req_sg(aead, req);

        skb->ip_summed = CHECKSUM_NONE;

        esph = (struct ip_esp_hdr *)skb->data;

        aead_request_set_callback(req, 0, esp_input_done, skb);

        /* For ESN we move the header forward by 4 bytes to
         * accommodate the high bits.  We will move it back after
         * decryption.
         */
        if ((x->props.flags & XFRM_STATE_ESN)) {
                esph = (void *)skb_push(skb, 4);
                *seqhi = esph->spi;
                esph->spi = esph->seq_no;
                esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.input.hi);
                aead_request_set_callback(req, 0, esp_input_done_esn, skb);
        }

        sg_init_table(sg, nfrags);
        skb_to_sgvec(skb, sg, 0, skb->len);

        aead_request_set_crypt(req, sg, sg, elen + ivlen, iv);
        aead_request_set_ad(req, assoclen);

        ret = crypto_aead_decrypt(req);
        if (ret == -EINPROGRESS)
                goto out;

        if ((x->props.flags & XFRM_STATE_ESN))
                esp_input_restore_header(skb);

        ret = esp_input_done2(skb, ret);

out:
        return ret;
}
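/*
 * Given a path MTU, return the largest payload that still fits once the
 * ESP header, IV, block-size padding, pad-length/next-header bytes and
 * the ICV have been added.
 */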
static u32 esp6_get_mtu(struct xfrm_state *x, int mtu)
{
        struct crypto_aead *aead = x->data;
        u32 blksize = ALIGN(crypto_aead_blocksize(aead), 4);
        unsigned int net_adj;

        if (x->props.mode != XFRM_MODE_TUNNEL)
                net_adj = sizeof(struct ipv6hdr);
        else
                net_adj = 0;

        return ((mtu - x->props.header_len - crypto_aead_authsize(aead) -
                 net_adj) & ~(blksize - 1)) + net_adj - 2;
}
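/*
 * ICMPv6 error handler: react to Packet Too Big and redirect messages for
 * ESP by updating the path MTU or the route of the matching xfrm state.
 */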
static int esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                    u8 type, u8 code, int offset, __be32 info)
{
        struct net *net = dev_net(skb->dev);
        const struct ipv6hdr *iph = (const struct ipv6hdr *)skb->data;
        struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + offset);
        struct xfrm_state *x;

        if (type != ICMPV6_PKT_TOOBIG &&
            type != NDISC_REDIRECT)
                return 0;

        x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
                              esph->spi, IPPROTO_ESP, AF_INET6);
        if (!x)
                return 0;

        if (type == NDISC_REDIRECT)
                ip6_redirect(skb, net, skb->dev->ifindex, 0);
        else
                ip6_update_pmtu(skb, net, info, 0, 0);
        xfrm_state_put(x);

        return 0;
}
static void esp6_destroy(struct xfrm_state *x)
{
        struct crypto_aead *aead = x->data;

        if (!aead)
                return;

        crypto_free_aead(aead);
}
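/*
 * State setup helpers: esp_init_aead() wraps a native AEAD algorithm in
 * its IV-generator template, while esp_init_authenc() builds an authenc()
 * or authencesn() transform from separate encryption and authentication
 * algorithms.
 */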
static int esp_init_aead(struct xfrm_state *x)
{
        char aead_name[CRYPTO_MAX_ALG_NAME];
        struct crypto_aead *aead;
        int err;

        err = -ENAMETOOLONG;
        if (snprintf(aead_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
                     x->geniv, x->aead->alg_name) >= CRYPTO_MAX_ALG_NAME)
                goto error;

        aead = crypto_alloc_aead(aead_name, 0, 0);
        err = PTR_ERR(aead);
        if (IS_ERR(aead))
                goto error;

        x->data = aead;

        err = crypto_aead_setkey(aead, x->aead->alg_key,
                                 (x->aead->alg_key_len + 7) / 8);
        if (err)
                goto error;

        err = crypto_aead_setauthsize(aead, x->aead->alg_icv_len / 8);
        if (err)
                goto error;

error:
        return err;
}
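/*
 * authenc() takes a single key blob: an rtattr parameter carrying the
 * encryption key length, followed by the raw authentication key and then
 * the raw encryption key.  esp_init_authenc() assembles that blob before
 * calling crypto_aead_setkey().
 */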
static int esp_init_authenc(struct xfrm_state *x)
{
        struct crypto_aead *aead;
        struct crypto_authenc_key_param *param;
        struct rtattr *rta;
        char *key;
        char *p;
        char authenc_name[CRYPTO_MAX_ALG_NAME];
        unsigned int keylen;
        int err;

        err = -EINVAL;
        if (!x->ealg)
                goto error;

        err = -ENAMETOOLONG;

        if ((x->props.flags & XFRM_STATE_ESN)) {
                if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
                             "%s%sauthencesn(%s,%s)%s",
                             x->geniv ?: "", x->geniv ? "(" : "",
                             x->aalg ? x->aalg->alg_name : "digest_null",
                             x->ealg->alg_name,
                             x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME)
                        goto error;
        } else {
                if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
                             "%s%sauthenc(%s,%s)%s",
                             x->geniv ?: "", x->geniv ? "(" : "",
                             x->aalg ? x->aalg->alg_name : "digest_null",
                             x->ealg->alg_name,
                             x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME)
                        goto error;
        }

        aead = crypto_alloc_aead(authenc_name, 0, 0);
        err = PTR_ERR(aead);
        if (IS_ERR(aead))
                goto error;

        x->data = aead;

        keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
                 (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
        err = -ENOMEM;
        key = kmalloc(keylen, GFP_KERNEL);
        if (!key)
                goto error;

        p = key;
        rta = (void *)p;
        rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
        rta->rta_len = RTA_LENGTH(sizeof(*param));
        param = RTA_DATA(rta);
        p += RTA_SPACE(sizeof(*param));

        if (x->aalg) {
                struct xfrm_algo_desc *aalg_desc;

                memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8);
                p += (x->aalg->alg_key_len + 7) / 8;

                aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
                BUG_ON(!aalg_desc);

                err = -EINVAL;
                if (aalg_desc->uinfo.auth.icv_fullbits / 8 !=
                    crypto_aead_authsize(aead)) {
                        pr_info("ESP: %s digestsize %u != %hu\n",
                                x->aalg->alg_name,
                                crypto_aead_authsize(aead),
                                aalg_desc->uinfo.auth.icv_fullbits / 8);
                        goto free_key;
                }

                err = crypto_aead_setauthsize(
                        aead, x->aalg->alg_trunc_len / 8);
                if (err)
                        goto free_key;
        }

        param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
        memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);

        err = crypto_aead_setkey(aead, key, keylen);

free_key:
        kfree(key);

error:
        return err;
}
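/*
 * Per-state initialisation: pick the AEAD setup path above and precompute
 * how much header and trailer room the ESP encapsulation needs in BEET,
 * transport and tunnel mode.
 */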
static int esp6_init_state(struct xfrm_state *x)
{
        struct crypto_aead *aead;
        u32 align;
        int err;

        if (x->encap)
                return -EINVAL;

        x->data = NULL;

        if (x->aead)
                err = esp_init_aead(x);
        else
                err = esp_init_authenc(x);

        if (err)
                goto error;

        aead = x->data;

        x->props.header_len = sizeof(struct ip_esp_hdr) +
                              crypto_aead_ivsize(aead);
        switch (x->props.mode) {
        case XFRM_MODE_BEET:
                if (x->sel.family != AF_INET6)
                        x->props.header_len += IPV4_BEET_PHMAXLEN +
                                               (sizeof(struct ipv6hdr) - sizeof(struct iphdr));
                break;
        case XFRM_MODE_TRANSPORT:
                break;
        case XFRM_MODE_TUNNEL:
                x->props.header_len += sizeof(struct ipv6hdr);
                break;
        default:
                goto error;
        }

        align = ALIGN(crypto_aead_blocksize(aead), 4);
        x->props.trailer_len = align + 1 + crypto_aead_authsize(aead);

error:
        return err;
}
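/*
 * Registration glue: esp6_type wires the handlers above into the xfrm
 * framework for ESP on AF_INET6, and esp6_protocol hooks the IPv6
 * receive and ICMPv6 error paths.
 */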
static int esp6_rcv_cb(struct sk_buff *skb, int err)
{
        return 0;
}

static const struct xfrm_type esp6_type = {
        .description    = "ESP6",
        .owner          = THIS_MODULE,
        .proto          = IPPROTO_ESP,
        .flags          = XFRM_TYPE_REPLAY_PROT,
        .init_state     = esp6_init_state,
        .destructor     = esp6_destroy,
        .get_mtu        = esp6_get_mtu,
        .input          = esp6_input,
        .output         = esp6_output,
        .hdr_offset     = xfrm6_find_1stfragopt,
};

static struct xfrm6_protocol esp6_protocol = {
        .handler        = xfrm6_rcv,
        .cb_handler     = esp6_rcv_cb,
        .err_handler    = esp6_err,
        .priority       = 0,
};
static int __init esp6_init(void)
{
        if (xfrm_register_type(&esp6_type, AF_INET6) < 0) {
                pr_info("%s: can't add xfrm type\n", __func__);
                return -EAGAIN;
        }
        if (xfrm6_protocol_register(&esp6_protocol, IPPROTO_ESP) < 0) {
                pr_info("%s: can't add protocol\n", __func__);
                xfrm_unregister_type(&esp6_type, AF_INET6);
                return -EAGAIN;
        }

        return 0;
}

static void __exit esp6_fini(void)
{
        if (xfrm6_protocol_deregister(&esp6_protocol, IPPROTO_ESP) < 0)
                pr_info("%s: can't remove protocol\n", __func__);
        if (xfrm_unregister_type(&esp6_type, AF_INET6) < 0)
                pr_info("%s: can't remove xfrm type\n", __func__);
}

module_init(esp6_init);
module_exit(esp6_fini);

MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_ESP);