/*
 * Driver for /dev/crypto device (aka CryptoDev)
 *
 * Copyright (c) 2011, 2012 OpenSSL Software Foundation, Inc.
 *
 * Author: Nikos Mavrogiannopoulos
 *
 * This file is part of linux cryptodev.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

/*
 * This file handles the AEAD part of /dev/crypto.
 */
#include <crypto/hash.h>
#include <linux/crypto.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/ioctl.h>
#include <linux/random.h>
#include <linux/syscalls.h>
#include <linux/pagemap.h>
#include <linux/poll.h>
#include <linux/uaccess.h>
#include <crypto/cryptodev.h>
#include <crypto/scatterwalk.h>
#include <linux/scatterlist.h>
#include "cryptodev_int.h"
#include "zc.h"
#include "util.h"
#include "cryptlib.h"
#include "version.h"
/* make caop->dst available in scatterlist.
 * (caop->src is assumed to be equal to caop->dst)
 */
static int get_userbuf_tls(struct csession *ses, struct kernel_crypt_auth_op *kcaop,
                        struct scatterlist **dst_sg,
                        int *tot_pages)
{
        int pagecount = 0;
        struct crypt_auth_op *caop = &kcaop->caop;
        int rc;

        if (caop->dst == NULL)
                return -EINVAL;

        if (ses->alignmask) {
                if (!IS_ALIGNED((unsigned long)caop->dst, ses->alignmask))
                        dprintk(2, KERN_WARNING, "%s: careful - destination address %lx is not %d byte aligned\n",
                                        __func__, (unsigned long)caop->dst, ses->alignmask + 1);
        }

        if (kcaop->dst_len == 0) {
                dprintk(1, KERN_WARNING, "Destination length cannot be zero\n");
                return -EINVAL;
        }

        pagecount = PAGECOUNT(caop->dst, kcaop->dst_len);

        (*tot_pages) = pagecount;

        rc = adjust_sg_array(ses, pagecount);
        if (rc)
                return rc;

        rc = __get_userbuf(caop->dst, kcaop->dst_len, 1, pagecount,
                           ses->pages, ses->sg, kcaop->task, kcaop->mm);
        if (unlikely(rc)) {
                dprintk(1, KERN_ERR,
                        "failed to get user pages for data input\n");
                return -EINVAL;
        }

        (*dst_sg) = ses->sg;

        return 0;
}
#define MAX_SRTP_AUTH_DATA_DIFF 256
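
/* A sketch of the SRTP buffer layout this implies (based on the checks
 * in get_userbuf_srtp() below): auth_src covers the whole packet, and
 * src/dst points at most MAX_SRTP_AUTH_DATA_DIFF bytes into that same
 * buffer:
 *
 *   auth_src                               auth_src + auth_len
 *      |<-------------- auth_len -------------->|
 *      +------------+----------------------------+
 *      |   header   |   payload (src == dst)     |
 *      +------------+----------------------------+
 *      |<-- diff -->|
 */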
/* Makes caop->auth_src available as scatterlist.
 * It also provides a pointer to caop->dst, which however,
 * is assumed to be within the caop->auth_src buffer. If not
 * (if their difference exceeds MAX_SRTP_AUTH_DATA_DIFF) it
 * returns an error.
 */
static int get_userbuf_srtp(struct csession *ses, struct kernel_crypt_auth_op *kcaop,
                        struct scatterlist **auth_sg, struct scatterlist **dst_sg,
                        int *tot_pages)
{
        int pagecount, diff;
        int auth_pagecount = 0;
        struct crypt_auth_op *caop = &kcaop->caop;
        int rc;

        if (caop->dst == NULL && caop->auth_src == NULL) {
                dprintk(1, KERN_ERR, "dst and auth_src cannot both be NULL\n");
                return -EINVAL;
        }

        if (ses->alignmask) {
                if (!IS_ALIGNED((unsigned long)caop->dst, ses->alignmask))
                        dprintk(2, KERN_WARNING, "%s: careful - destination address %lx is not %d byte aligned\n",
                                        __func__, (unsigned long)caop->dst, ses->alignmask + 1);
                if (!IS_ALIGNED((unsigned long)caop->auth_src, ses->alignmask))
                        dprintk(2, KERN_WARNING, "%s: careful - source address %lx is not %d byte aligned\n",
                                        __func__, (unsigned long)caop->auth_src, ses->alignmask + 1);
        }

        if (unlikely(kcaop->dst_len == 0 || caop->auth_len == 0)) {
                dprintk(1, KERN_WARNING, "Destination length cannot be zero\n");
                return -EINVAL;
        }

        /* Note that in SRTP the auth data overlap with the data to be
         * encrypted (dst).
         */
        auth_pagecount = PAGECOUNT(caop->auth_src, caop->auth_len);
        diff = (int)(caop->src - caop->auth_src);
        if (diff > MAX_SRTP_AUTH_DATA_DIFF || diff < 0) {
                dprintk(1, KERN_WARNING, "auth_src must overlap with src (diff: %d).\n", diff);
                return -EINVAL;
        }

        (*tot_pages) = pagecount = auth_pagecount;

        rc = adjust_sg_array(ses, pagecount*2); /* double pages to have pages for dst(=auth_src) */
        if (rc) {
                dprintk(1, KERN_ERR, "cannot adjust sg array\n");
                return rc;
        }

        rc = __get_userbuf(caop->auth_src, caop->auth_len, 1, auth_pagecount,
                           ses->pages, ses->sg, kcaop->task, kcaop->mm);
        if (unlikely(rc)) {
                dprintk(1, KERN_ERR,
                        "failed to get user pages for data input\n");
                return -EINVAL;
        }

        (*auth_sg) = ses->sg;

        (*dst_sg) = ses->sg + auth_pagecount;
        sg_init_table(*dst_sg, auth_pagecount);
        sg_copy(ses->sg, (*dst_sg), caop->auth_len);
        (*dst_sg) = sg_advance(*dst_sg, diff);
        if (*dst_sg == NULL) {
                release_user_pages(ses->pages, pagecount);
                dprintk(1, KERN_ERR,
                        "failed to get enough pages for auth data\n");
                return -EINVAL;
        }

        return 0;
}
static int fill_kcaop_from_caop(struct kernel_crypt_auth_op *kcaop, struct fcrypt *fcr)
{
        struct crypt_auth_op *caop = &kcaop->caop;
        struct csession *ses_ptr;
        int ret;

        /* this also enters ses_ptr->sem */
        ses_ptr = crypto_get_session_by_sid(fcr, caop->ses);
        if (unlikely(!ses_ptr)) {
                dprintk(1, KERN_ERR, "invalid session ID=0x%08X\n", caop->ses);
                return -EINVAL;
        }

        if (caop->flags & (COP_FLAG_AEAD_TLS_TYPE | COP_FLAG_AEAD_SRTP_TYPE)) {
                if (caop->src != caop->dst) {
                        dprintk(1, KERN_ERR,
                                "Non-inplace encryption and decryption would be inefficient and is not implemented\n");
                        ret = -EINVAL;
                        goto out_unlock;
                }
        }

        if (caop->tag_len == 0)
                caop->tag_len = ses_ptr->hdata.digestsize;

        kcaop->ivlen = caop->iv ? ses_ptr->cdata.ivsize : 0;

        if (caop->flags & COP_FLAG_AEAD_TLS_TYPE)
                kcaop->dst_len = caop->len + ses_ptr->cdata.blocksize /* pad */ + caop->tag_len;
        else
                kcaop->dst_len = caop->len;

        kcaop->task = current;
        kcaop->mm = current->mm;

        if (caop->iv) {
                ret = copy_from_user(kcaop->iv, caop->iv, kcaop->ivlen);
                if (unlikely(ret)) {
                        dprintk(1, KERN_ERR,
                                "error copying IV (%d bytes), copy_from_user returned %d for address %lx\n",
                                kcaop->ivlen, ret, (unsigned long)caop->iv);
                        ret = -EFAULT;
                        goto out_unlock;
                }
        }

        ret = 0;

out_unlock:
        crypto_put_session(ses_ptr);
        return ret;
}
static int fill_caop_from_kcaop(struct kernel_crypt_auth_op *kcaop, struct fcrypt *fcr)
{
        int ret;

        kcaop->caop.len = kcaop->dst_len;

        if (kcaop->ivlen && kcaop->caop.flags & COP_FLAG_WRITE_IV) {
                ret = copy_to_user(kcaop->caop.iv,
                                kcaop->iv, kcaop->ivlen);
                if (unlikely(ret)) {
                        dprintk(1, KERN_ERR, "Error in copying to userspace\n");
                        return -EFAULT;
                }
        }

        return 0;
}

int kcaop_from_user(struct kernel_crypt_auth_op *kcaop,
                struct fcrypt *fcr, void __user *arg)
{
        if (unlikely(copy_from_user(&kcaop->caop, arg, sizeof(kcaop->caop)))) {
                dprintk(1, KERN_ERR, "Error in copying from userspace\n");
                return -EFAULT;
        }

        return fill_kcaop_from_caop(kcaop, fcr);
}

int kcaop_to_user(struct kernel_crypt_auth_op *kcaop,
                struct fcrypt *fcr, void __user *arg)
{
        int ret;

        ret = fill_caop_from_kcaop(kcaop, fcr);
        if (unlikely(ret)) {
                dprintk(1, KERN_ERR, "fill_caop_from_kcaop\n");
                return ret;
        }

        if (unlikely(copy_to_user(arg, &kcaop->caop, sizeof(kcaop->caop)))) {
                dprintk(1, KERN_ERR, "Error in copying to userspace\n");
                return -EFAULT;
        }

        return 0;
}
static void copy_tls_hash(struct scatterlist *dst_sg, int len, void *hash, int hash_len)
{
        scatterwalk_map_and_copy(hash, dst_sg, len, hash_len, 1);
}

static void read_tls_hash(struct scatterlist *dst_sg, int len, void *hash, int hash_len)
{
        scatterwalk_map_and_copy(hash, dst_sg, len - hash_len, hash_len, 0);
}

static int pad_record(struct scatterlist *dst_sg, int len, int block_size)
{
        uint8_t pad[block_size];
        int pad_size = block_size - (len % block_size);

        memset(pad, pad_size - 1, pad_size);

        scatterwalk_map_and_copy(pad, dst_sg, len, pad_size, 1);

        return pad_size;
}
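
/* A worked example of the padding above (TLS style: pad_size bytes are
 * appended, each holding the value pad_size - 1): for len = 29 and
 * block_size = 16, pad_size = 16 - (29 % 16) = 3, so the bytes
 * 0x02 0x02 0x02 are appended and the record grows to 32 bytes.
 */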
static int verify_tls_record_pad(struct scatterlist *dst_sg, int len, int block_size)
{
        uint8_t pad[256]; /* the maximum allowed */
        uint8_t pad_size;
        int i;

        scatterwalk_map_and_copy(&pad_size, dst_sg, len - 1, 1, 0);

        if (pad_size + 1 > len) {
                dprintk(1, KERN_ERR, "Pad size: %d\n", pad_size);
                return -ECANCELED;
        }

        scatterwalk_map_and_copy(pad, dst_sg, len - pad_size - 1, pad_size + 1, 0);

        for (i = 0; i < pad_size; i++) {
                if (pad[i] != pad_size) {
                        dprintk(1, KERN_ERR, "Pad size: %d, pad: %d\n", pad_size, (int)pad[i]);
                        return -ECANCELED;
                }
        }

        return pad_size + 1;
}
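
/* For example, a decrypted record ending in ... 0x03 0x03 0x03 0x03 has
 * pad_size = 3; the three bytes before the length byte all equal 3, so
 * the function returns 4 (pad plus the length byte), which the caller
 * subtracts from the record length.
 */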
/* Authenticate and encrypt the TLS way (also perform padding).
 * During decryption it verifies the pad and tag and returns -ECANCELED on error.
 */
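/* A sketch of the record layout produced on encryption
 * (MAC-then-pad-then-encrypt, as implemented below):
 *
 *   +------------------+--------------+----------------------+
 *   |    plaintext     | MAC(tag_len) | pad .. pad | pad_len |
 *   +------------------+--------------+----------------------+
 *   |<----------- encrypted with the session cipher -------->|
 */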
static int
tls_auth_n_crypt(struct csession *ses_ptr, struct kernel_crypt_auth_op *kcaop,
                struct scatterlist *auth_sg, uint32_t auth_len,
                struct scatterlist *dst_sg, uint32_t len)
{
        int ret, fail = 0;
        struct crypt_auth_op *caop = &kcaop->caop;
        uint8_t vhash[AALG_MAX_RESULT_LEN];
        uint8_t hash_output[AALG_MAX_RESULT_LEN];

        /* TLS authenticates the plaintext except for the padding.
         */
        if (caop->op == COP_ENCRYPT) {
                if (ses_ptr->hdata.init != 0) {
                        if (auth_len > 0) {
                                ret = cryptodev_hash_update(&ses_ptr->hdata,
                                                auth_sg, auth_len);
                                if (unlikely(ret)) {
                                        dprintk(0, KERN_ERR, "cryptodev_hash_update: %d\n", ret);
                                        return ret;
                                }
                        }

                        if (len > 0) {
                                ret = cryptodev_hash_update(&ses_ptr->hdata,
                                                dst_sg, len);
                                if (unlikely(ret)) {
                                        dprintk(0, KERN_ERR, "cryptodev_hash_update: %d\n", ret);
                                        return ret;
                                }
                        }

                        ret = cryptodev_hash_final(&ses_ptr->hdata, hash_output);
                        if (unlikely(ret)) {
                                dprintk(0, KERN_ERR, "cryptodev_hash_final: %d\n", ret);
                                return ret;
                        }

                        copy_tls_hash(dst_sg, len, hash_output, caop->tag_len);
                        len += caop->tag_len;
                }

                if (ses_ptr->cdata.init != 0) {
                        if (ses_ptr->cdata.blocksize > 1) {
                                ret = pad_record(dst_sg, len, ses_ptr->cdata.blocksize);
                                len += ret;
                        }

                        ret = cryptodev_cipher_encrypt(&ses_ptr->cdata,
                                        dst_sg, dst_sg, len);
                        if (unlikely(ret)) {
                                dprintk(0, KERN_ERR, "cryptodev_cipher_encrypt: %d\n", ret);
                                return ret;
                        }
                }
        } else {
                if (ses_ptr->cdata.init != 0) {
                        ret = cryptodev_cipher_decrypt(&ses_ptr->cdata,
                                        dst_sg, dst_sg, len);

                        if (unlikely(ret)) {
                                dprintk(0, KERN_ERR, "cryptodev_cipher_decrypt: %d\n", ret);
                                return ret;
                        }

                        if (ses_ptr->cdata.blocksize > 1) {
                                ret = verify_tls_record_pad(dst_sg, len, ses_ptr->cdata.blocksize);
                                if (unlikely(ret < 0)) {
                                        dprintk(2, KERN_ERR, "verify_record_pad: %d\n", ret);
                                        fail = 1;
                                } else {
                                        len -= ret;
                                }
                        }
                }

                if (ses_ptr->hdata.init != 0) {
                        if (unlikely(caop->tag_len > sizeof(vhash) || caop->tag_len > len)) {
                                dprintk(1, KERN_ERR, "Illegal tag len size\n");
                                return -EINVAL;
                        }

                        read_tls_hash(dst_sg, len, vhash, caop->tag_len);
                        len -= caop->tag_len;

                        if (auth_len > 0) {
                                ret = cryptodev_hash_update(&ses_ptr->hdata,
                                                auth_sg, auth_len);
                                if (unlikely(ret)) {
                                        dprintk(0, KERN_ERR, "cryptodev_hash_update: %d\n", ret);
                                        return ret;
                                }
                        }

                        if (len > 0) {
                                ret = cryptodev_hash_update(&ses_ptr->hdata,
                                                dst_sg, len);
                                if (unlikely(ret)) {
                                        dprintk(0, KERN_ERR, "cryptodev_hash_update: %d\n", ret);
                                        return ret;
                                }
                        }

                        ret = cryptodev_hash_final(&ses_ptr->hdata, hash_output);
                        if (unlikely(ret)) {
                                dprintk(0, KERN_ERR, "cryptodev_hash_final: %d\n", ret);
                                return ret;
                        }

                        if (memcmp(vhash, hash_output, caop->tag_len) != 0 || fail != 0) {
                                dprintk(2, KERN_ERR, "MAC verification failed (tag_len: %d)\n", caop->tag_len);
                                return -ECANCELED;
                        }
                }
        }

        kcaop->dst_len = len;
        return 0;
}
/* Authenticate and encrypt the SRTP way (encrypt-then-MAC). During
 * decryption it verifies the tag and returns -ECANCELED on error.
 */
static int
srtp_auth_n_crypt(struct csession *ses_ptr, struct kernel_crypt_auth_op *kcaop,
                struct scatterlist *auth_sg, uint32_t auth_len,
                struct scatterlist *dst_sg, uint32_t len)
{
        int ret, fail = 0;
        struct crypt_auth_op *caop = &kcaop->caop;
        uint8_t vhash[AALG_MAX_RESULT_LEN];
        uint8_t hash_output[AALG_MAX_RESULT_LEN];

        /* SRTP authenticates the encrypted data.
         */
        if (caop->op == COP_ENCRYPT) {
                if (ses_ptr->cdata.init != 0) {
                        ret = cryptodev_cipher_encrypt(&ses_ptr->cdata,
                                        dst_sg, dst_sg, len);
                        if (unlikely(ret)) {
                                dprintk(0, KERN_ERR, "cryptodev_cipher_encrypt: %d\n", ret);
                                return ret;
                        }
                }

                if (ses_ptr->hdata.init != 0) {
                        if (auth_len > 0) {
                                ret = cryptodev_hash_update(&ses_ptr->hdata,
                                                auth_sg, auth_len);
                                if (unlikely(ret)) {
                                        dprintk(0, KERN_ERR, "cryptodev_hash_update: %d\n", ret);
                                        return ret;
                                }
                        }

                        ret = cryptodev_hash_final(&ses_ptr->hdata, hash_output);
                        if (unlikely(ret)) {
                                dprintk(0, KERN_ERR, "cryptodev_hash_final: %d\n", ret);
                                return ret;
                        }

                        if (unlikely(copy_to_user(caop->tag, hash_output, caop->tag_len)))
                                return -EFAULT;
                }
        } else {
                if (ses_ptr->hdata.init != 0) {
                        if (unlikely(caop->tag_len > sizeof(vhash) || caop->tag_len > len)) {
                                dprintk(1, KERN_ERR, "Illegal tag len size\n");
                                return -EINVAL;
                        }

                        if (unlikely(copy_from_user(vhash, caop->tag, caop->tag_len)))
                                return -EFAULT;

                        ret = cryptodev_hash_update(&ses_ptr->hdata,
                                        auth_sg, auth_len);
                        if (unlikely(ret)) {
                                dprintk(0, KERN_ERR, "cryptodev_hash_update: %d\n", ret);
                                return ret;
                        }

                        ret = cryptodev_hash_final(&ses_ptr->hdata, hash_output);
                        if (unlikely(ret)) {
                                dprintk(0, KERN_ERR, "cryptodev_hash_final: %d\n", ret);
                                return ret;
                        }

                        if (memcmp(vhash, hash_output, caop->tag_len) != 0 || fail != 0) {
                                dprintk(2, KERN_ERR, "MAC verification failed\n");
                                return -ECANCELED;
                        }
                }

                if (ses_ptr->cdata.init != 0) {
                        ret = cryptodev_cipher_decrypt(&ses_ptr->cdata,
                                        dst_sg, dst_sg, len);

                        if (unlikely(ret)) {
                                dprintk(0, KERN_ERR, "cryptodev_cipher_decrypt: %d\n", ret);
                                return ret;
                        }
                }
        }

        kcaop->dst_len = len;
        return 0;
}
/* Typical AEAD (e.g., GCM) encryption/decryption.
 * During decryption the tag is verified.
 */
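/* On encryption the destination buffer receives the ciphertext followed
 * by the tag (dst_len = len + tag_len); on decryption the tag is
 * expected at the tail of the source data (dst_len = len - tag_len).
 * In both cases caop->tag is pointed at the tag's location within dst.
 */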
static int
auth_n_crypt(struct csession *ses_ptr, struct kernel_crypt_auth_op *kcaop,
                struct scatterlist *auth_sg, uint32_t auth_len,
                struct scatterlist *src_sg,
                struct scatterlist *dst_sg, uint32_t len)
{
        int ret;
        struct crypt_auth_op *caop = &kcaop->caop;
        int max_tag_len;

        max_tag_len = cryptodev_cipher_get_tag_size(&ses_ptr->cdata);
        if (unlikely(caop->tag_len > max_tag_len)) {
                dprintk(0, KERN_ERR, "Illegal tag length: %d\n", caop->tag_len);
                return -EINVAL;
        }

        if (caop->tag_len)
                cryptodev_cipher_set_tag_size(&ses_ptr->cdata, caop->tag_len);
        else
                caop->tag_len = max_tag_len;

        if (caop->op == COP_ENCRYPT) {
                if (auth_len > 0)
                        cryptodev_cipher_auth(&ses_ptr->cdata, auth_sg, auth_len);
                else /* for some reason we _have_ to call that */
                        cryptodev_cipher_auth(&ses_ptr->cdata, NULL, 0);

                ret = cryptodev_cipher_encrypt(&ses_ptr->cdata,
                                src_sg, dst_sg, len);
                if (unlikely(ret)) {
                        dprintk(0, KERN_ERR, "cryptodev_cipher_encrypt: %d\n", ret);
                        return ret;
                }
                kcaop->dst_len = len + caop->tag_len;
                caop->tag = caop->dst + len;
        } else {
                if (auth_len > 0)
                        cryptodev_cipher_auth(&ses_ptr->cdata, auth_sg, auth_len);

                ret = cryptodev_cipher_decrypt(&ses_ptr->cdata,
                                src_sg, dst_sg, len);

                if (unlikely(ret)) {
                        dprintk(0, KERN_ERR, "cryptodev_cipher_decrypt: %d\n", ret);
                        return ret;
                }
                kcaop->dst_len = len - caop->tag_len;
                caop->tag = caop->dst + len - caop->tag_len;
        }

        return 0;
}
/* This is the main crypto function - zero-copy edition */
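/* Dispatch summary: SRTP requests map the user buffers directly
 * (get_userbuf_srtp); TLS requests with a non-AEAD cipher use
 * get_userbuf_tls plus a copied auth buffer; everything else goes
 * through the generic AEAD path (get_userbuf + auth_n_crypt).
 */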
static int
__crypto_auth_run_zc(struct csession *ses_ptr, struct kernel_crypt_auth_op *kcaop)
{
        struct scatterlist *dst_sg, *auth_sg, *src_sg;
        struct crypt_auth_op *caop = &kcaop->caop;
        int ret = 0, pagecount = 0;

        if (caop->flags & COP_FLAG_AEAD_SRTP_TYPE) {
                if (unlikely(ses_ptr->cdata.init != 0 &&
                        (ses_ptr->cdata.stream == 0 || ses_ptr->cdata.aead != 0))) {
                        dprintk(0, KERN_ERR, "Only stream modes are allowed in SRTP mode (but not AEAD)\n");
                        return -EINVAL;
                }

                ret = get_userbuf_srtp(ses_ptr, kcaop, &auth_sg, &dst_sg, &pagecount);
                if (unlikely(ret)) {
                        dprintk(1, KERN_ERR, "get_userbuf_srtp(): Error getting user pages.\n");
                        return ret;
                }

                ret = srtp_auth_n_crypt(ses_ptr, kcaop, auth_sg, caop->auth_len,
                                dst_sg, caop->len);
        } else { /* TLS and normal cases. Here auth data are usually small
                  * so we just copy them to a free page, instead of trying
                  * to map them. */
                unsigned char *auth_buf = NULL;
                struct scatterlist tmp;

                if (unlikely(caop->auth_len > PAGE_SIZE))
                        return -EINVAL;

                auth_buf = (char *)__get_free_page(GFP_KERNEL);
                if (unlikely(!auth_buf))
                        return -ENOMEM;

                if (caop->auth_len > 0) {
                        if (unlikely(copy_from_user(auth_buf, caop->auth_src, caop->auth_len))) {
                                ret = -EFAULT;
                                goto fail;
                        }

                        sg_init_one(&tmp, auth_buf, caop->auth_len);
                        auth_sg = &tmp;
                } else {
                        auth_sg = NULL;
                }

                if (caop->flags & COP_FLAG_AEAD_TLS_TYPE && ses_ptr->cdata.aead == 0) {
                        ret = get_userbuf_tls(ses_ptr, kcaop, &dst_sg, &pagecount);
                        if (unlikely(ret)) {
                                dprintk(1, KERN_ERR, "get_userbuf_tls(): Error getting user pages.\n");
                                goto fail;
                        }

                        ret = tls_auth_n_crypt(ses_ptr, kcaop, auth_sg, caop->auth_len,
                                        dst_sg, caop->len);
                } else {
                        int dst_len;

                        if (unlikely(ses_ptr->cdata.init == 0 ||
                                        ses_ptr->cdata.stream == 0 ||
                                        ses_ptr->cdata.aead == 0)) {
                                dprintk(0, KERN_ERR, "Only stream and AEAD ciphers are allowed for authenc\n");
                                ret = -EINVAL;
                                goto fail;
                        }

                        if (caop->op == COP_ENCRYPT)
                                dst_len = caop->len + cryptodev_cipher_get_tag_size(&ses_ptr->cdata);
                        else
                                dst_len = caop->len - cryptodev_cipher_get_tag_size(&ses_ptr->cdata);

                        ret = get_userbuf(ses_ptr, caop->src, caop->len, caop->dst, dst_len,
                                        kcaop->task, kcaop->mm, &src_sg, &dst_sg, &pagecount);
                        if (unlikely(ret)) {
                                dprintk(1, KERN_ERR, "get_userbuf(): Error getting user pages.\n");
                                goto fail;
                        }

                        ret = auth_n_crypt(ses_ptr, kcaop, auth_sg, caop->auth_len,
                                        src_sg, dst_sg, caop->len);
                }

fail:
                free_page((unsigned long)auth_buf);
        }

        release_user_pages(ses_ptr->pages, pagecount);
        return ret;
}
int crypto_auth_run(struct fcrypt *fcr, struct kernel_crypt_auth_op *kcaop)
{
        struct csession *ses_ptr;
        struct crypt_auth_op *caop = &kcaop->caop;
        int ret;

        if (unlikely(caop->op != COP_ENCRYPT && caop->op != COP_DECRYPT)) {
                dprintk(1, KERN_DEBUG, "invalid operation op=%u\n", caop->op);
                return -EINVAL;
        }

        /* this also enters ses_ptr->sem */
        ses_ptr = crypto_get_session_by_sid(fcr, caop->ses);
        if (unlikely(!ses_ptr)) {
                dprintk(1, KERN_ERR, "invalid session ID=0x%08X\n", caop->ses);
                return -EINVAL;
        }

        if (unlikely(ses_ptr->cdata.init == 0)) {
                dprintk(1, KERN_ERR, "cipher context not initialized\n");
                ret = -EINVAL;
                goto out_unlock;
        }

        /* If we have a hash/mac handle reset its state */
        if (ses_ptr->hdata.init != 0) {
                ret = cryptodev_hash_reset(&ses_ptr->hdata);
                if (unlikely(ret)) {
                        dprintk(1, KERN_ERR,
                                "error in cryptodev_hash_reset()\n");
                        goto out_unlock;
                }
        }

        cryptodev_cipher_set_iv(&ses_ptr->cdata, kcaop->iv,
                        min(ses_ptr->cdata.ivsize, kcaop->ivlen));

        if (likely(caop->len || caop->auth_len)) {
                ret = __crypto_auth_run_zc(ses_ptr, kcaop);
                if (unlikely(ret))
                        goto out_unlock;
        } else {
                ret = -EINVAL;
                goto out_unlock;
        }

        ret = 0;

        cryptodev_cipher_get_iv(&ses_ptr->cdata, kcaop->iv,
                        min(ses_ptr->cdata.ivsize, kcaop->ivlen));

out_unlock:
        crypto_put_session(ses_ptr);
        return ret;
}
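
/* A minimal sketch of a userspace caller (not part of this driver).
 * It assumes a cipher/MAC session was already opened with CIOCGSESSION
 * on /dev/crypto, and that struct crypt_auth_op and CIOCAUTHCRYPT come
 * from crypto/cryptodev.h; names such as cfd, sess, iv, aad and buf
 * are placeholders:
 *
 *	struct crypt_auth_op cao = {0};
 *
 *	cao.ses = sess.ses;
 *	cao.op = COP_ENCRYPT;
 *	cao.flags = COP_FLAG_AEAD_TLS_TYPE;
 *	cao.iv = iv;
 *	cao.auth_src = aad;          // e.g. the TLS record header
 *	cao.auth_len = aad_len;
 *	cao.src = cao.dst = buf;     // must be in-place (see above)
 *	cao.len = plaintext_len;     // buf needs room for tag + padding
 *	cao.tag_len = 0;             // 0 means "use the MAC digest size"
 *
 *	if (ioctl(cfd, CIOCAUTHCRYPT, &cao) == -1)
 *		perror("CIOCAUTHCRYPT");
 *	// on success cao.len holds the final record length
 */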