set null auth on decr too.
[cryptodev-linux.git] / authenc.c
/*
 * Driver for /dev/crypto device (aka CryptoDev)
 *
 * Copyright (c) 2011, 2012 OpenSSL Software Foundation, Inc.
 *
 * Author: Nikos Mavrogiannopoulos
 *
 * This file is part of linux cryptodev.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * This file handles the AEAD part of /dev/crypto.
 */
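
/*
 * Minimal userspace sketch of driving this code path (illustrative only:
 * cfd, buf, aad, iv and the session parameters are hypothetical, while the
 * ioctl and flag names are the ones declared in crypto/cryptodev.h):
 *
 *      struct session_op sess = { 0 };
 *      sess.cipher = CRYPTO_AES_CBC;          // plus mac and keys as needed
 *      ioctl(cfd, CIOCGSESSION, &sess);       // cfd: open fd of /dev/crypto
 *
 *      struct crypt_auth_op cao = { 0 };
 *      cao.ses      = sess.ses;
 *      cao.op       = COP_ENCRYPT;
 *      cao.flags    = COP_FLAG_AEAD_TLS_TYPE; // or _SRTP_TYPE, or 0 for AEAD
 *      cao.src      = buf;
 *      cao.dst      = buf;                    // TLS/SRTP require in-place (src == dst)
 *      cao.len      = data_len;
 *      cao.auth_src = aad;
 *      cao.auth_len = aad_len;
 *      cao.iv       = iv;
 *      ioctl(cfd, CIOCAUTHCRYPT, &cao);
 */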
#include <crypto/hash.h>
#include <linux/crypto.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/ioctl.h>
#include <linux/random.h>
#include <linux/syscalls.h>
#include <linux/pagemap.h>
#include <linux/poll.h>
#include <linux/uaccess.h>
#include <crypto/cryptodev.h>
#include <crypto/scatterwalk.h>
#include <linux/scatterlist.h>
#include "cryptodev_int.h"
#include "zc.h"
#include "util.h"
#include "cryptlib.h"
#include "version.h"

/* Make caop->dst available in scatterlist.
 * (caop->src is assumed to be equal to caop->dst)
 */
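/* A hedged sizing note: for TLS, fill_kcaop_from_caop() below sets
 * kcaop->dst_len = caop->len + cipher blocksize (pad) + tag_len, so the
 * user's dst buffer must already have that much room; e.g. a 100-byte
 * record under AES-CBC (16-byte blocks) with HMAC-SHA1 (20-byte tag)
 * pins up to PAGECOUNT(dst, 136) pages here.
 */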
static int get_userbuf_tls(struct csession *ses, struct kernel_crypt_auth_op *kcaop,
                        struct scatterlist **dst_sg)
{
        int pagecount = 0;
        struct crypt_auth_op *caop = &kcaop->caop;
        int rc;

        if (caop->dst == NULL)
                return -EINVAL;

        if (ses->alignmask) {
                if (!IS_ALIGNED((unsigned long)caop->dst, ses->alignmask))
                        dprintk(2, KERN_WARNING, "careful - destination address %lx is not %d byte aligned\n",
                                        (unsigned long)caop->dst, ses->alignmask + 1);
        }

        if (kcaop->dst_len == 0) {
                dprintk(1, KERN_WARNING, "Destination length cannot be zero\n");
                return -EINVAL;
        }

        pagecount = PAGECOUNT(caop->dst, kcaop->dst_len);

        ses->used_pages = pagecount;
        ses->readonly_pages = 0;

        rc = adjust_sg_array(ses, pagecount);
        if (rc)
                return rc;

        rc = __get_userbuf(caop->dst, kcaop->dst_len, 1, pagecount,
                           ses->pages, ses->sg, kcaop->task, kcaop->mm);
        if (unlikely(rc)) {
                dprintk(1, KERN_ERR,
                        "failed to get user pages for data input\n");
                return -EINVAL;
        }

        (*dst_sg) = ses->sg;

        return 0;
}

#define MAX_SRTP_AUTH_DATA_DIFF 256

/* Makes caop->auth_src available as scatterlist.
 * It also provides a pointer to caop->dst, which however
 * is assumed to be within the caop->auth_src buffer. If not
 * (if their difference exceeds MAX_SRTP_AUTH_DATA_DIFF) it
 * returns an error.
 */
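/* Illustrative SRTP buffer layout (offsets hypothetical): the MAC covers
 * auth_len bytes starting at auth_src, and the encrypted span begins
 * diff = src - auth_src bytes into it, with 0 <= diff <= MAX_SRTP_AUTH_DATA_DIFF:
 *
 *      auth_src                      src (== dst)
 *      |<-- diff -->|<------- encrypted data ------->|
 *      |<---------------- auth_len ---------------------->|
 */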
static int get_userbuf_srtp(struct csession *ses, struct kernel_crypt_auth_op *kcaop,
                        struct scatterlist **auth_sg, struct scatterlist **dst_sg)
{
        int pagecount, diff;
        int auth_pagecount = 0;
        struct crypt_auth_op *caop = &kcaop->caop;
        int rc;

        if (caop->dst == NULL && caop->auth_src == NULL) {
                dprintk(1, KERN_ERR, "dst and auth_src cannot both be null\n");
                return -EINVAL;
        }

        if (ses->alignmask) {
                if (!IS_ALIGNED((unsigned long)caop->dst, ses->alignmask))
                        dprintk(2, KERN_WARNING, "careful - destination address %lx is not %d byte aligned\n",
                                        (unsigned long)caop->dst, ses->alignmask + 1);
                if (!IS_ALIGNED((unsigned long)caop->auth_src, ses->alignmask))
                        dprintk(2, KERN_WARNING, "careful - auth source address %lx is not %d byte aligned\n",
                                        (unsigned long)caop->auth_src, ses->alignmask + 1);
        }

        if (unlikely(kcaop->dst_len == 0 || caop->auth_len == 0)) {
                dprintk(1, KERN_WARNING, "Destination and auth data lengths cannot be zero\n");
                return -EINVAL;
        }

        /* Note that in SRTP the auth data overlaps with the data to be encrypted (dst). */
        auth_pagecount = PAGECOUNT(caop->auth_src, caop->auth_len);
        diff = (int)(caop->src - caop->auth_src);
        if (diff > MAX_SRTP_AUTH_DATA_DIFF || diff < 0) {
                dprintk(1, KERN_WARNING, "auth_src must overlap with src (diff: %d).\n", diff);
                return -EINVAL;
        }

        pagecount = auth_pagecount;

        rc = adjust_sg_array(ses, pagecount * 2); /* double the pages, to also hold dst (= auth_src) */
        if (rc) {
                dprintk(1, KERN_ERR, "cannot adjust sg array\n");
                return rc;
        }

        rc = __get_userbuf(caop->auth_src, caop->auth_len, 1, auth_pagecount,
                           ses->pages, ses->sg, kcaop->task, kcaop->mm);
        if (unlikely(rc)) {
                dprintk(1, KERN_ERR,
                        "failed to get user pages for data input\n");
                return -EINVAL;
        }

        ses->used_pages = pagecount;
        ses->readonly_pages = 0;

        (*auth_sg) = ses->sg;

        (*dst_sg) = ses->sg + auth_pagecount;
        sg_init_table(*dst_sg, auth_pagecount);
        sg_copy(ses->sg, (*dst_sg), caop->auth_len);
        (*dst_sg) = sg_advance(*dst_sg, diff);
        if (*dst_sg == NULL) {
                release_user_pages(ses);
                dprintk(1, KERN_ERR,
                        "failed to get enough pages for auth data\n");
                return -EINVAL;
        }

        return 0;
}
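
/* Validates the user-supplied operation against the session, derives the
 * expected output length (a TLS record grows by up to one cipher block of
 * padding plus the tag) and copies the IV into kernel space.
 */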
static int fill_kcaop_from_caop(struct kernel_crypt_auth_op *kcaop, struct fcrypt *fcr)
{
        struct crypt_auth_op *caop = &kcaop->caop;
        struct csession *ses_ptr;
        int ret;

        /* this also enters ses_ptr->sem */
        ses_ptr = crypto_get_session_by_sid(fcr, caop->ses);
        if (unlikely(!ses_ptr)) {
                dprintk(1, KERN_ERR, "invalid session ID=0x%08X\n", caop->ses);
                return -EINVAL;
        }

        if (caop->flags & COP_FLAG_AEAD_TLS_TYPE || caop->flags & COP_FLAG_AEAD_SRTP_TYPE) {
                if (caop->src != caop->dst) {
                        dprintk(1, KERN_ERR,
                                "Non-in-place encryption and decryption is inefficient and not implemented\n");
                        ret = -EINVAL;
                        goto out_unlock;
                }
        }

        if (caop->tag_len == 0)
                caop->tag_len = ses_ptr->hdata.digestsize;

        kcaop->ivlen = caop->iv ? ses_ptr->cdata.ivsize : 0;

        if (caop->flags & COP_FLAG_AEAD_TLS_TYPE)
                kcaop->dst_len = caop->len + ses_ptr->cdata.blocksize /* pad */ + caop->tag_len;
        else
                kcaop->dst_len = caop->len;

        kcaop->task = current;
        kcaop->mm = current->mm;

        if (caop->iv) {
                ret = copy_from_user(kcaop->iv, caop->iv, kcaop->ivlen);
                if (unlikely(ret)) {
                        dprintk(1, KERN_ERR,
                                "error copying IV (%d bytes), copy_from_user returned %d for address %lx\n",
                                kcaop->ivlen, ret, (unsigned long)caop->iv);
                        ret = -EFAULT;
                        goto out_unlock;
                }
        }

        ret = 0;

out_unlock:
        crypto_put_session(ses_ptr);
        return ret;
}

static int fill_caop_from_kcaop(struct kernel_crypt_auth_op *kcaop, struct fcrypt *fcr)
{
        int ret;

        kcaop->caop.len = kcaop->dst_len;

        if (kcaop->ivlen && kcaop->caop.flags & COP_FLAG_WRITE_IV) {
                ret = copy_to_user(kcaop->caop.iv,
                                   kcaop->iv, kcaop->ivlen);
                if (unlikely(ret)) {
                        dprintk(1, KERN_ERR, "Error in copying to userspace\n");
                        return -EFAULT;
                }
        }
        return 0;
}

int kcaop_from_user(struct kernel_crypt_auth_op *kcaop,
                struct fcrypt *fcr, void __user *arg)
{
        if (unlikely(copy_from_user(&kcaop->caop, arg, sizeof(kcaop->caop)))) {
                dprintk(1, KERN_ERR, "Error in copying from userspace\n");
                return -EFAULT;
        }

        return fill_kcaop_from_caop(kcaop, fcr);
}

int kcaop_to_user(struct kernel_crypt_auth_op *kcaop,
                struct fcrypt *fcr, void __user *arg)
{
        int ret;

        ret = fill_caop_from_kcaop(kcaop, fcr);
        if (unlikely(ret)) {
                dprintk(1, KERN_ERR, "fill_caop_from_kcaop() failed\n");
                return ret;
        }

        if (unlikely(copy_to_user(arg, &kcaop->caop, sizeof(kcaop->caop)))) {
                dprintk(1, KERN_ERR, "Error in copying to userspace\n");
                return -EFAULT;
        }
        return 0;
}

static void copy_tls_hash(struct scatterlist *dst_sg, int len, void *hash, int hash_len)
{
        scatterwalk_map_and_copy(hash, dst_sg, len, hash_len, 1);
}

static void read_tls_hash(struct scatterlist *dst_sg, int len, void *hash, int hash_len)
{
        scatterwalk_map_and_copy(hash, dst_sg, len - hash_len, hash_len, 0);
}

static int pad_record(struct scatterlist *dst_sg, int len, int block_size)
{
        uint8_t pad[block_size];
        int pad_size = block_size - (len % block_size);

        memset(pad, pad_size - 1, pad_size);

        scatterwalk_map_and_copy(pad, dst_sg, len, pad_size, 1);

        return pad_size;
}
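
/* Worked example (hypothetical sizes): with block_size = 16 and len = 20,
 * pad_size = 16 - (20 % 16) = 12, so twelve bytes of value 11 are written
 * (TLS pad bytes carry pad_size - 1: the pad count excluding the final
 * length byte), growing the record to 32 bytes. verify_tls_record_pad()
 * below performs the inverse check during decryption.
 */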
static int verify_tls_record_pad(struct scatterlist *dst_sg, int len, int block_size)
{
        uint8_t pad[256]; /* the maximum allowed */
        uint8_t pad_size;
        int i;

        scatterwalk_map_and_copy(&pad_size, dst_sg, len - 1, 1, 0);

        if (pad_size + 1 > len) {
                dprintk(1, KERN_ERR, "Pad size: %d\n", pad_size);
                return -EBADMSG;
        }

        scatterwalk_map_and_copy(pad, dst_sg, len - pad_size - 1, pad_size + 1, 0);

        for (i = 0; i < pad_size; i++)
                if (pad[i] != pad_size) {
                        dprintk(1, KERN_ERR, "Pad size: %d, pad: %d\n", pad_size, (int)pad[i]);
                        return -EBADMSG;
                }

        return pad_size + 1;
}

/* Authenticate and encrypt the TLS way (also perform padding).
 * During decryption it verifies the pad and tag and returns -EBADMSG on error.
 */
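/* The record is assembled in place in dst_sg as
 *
 *      plaintext || MAC(auth data || plaintext) || padding
 *
 * and then encrypted. Decryption runs the same steps in reverse; note
 * that a bad pad only sets `fail`, so pad and MAC errors both surface
 * as the same -EBADMSG.
 */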
static int
tls_auth_n_crypt(struct csession *ses_ptr, struct kernel_crypt_auth_op *kcaop,
                struct scatterlist *auth_sg, uint32_t auth_len,
                struct scatterlist *dst_sg, uint32_t len)
{
        int ret, fail = 0;
        struct crypt_auth_op *caop = &kcaop->caop;
        uint8_t vhash[AALG_MAX_RESULT_LEN];
        uint8_t hash_output[AALG_MAX_RESULT_LEN];

        /* TLS authenticates the plaintext except for the padding. */
        if (caop->op == COP_ENCRYPT) {
                if (ses_ptr->hdata.init != 0) {
                        if (auth_len > 0) {
                                ret = cryptodev_hash_update(&ses_ptr->hdata,
                                                            auth_sg, auth_len);
                                if (unlikely(ret)) {
                                        dprintk(0, KERN_ERR, "cryptodev_hash_update: %d\n", ret);
                                        return ret;
                                }
                        }

                        if (len > 0) {
                                ret = cryptodev_hash_update(&ses_ptr->hdata,
                                                            dst_sg, len);
                                if (unlikely(ret)) {
                                        dprintk(0, KERN_ERR, "cryptodev_hash_update: %d\n", ret);
                                        return ret;
                                }
                        }

                        ret = cryptodev_hash_final(&ses_ptr->hdata, hash_output);
                        if (unlikely(ret)) {
                                dprintk(0, KERN_ERR, "cryptodev_hash_final: %d\n", ret);
                                return ret;
                        }

                        copy_tls_hash(dst_sg, len, hash_output, caop->tag_len);
                        len += caop->tag_len;
                }

                if (ses_ptr->cdata.init != 0) {
                        if (ses_ptr->cdata.blocksize > 1) {
                                ret = pad_record(dst_sg, len, ses_ptr->cdata.blocksize);
                                len += ret;
                        }

                        ret = cryptodev_cipher_encrypt(&ses_ptr->cdata,
                                                       dst_sg, dst_sg, len);
                        if (unlikely(ret)) {
                                dprintk(0, KERN_ERR, "cryptodev_cipher_encrypt: %d\n", ret);
                                return ret;
                        }
                }
        } else {
                if (ses_ptr->cdata.init != 0) {
                        ret = cryptodev_cipher_decrypt(&ses_ptr->cdata,
                                                       dst_sg, dst_sg, len);

                        if (unlikely(ret)) {
                                dprintk(0, KERN_ERR, "cryptodev_cipher_decrypt: %d\n", ret);
                                return ret;
                        }

                        if (ses_ptr->cdata.blocksize > 1) {
                                ret = verify_tls_record_pad(dst_sg, len, ses_ptr->cdata.blocksize);
                                if (unlikely(ret < 0)) {
                                        dprintk(2, KERN_ERR, "verify_tls_record_pad: %d\n", ret);
                                        fail = 1;
                                } else {
                                        len -= ret;
                                }
                        }
                }

                if (ses_ptr->hdata.init != 0) {
                        if (unlikely(caop->tag_len > sizeof(vhash) || caop->tag_len > len)) {
                                dprintk(1, KERN_ERR, "Illegal tag length\n");
                                return -EINVAL;
                        }

                        read_tls_hash(dst_sg, len, vhash, caop->tag_len);
                        len -= caop->tag_len;

                        if (auth_len > 0) {
                                ret = cryptodev_hash_update(&ses_ptr->hdata,
                                                            auth_sg, auth_len);
                                if (unlikely(ret)) {
                                        dprintk(0, KERN_ERR, "cryptodev_hash_update: %d\n", ret);
                                        return ret;
                                }
                        }

                        if (len > 0) {
                                ret = cryptodev_hash_update(&ses_ptr->hdata,
                                                            dst_sg, len);
                                if (unlikely(ret)) {
                                        dprintk(0, KERN_ERR, "cryptodev_hash_update: %d\n", ret);
                                        return ret;
                                }
                        }

                        ret = cryptodev_hash_final(&ses_ptr->hdata, hash_output);
                        if (unlikely(ret)) {
                                dprintk(0, KERN_ERR, "cryptodev_hash_final: %d\n", ret);
                                return ret;
                        }

                        if (memcmp(vhash, hash_output, caop->tag_len) != 0 || fail != 0) {
                                dprintk(2, KERN_ERR, "MAC verification failed (tag_len: %d)\n", caop->tag_len);
                                return -EBADMSG;
                        }
                }
        }
        kcaop->dst_len = len;
        return 0;
}

/* Authenticate and encrypt the SRTP way. During decryption
 * it verifies the tag and returns -EBADMSG on error.
 */
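/* In contrast to the TLS path above, this is encrypt-then-MAC, and the
 * tag does not live in dst at all: it crosses the user boundary through
 * caop->tag via copy_to_user()/copy_from_user().
 */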
static int
srtp_auth_n_crypt(struct csession *ses_ptr, struct kernel_crypt_auth_op *kcaop,
                struct scatterlist *auth_sg, uint32_t auth_len,
                struct scatterlist *dst_sg, uint32_t len)
{
        int ret, fail = 0;
        struct crypt_auth_op *caop = &kcaop->caop;
        uint8_t vhash[AALG_MAX_RESULT_LEN];
        uint8_t hash_output[AALG_MAX_RESULT_LEN];

        /* SRTP authenticates the encrypted data. */
        if (caop->op == COP_ENCRYPT) {
                if (ses_ptr->cdata.init != 0) {
                        ret = cryptodev_cipher_encrypt(&ses_ptr->cdata,
                                                       dst_sg, dst_sg, len);
                        if (unlikely(ret)) {
                                dprintk(0, KERN_ERR, "cryptodev_cipher_encrypt: %d\n", ret);
                                return ret;
                        }
                }

                if (ses_ptr->hdata.init != 0) {
                        if (auth_len > 0) {
                                ret = cryptodev_hash_update(&ses_ptr->hdata,
                                                            auth_sg, auth_len);
                                if (unlikely(ret)) {
                                        dprintk(0, KERN_ERR, "cryptodev_hash_update: %d\n", ret);
                                        return ret;
                                }
                        }

                        ret = cryptodev_hash_final(&ses_ptr->hdata, hash_output);
                        if (unlikely(ret)) {
                                dprintk(0, KERN_ERR, "cryptodev_hash_final: %d\n", ret);
                                return ret;
                        }

                        if (unlikely(copy_to_user(caop->tag, hash_output, caop->tag_len))) {
                                return -EFAULT;
                        }
                }
        } else {
                if (ses_ptr->hdata.init != 0) {
                        if (unlikely(caop->tag_len > sizeof(vhash) || caop->tag_len > len)) {
                                dprintk(1, KERN_ERR, "Illegal tag length\n");
                                return -EINVAL;
                        }

                        if (unlikely(copy_from_user(vhash, caop->tag, caop->tag_len))) {
                                return -EFAULT;
                        }

                        ret = cryptodev_hash_update(&ses_ptr->hdata,
                                                    auth_sg, auth_len);
                        if (unlikely(ret)) {
                                dprintk(0, KERN_ERR, "cryptodev_hash_update: %d\n", ret);
                                return ret;
                        }

                        ret = cryptodev_hash_final(&ses_ptr->hdata, hash_output);
                        if (unlikely(ret)) {
                                dprintk(0, KERN_ERR, "cryptodev_hash_final: %d\n", ret);
                                return ret;
                        }

                        if (memcmp(vhash, hash_output, caop->tag_len) != 0 || fail != 0) {
                                dprintk(2, KERN_ERR, "MAC verification failed\n");
                                return -EBADMSG;
                        }
                }

                if (ses_ptr->cdata.init != 0) {
                        ret = cryptodev_cipher_decrypt(&ses_ptr->cdata,
                                                       dst_sg, dst_sg, len);

                        if (unlikely(ret)) {
                                dprintk(0, KERN_ERR, "cryptodev_cipher_decrypt: %d\n", ret);
                                return ret;
                        }
                }
        }
        kcaop->dst_len = len;
        return 0;
}

/* Typical AEAD (e.g. GCM) encryption/decryption.
 * During decryption the tag is verified.
 */
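/* Here the tag is carried at the end of the ciphertext: encrypting len
 * bytes yields dst_len = len + tag_len with caop->tag pointing at
 * dst + len (e.g. 16 bytes for a default AES-GCM tag), while decryption
 * consumes ciphertext || tag and yields dst_len = len - tag_len.
 */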
static int
auth_n_crypt(struct csession *ses_ptr, struct kernel_crypt_auth_op *kcaop,
                struct scatterlist *auth_sg, uint32_t auth_len,
                struct scatterlist *src_sg,
                struct scatterlist *dst_sg, uint32_t len)
{
        int ret;
        struct crypt_auth_op *caop = &kcaop->caop;
        int max_tag_len;

        max_tag_len = cryptodev_cipher_get_tag_size(&ses_ptr->cdata);
        if (unlikely(caop->tag_len > max_tag_len)) {
                dprintk(0, KERN_ERR, "Illegal tag length: %d\n", caop->tag_len);
                return -EINVAL;
        }

        if (caop->tag_len)
                cryptodev_cipher_set_tag_size(&ses_ptr->cdata, caop->tag_len);
        else
                caop->tag_len = max_tag_len;

        cryptodev_cipher_auth(&ses_ptr->cdata, auth_sg, auth_len);

        if (caop->op == COP_ENCRYPT) {
                ret = cryptodev_cipher_encrypt(&ses_ptr->cdata,
                                               src_sg, dst_sg, len);
                if (unlikely(ret)) {
                        dprintk(0, KERN_ERR, "cryptodev_cipher_encrypt: %d\n", ret);
                        return ret;
                }
                kcaop->dst_len = len + caop->tag_len;
                caop->tag = caop->dst + len;
        } else {
                ret = cryptodev_cipher_decrypt(&ses_ptr->cdata,
                                               src_sg, dst_sg, len);

                if (unlikely(ret)) {
                        dprintk(0, KERN_ERR, "cryptodev_cipher_decrypt: %d\n", ret);
                        return ret;
                }
                kcaop->dst_len = len - caop->tag_len;
                caop->tag = caop->dst + len - caop->tag_len;
        }

        return 0;
}

/* This is the main crypto function - zero-copy edition */
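/* Dispatch summary: SRTP maps the user buffer with get_userbuf_srtp();
 * TLS with a non-AEAD cipher uses get_userbuf_tls(); everything else is
 * treated as plain AEAD through get_userbuf(). In the non-SRTP cases the
 * auth data are small, so they are staged through one kernel page rather
 * than mapped.
 */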
static int
__crypto_auth_run_zc(struct csession *ses_ptr, struct kernel_crypt_auth_op *kcaop)
{
        struct scatterlist *dst_sg, *auth_sg, *src_sg;
        struct crypt_auth_op *caop = &kcaop->caop;
        int ret = 0;

        if (caop->flags & COP_FLAG_AEAD_SRTP_TYPE) {
                if (unlikely(ses_ptr->cdata.init != 0 &&
                             (ses_ptr->cdata.stream == 0 || ses_ptr->cdata.aead != 0))) {
                        dprintk(0, KERN_ERR, "Only stream modes are allowed in SRTP mode (but not AEAD)\n");
                        return -EINVAL;
                }

                ret = get_userbuf_srtp(ses_ptr, kcaop, &auth_sg, &dst_sg);
                if (unlikely(ret)) {
                        dprintk(1, KERN_ERR, "get_userbuf_srtp(): Error getting user pages.\n");
                        return ret;
                }

                ret = srtp_auth_n_crypt(ses_ptr, kcaop, auth_sg, caop->auth_len,
                                        dst_sg, caop->len);

                release_user_pages(ses_ptr);
        } else { /* TLS and normal cases. Here the auth data are usually small,
                  * so we just copy them to a free page instead of trying
                  * to map them. */
                unsigned char *auth_buf = NULL;
                struct scatterlist tmp;

                if (unlikely(caop->auth_len > PAGE_SIZE)) {
                        dprintk(1, KERN_ERR, "auth data len is excessive.\n");
                        return -EINVAL;
                }

                auth_buf = (char *)__get_free_page(GFP_KERNEL);
                if (unlikely(!auth_buf)) {
                        dprintk(1, KERN_ERR, "unable to get a free page.\n");
                        return -ENOMEM;
                }

                if (caop->auth_src && caop->auth_len > 0) {
                        if (unlikely(copy_from_user(auth_buf, caop->auth_src, caop->auth_len))) {
                                dprintk(1, KERN_ERR, "unable to copy auth data from userspace.\n");
                                ret = -EFAULT;
                                goto free_auth_buf;
                        }

                        sg_init_one(&tmp, auth_buf, caop->auth_len);
                        auth_sg = &tmp;
                } else {
                        auth_sg = NULL;
                }

                if (caop->flags & COP_FLAG_AEAD_TLS_TYPE && ses_ptr->cdata.aead == 0) {
                        ret = get_userbuf_tls(ses_ptr, kcaop, &dst_sg);
                        if (unlikely(ret)) {
                                dprintk(1, KERN_ERR, "get_userbuf_tls(): Error getting user pages.\n");
                                goto free_auth_buf;
                        }

                        ret = tls_auth_n_crypt(ses_ptr, kcaop, auth_sg, caop->auth_len,
                                               dst_sg, caop->len);
                } else {
                        int dst_len;

                        if (unlikely(ses_ptr->cdata.init == 0 ||
                                     ses_ptr->cdata.stream == 0 ||
                                     ses_ptr->cdata.aead == 0)) {
                                dprintk(0, KERN_ERR, "Only stream and AEAD ciphers are allowed for authenc\n");
                                ret = -EINVAL;
                                goto free_auth_buf;
                        }

                        if (caop->op == COP_ENCRYPT)
                                dst_len = caop->len + cryptodev_cipher_get_tag_size(&ses_ptr->cdata);
                        else
                                dst_len = caop->len - cryptodev_cipher_get_tag_size(&ses_ptr->cdata);

                        ret = get_userbuf(ses_ptr, caop->src, caop->len, caop->dst, dst_len,
                                          kcaop->task, kcaop->mm, &src_sg, &dst_sg);
                        if (unlikely(ret)) {
                                dprintk(1, KERN_ERR, "get_userbuf(): Error getting user pages.\n");
                                goto free_auth_buf;
                        }

                        ret = auth_n_crypt(ses_ptr, kcaop, auth_sg, caop->auth_len,
                                           src_sg, dst_sg, caop->len);
                }

                release_user_pages(ses_ptr);

free_auth_buf:
                free_page((unsigned long)auth_buf);
        }

        return ret;
}

int crypto_auth_run(struct fcrypt *fcr, struct kernel_crypt_auth_op *kcaop)
{
        struct csession *ses_ptr;
        struct crypt_auth_op *caop = &kcaop->caop;
        int ret;

        if (unlikely(caop->op != COP_ENCRYPT && caop->op != COP_DECRYPT)) {
                dprintk(1, KERN_DEBUG, "invalid operation op=%u\n", caop->op);
                return -EINVAL;
        }

        /* this also enters ses_ptr->sem */
        ses_ptr = crypto_get_session_by_sid(fcr, caop->ses);
        if (unlikely(!ses_ptr)) {
                dprintk(1, KERN_ERR, "invalid session ID=0x%08X\n", caop->ses);
                return -EINVAL;
        }

        if (unlikely(ses_ptr->cdata.init == 0)) {
                dprintk(1, KERN_ERR, "cipher context not initialized\n");
                ret = -EINVAL;
                goto out_unlock;
        }

        /* If we have a hash/mac handle, reset its state */
        if (ses_ptr->hdata.init != 0) {
                ret = cryptodev_hash_reset(&ses_ptr->hdata);
                if (unlikely(ret)) {
                        dprintk(1, KERN_ERR,
                                "error in cryptodev_hash_reset()\n");
                        goto out_unlock;
                }
        }

        cryptodev_cipher_set_iv(&ses_ptr->cdata, kcaop->iv,
                                min(ses_ptr->cdata.ivsize, kcaop->ivlen));

        ret = __crypto_auth_run_zc(ses_ptr, kcaop);
        if (unlikely(ret)) {
                dprintk(1, KERN_ERR,
                        "error in __crypto_auth_run_zc()\n");
                goto out_unlock;
        }

        ret = 0;

        cryptodev_cipher_get_iv(&ses_ptr->cdata, kcaop->iv,
                                min(ses_ptr->cdata.ivsize, kcaop->ivlen));

out_unlock:
        crypto_put_session(ses_ptr);
        return ret;
}