examples/*cipher: print buffers on mismatch
[cryptodev-linux.git] / cryptodev_main.c
blob2a6e4bcd0a59298e4452b189ea0c29eef26559c5
1 /*
2 * Driver for /dev/crypto device (aka CryptoDev)
4 * Copyright (c) 2004 Michal Ludvig <mludvig@logix.net.nz>, SuSE Labs
5 * Copyright (c) 2009,2010 Nikos Mavrogiannopoulos <nmav@gnutls.org>
7 * This file is part of linux cryptodev.
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version 2
12 * of the License, or (at your option) any later version.
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc.,
22 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
26 * Device /dev/crypto provides an interface for
27 * accessing kernel CryptoAPI algorithms (ciphers,
28 * hashes) from userspace programs.
30 * /dev/crypto interface was originally introduced in
31 * OpenBSD and this module attempts to keep the API.
35 #include <linux/crypto.h>
36 #include <linux/mm.h>
37 #include <linux/highmem.h>
38 #include <linux/ioctl.h>
39 #include <linux/random.h>
40 #include <linux/syscalls.h>
41 #include <linux/pagemap.h>
42 #include <linux/poll.h>
43 #include <linux/uaccess.h>
44 #include "cryptodev.h"
45 #include <linux/scatterlist.h>
46 #include "cryptodev_int.h"
47 #include "version.h"
49 MODULE_AUTHOR("Nikos Mavrogiannopoulos <nmav@gnutls.org>");
50 MODULE_DESCRIPTION("CryptoDev driver");
51 MODULE_LICENSE("GPL");
53 /* ====== Compile-time config ====== */
55 #define CRYPTODEV_STATS
57 /* Default (pre-allocated) and maximum size of the job queue.
58 * These are free, pending and done items all together. */
59 #define DEF_COP_RINGSIZE 16
60 #define MAX_COP_RINGSIZE 64
62 /* ====== Module parameters ====== */
64 int cryptodev_verbosity;
65 module_param(cryptodev_verbosity, int, 0644);
66 MODULE_PARM_DESC(cryptodev_verbosity, "0: normal, 1: verbose, 2: debug");
68 #ifdef CRYPTODEV_STATS
69 static int enable_stats;
70 module_param(enable_stats, int, 0644);
71 MODULE_PARM_DESC(enable_stats, "collect statictics about cryptodev usage");
72 #endif
74 /* ====== CryptoAPI ====== */
75 struct fcrypt {
76 struct list_head list;
77 struct mutex sem;
80 struct todo_list_item {
81 struct list_head __hook;
82 struct kernel_crypt_op kcop;
83 int result;
86 struct locked_list {
87 struct list_head list;
88 struct mutex lock;
91 struct crypt_priv {
92 struct fcrypt fcrypt;
93 struct locked_list free, todo, done;
94 int itemcount;
95 struct work_struct cryptask;
96 wait_queue_head_t user_waiter;
/* Initialise a single scatterlist entry from a kernel virtual address. */
#define FILL_SG(sg, ptr, len)					\
	do {							\
		(sg)->page = virt_to_page(ptr);			\
		(sg)->offset = offset_in_page(ptr);		\
		(sg)->length = len;				\
		(sg)->dma_address = 0;				\
	} while (0)
107 struct csession {
108 struct list_head entry;
109 struct mutex sem;
110 struct cipher_data cdata;
111 struct hash_data hdata;
112 uint32_t sid;
113 uint32_t alignmask;
114 #ifdef CRYPTODEV_STATS
115 #if !((COP_ENCRYPT < 2) && (COP_DECRYPT < 2))
116 #error Struct csession.stat uses COP_{ENCRYPT,DECRYPT} as indices. Do something!
117 #endif
118 unsigned long long stat[2];
119 size_t stat_max_size, stat_count;
120 #endif
121 int array_size;
122 struct page **pages;
123 struct scatterlist *sg;
126 /* cryptodev's own workqueue, keeps crypto tasks from disturbing the force */
127 static struct workqueue_struct *cryptodev_wq;
129 /* Prepare session for future use. */
130 static int
131 crypto_create_session(struct fcrypt *fcr, struct session_op *sop)
133 struct csession *ses_new = NULL, *ses_ptr;
134 int ret = 0;
135 const char *alg_name = NULL;
136 const char *hash_name = NULL;
137 int hmac_mode = 1;
139 /* Does the request make sense? */
140 if (unlikely(!sop->cipher && !sop->mac)) {
141 dprintk(1, KERN_DEBUG, "Both 'cipher' and 'mac' unset.\n");
142 return -EINVAL;
145 switch (sop->cipher) {
146 case 0:
147 break;
148 case CRYPTO_DES_CBC:
149 alg_name = "cbc(des)";
150 break;
151 case CRYPTO_3DES_CBC:
152 alg_name = "cbc(des3_ede)";
153 break;
154 case CRYPTO_BLF_CBC:
155 alg_name = "cbc(blowfish)";
156 break;
157 case CRYPTO_AES_CBC:
158 alg_name = "cbc(aes)";
159 break;
160 case CRYPTO_AES_ECB:
161 alg_name = "ecb(aes)";
162 break;
163 case CRYPTO_CAMELLIA_CBC:
164 alg_name = "cbc(camelia)";
165 break;
166 case CRYPTO_AES_CTR:
167 alg_name = "ctr(aes)";
168 break;
169 case CRYPTO_NULL:
170 alg_name = "ecb(cipher_null)";
171 break;
172 default:
173 dprintk(1, KERN_DEBUG, "%s: bad cipher: %d\n", __func__,
174 sop->cipher);
175 return -EINVAL;
178 switch (sop->mac) {
179 case 0:
180 break;
181 case CRYPTO_MD5_HMAC:
182 hash_name = "hmac(md5)";
183 break;
184 case CRYPTO_RIPEMD160_HMAC:
185 hash_name = "hmac(rmd160)";
186 break;
187 case CRYPTO_SHA1_HMAC:
188 hash_name = "hmac(sha1)";
189 break;
190 case CRYPTO_SHA2_256_HMAC:
191 hash_name = "hmac(sha256)";
192 break;
193 case CRYPTO_SHA2_384_HMAC:
194 hash_name = "hmac(sha384)";
195 break;
196 case CRYPTO_SHA2_512_HMAC:
197 hash_name = "hmac(sha512)";
198 break;
200 /* non-hmac cases */
201 case CRYPTO_MD5:
202 hash_name = "md5";
203 hmac_mode = 0;
204 break;
205 case CRYPTO_RIPEMD160:
206 hash_name = "rmd160";
207 hmac_mode = 0;
208 break;
209 case CRYPTO_SHA1:
210 hash_name = "sha1";
211 hmac_mode = 0;
212 break;
213 case CRYPTO_SHA2_256:
214 hash_name = "sha256";
215 hmac_mode = 0;
216 break;
217 case CRYPTO_SHA2_384:
218 hash_name = "sha384";
219 hmac_mode = 0;
220 break;
221 case CRYPTO_SHA2_512:
222 hash_name = "sha512";
223 hmac_mode = 0;
224 break;
226 default:
227 dprintk(1, KERN_DEBUG, "%s: bad mac: %d\n", __func__,
228 sop->mac);
229 return -EINVAL;
232 /* Create a session and put it to the list. */
233 ses_new = kzalloc(sizeof(*ses_new), GFP_KERNEL);
234 if (!ses_new)
235 return -ENOMEM;
237 /* Set-up crypto transform. */
238 if (alg_name) {
239 uint8_t keyp[CRYPTO_CIPHER_MAX_KEY_LEN];
241 if (unlikely(sop->keylen > CRYPTO_CIPHER_MAX_KEY_LEN)) {
242 dprintk(1, KERN_DEBUG,
243 "Setting key failed for %s-%zu.\n",
244 alg_name, (size_t)sop->keylen*8);
245 ret = -EINVAL;
246 goto error_cipher;
249 if (unlikely(copy_from_user(keyp, sop->key, sop->keylen))) {
250 ret = -EFAULT;
251 goto error_cipher;
254 ret = cryptodev_cipher_init(&ses_new->cdata, alg_name, keyp,
255 sop->keylen);
256 if (ret < 0) {
257 dprintk(1, KERN_DEBUG,
258 "%s: Failed to load cipher for %s\n",
259 __func__, alg_name);
260 ret = -EINVAL;
261 goto error_cipher;
265 if (hash_name) {
266 uint8_t keyp[CRYPTO_HMAC_MAX_KEY_LEN];
268 if (unlikely(sop->mackeylen > CRYPTO_HMAC_MAX_KEY_LEN)) {
269 dprintk(1, KERN_DEBUG,
270 "Setting key failed for %s-%zu.\n",
271 alg_name, (size_t)sop->mackeylen*8);
272 ret = -EINVAL;
273 goto error_hash;
276 if (sop->mackey && unlikely(copy_from_user(keyp, sop->mackey,
277 sop->mackeylen))) {
278 ret = -EFAULT;
279 goto error_hash;
282 ret = cryptodev_hash_init(&ses_new->hdata, hash_name, hmac_mode,
283 keyp, sop->mackeylen);
284 if (ret != 0) {
285 dprintk(1, KERN_DEBUG,
286 "%s: Failed to load hash for %s\n",
287 __func__, hash_name);
288 ret = -EINVAL;
289 goto error_hash;
293 sop->alignmask = ses_new->alignmask = max(ses_new->cdata.alignmask,
294 ses_new->hdata.alignmask);
295 dprintk(2, KERN_DEBUG, "%s: got alignmask %d\n", __func__, ses_new->alignmask);
297 ses_new->array_size = DEFAULT_PREALLOC_PAGES;
298 dprintk(2, KERN_DEBUG, "%s: preallocating for %d user pages\n",
299 __func__, ses_new->array_size);
300 ses_new->pages = kzalloc(ses_new->array_size *
301 sizeof(struct page *), GFP_KERNEL);
302 ses_new->sg = kzalloc(ses_new->array_size *
303 sizeof(struct scatterlist), GFP_KERNEL);
304 if (ses_new->sg == NULL || ses_new->pages == NULL) {
305 dprintk(0, KERN_DEBUG, "Memory error\n");
306 ret = -ENOMEM;
307 goto error_hash;
310 /* put the new session to the list */
311 get_random_bytes(&ses_new->sid, sizeof(ses_new->sid));
312 mutex_init(&ses_new->sem);
314 mutex_lock(&fcr->sem);
315 restart:
316 list_for_each_entry(ses_ptr, &fcr->list, entry) {
317 /* Check for duplicate SID */
318 if (unlikely(ses_new->sid == ses_ptr->sid)) {
319 get_random_bytes(&ses_new->sid, sizeof(ses_new->sid));
320 /* Unless we have a broken RNG this
321 shouldn't loop forever... ;-) */
322 goto restart;
326 list_add(&ses_new->entry, &fcr->list);
327 mutex_unlock(&fcr->sem);
329 /* Fill in some values for the user. */
330 sop->ses = ses_new->sid;
332 return 0;
334 error_hash:
335 cryptodev_cipher_deinit(&ses_new->cdata);
336 kfree(ses_new->sg);
337 kfree(ses_new->pages);
338 error_cipher:
339 kfree(ses_new);
341 return ret;
345 /* Everything that needs to be done when remowing a session. */
346 static inline void
347 crypto_destroy_session(struct csession *ses_ptr)
349 if (!mutex_trylock(&ses_ptr->sem)) {
350 dprintk(2, KERN_DEBUG, "Waiting for semaphore of sid=0x%08X\n",
351 ses_ptr->sid);
352 mutex_lock(&ses_ptr->sem);
354 dprintk(2, KERN_DEBUG, "Removed session 0x%08X\n", ses_ptr->sid);
355 #if defined(CRYPTODEV_STATS)
356 if (enable_stats)
357 dprintk(2, KERN_DEBUG,
358 "Usage in Bytes: enc=%llu, dec=%llu, "
359 "max=%zu, avg=%lu, cnt=%zu\n",
360 ses_ptr->stat[COP_ENCRYPT], ses_ptr->stat[COP_DECRYPT],
361 ses_ptr->stat_max_size, ses_ptr->stat_count > 0
362 ? ((unsigned long)(ses_ptr->stat[COP_ENCRYPT]+
363 ses_ptr->stat[COP_DECRYPT]) /
364 ses_ptr->stat_count) : 0,
365 ses_ptr->stat_count);
366 #endif
367 cryptodev_cipher_deinit(&ses_ptr->cdata);
368 cryptodev_hash_deinit(&ses_ptr->hdata);
369 dprintk(2, KERN_DEBUG, "%s: freeing space for %d user pages\n",
370 __func__, ses_ptr->array_size);
371 kfree(ses_ptr->pages);
372 kfree(ses_ptr->sg);
373 mutex_unlock(&ses_ptr->sem);
374 kfree(ses_ptr);
377 /* Look up a session by ID and remove. */
378 static int
379 crypto_finish_session(struct fcrypt *fcr, uint32_t sid)
381 struct csession *tmp, *ses_ptr;
382 struct list_head *head;
383 int ret = 0;
385 mutex_lock(&fcr->sem);
386 head = &fcr->list;
387 list_for_each_entry_safe(ses_ptr, tmp, head, entry) {
388 if (ses_ptr->sid == sid) {
389 list_del(&ses_ptr->entry);
390 crypto_destroy_session(ses_ptr);
391 break;
395 if (unlikely(!ses_ptr)) {
396 dprintk(1, KERN_ERR, "Session with sid=0x%08X not found!\n",
397 sid);
398 ret = -ENOENT;
400 mutex_unlock(&fcr->sem);
402 return ret;
405 /* Remove all sessions when closing the file */
406 static int
407 crypto_finish_all_sessions(struct fcrypt *fcr)
409 struct csession *tmp, *ses_ptr;
410 struct list_head *head;
412 mutex_lock(&fcr->sem);
414 head = &fcr->list;
415 list_for_each_entry_safe(ses_ptr, tmp, head, entry) {
416 list_del(&ses_ptr->entry);
417 crypto_destroy_session(ses_ptr);
419 mutex_unlock(&fcr->sem);
421 return 0;
424 /* Look up session by session ID. The returned session is locked. */
425 static struct csession *
426 crypto_get_session_by_sid(struct fcrypt *fcr, uint32_t sid)
428 struct csession *ses_ptr;
430 mutex_lock(&fcr->sem);
431 list_for_each_entry(ses_ptr, &fcr->list, entry) {
432 if (ses_ptr->sid == sid) {
433 mutex_lock(&ses_ptr->sem);
434 break;
437 mutex_unlock(&fcr->sem);
439 return ses_ptr;
442 static int
443 hash_n_crypt(struct csession *ses_ptr, struct crypt_op *cop,
444 struct scatterlist *src_sg, struct scatterlist *dst_sg,
445 uint32_t len)
447 int ret;
449 /* Always hash before encryption and after decryption. Maybe
450 * we should introduce a flag to switch... TBD later on.
452 if (cop->op == COP_ENCRYPT) {
453 if (ses_ptr->hdata.init != 0) {
454 ret = cryptodev_hash_update(&ses_ptr->hdata,
455 src_sg, len);
456 if (unlikely(ret))
457 goto out_err;
459 if (ses_ptr->cdata.init != 0) {
460 ret = cryptodev_cipher_encrypt(&ses_ptr->cdata,
461 src_sg, dst_sg, len);
463 if (unlikely(ret))
464 goto out_err;
466 } else {
467 if (ses_ptr->cdata.init != 0) {
468 ret = cryptodev_cipher_decrypt(&ses_ptr->cdata,
469 src_sg, dst_sg, len);
471 if (unlikely(ret))
472 goto out_err;
475 if (ses_ptr->hdata.init != 0) {
476 ret = cryptodev_hash_update(&ses_ptr->hdata,
477 dst_sg, len);
478 if (unlikely(ret))
479 goto out_err;
482 return 0;
483 out_err:
484 dprintk(0, KERN_ERR, "CryptoAPI failure: %d\n", ret);
485 return ret;
489 /* This is the main crypto function - feed it with plaintext
490 and get a ciphertext (or vice versa :-) */
491 static int
492 __crypto_run_std(struct csession *ses_ptr, struct crypt_op *cop)
494 char *data;
495 char __user *src, *dst;
496 struct scatterlist sg;
497 size_t nbytes, bufsize;
498 int ret = 0;
500 nbytes = cop->len;
501 data = (char *)__get_free_page(GFP_KERNEL);
503 if (unlikely(!data))
504 return -ENOMEM;
506 bufsize = PAGE_SIZE < nbytes ? PAGE_SIZE : nbytes;
508 src = cop->src;
509 dst = cop->dst;
511 while (nbytes > 0) {
512 size_t current_len = nbytes > bufsize ? bufsize : nbytes;
514 if (unlikely(copy_from_user(data, src, current_len))) {
515 ret = -EFAULT;
516 break;
519 sg_init_one(&sg, data, current_len);
521 ret = hash_n_crypt(ses_ptr, cop, &sg, &sg, current_len);
523 if (unlikely(ret))
524 break;
526 if (ses_ptr->cdata.init != 0) {
527 if (unlikely(copy_to_user(dst, data, current_len))) {
528 ret = -EFAULT;
529 break;
533 dst += current_len;
534 nbytes -= current_len;
535 src += current_len;
538 free_page((unsigned long)data);
539 return ret;
/* Unpin pagecount previously pinned user pages, marking writable
 * (non-reserved) pages dirty before dropping the reference. */
void release_user_pages(struct page **pg, int pagecount)
{
	while (pagecount--) {
		if (!PageReserved(pg[pagecount]))
			SetPageDirty(pg[pagecount]);
		page_cache_release(pg[pagecount]);
	}
}
551 /* offset of buf in it's first page */
552 #define PAGEOFFSET(buf) ((unsigned long)buf & ~PAGE_MASK)
554 /* fetch the pages addr resides in into pg and initialise sg with them */
555 int __get_userbuf(uint8_t __user *addr, uint32_t len, int write,
556 int pgcount, struct page **pg, struct scatterlist *sg,
557 struct task_struct *task, struct mm_struct *mm)
559 int ret, pglen, i = 0;
560 struct scatterlist *sgp;
562 down_write(&mm->mmap_sem);
563 ret = get_user_pages(task, mm,
564 (unsigned long)addr, pgcount, write, 0, pg, NULL);
565 up_write(&mm->mmap_sem);
566 if (ret != pgcount)
567 return -EINVAL;
569 sg_init_table(sg, pgcount);
571 pglen = min((ptrdiff_t)(PAGE_SIZE - PAGEOFFSET(addr)), (ptrdiff_t)len);
572 sg_set_page(sg, pg[i++], pglen, PAGEOFFSET(addr));
574 len -= pglen;
575 for (sgp = sg_next(sg); len; sgp = sg_next(sgp)) {
576 pglen = min((uint32_t)PAGE_SIZE, len);
577 sg_set_page(sgp, pg[i++], pglen, 0);
578 len -= pglen;
580 sg_mark_end(sg_last(sg, pgcount));
581 return 0;
584 /* make cop->src and cop->dst available in scatterlists */
585 static int get_userbuf(struct csession *ses, struct kernel_crypt_op *kcop,
586 struct scatterlist **src_sg, struct scatterlist **dst_sg,
587 int *tot_pages)
589 int src_pagecount, dst_pagecount = 0, pagecount, write_src = 1;
590 struct crypt_op *cop = &kcop->cop;
591 int rc;
593 if (cop->src == NULL)
594 return -EINVAL;
596 if (!IS_ALIGNED((unsigned long)cop->src, ses->alignmask)) {
597 dprintk(2, KERN_WARNING, "%s: careful - source address %lx is not %d byte aligned\n",
598 __func__, (unsigned long)cop->src, ses->alignmask + 1);
601 src_pagecount = PAGECOUNT(cop->src, cop->len);
602 if (!ses->cdata.init) { /* hashing only */
603 write_src = 0;
604 } else if (cop->src != cop->dst) { /* non-in-situ transformation */
605 if (cop->dst == NULL)
606 return -EINVAL;
608 dst_pagecount = PAGECOUNT(cop->dst, cop->len);
609 write_src = 0;
611 if (!IS_ALIGNED((unsigned long)cop->dst, ses->alignmask)) {
612 dprintk(2, KERN_WARNING, "%s: careful - destination address %lx is not %d byte aligned\n",
613 __func__, (unsigned long)cop->dst, ses->alignmask + 1);
617 (*tot_pages) = pagecount = src_pagecount + dst_pagecount;
619 if (pagecount > ses->array_size) {
620 struct scatterlist *sg;
621 struct page **pages;
622 int array_size;
624 for (array_size = ses->array_size; array_size < pagecount;
625 array_size *= 2)
628 dprintk(2, KERN_DEBUG, "%s: reallocating to %d elements\n",
629 __func__, array_size);
630 pages = krealloc(ses->pages, array_size * sizeof(struct page *),
631 GFP_KERNEL);
632 if (unlikely(!pages))
633 return -ENOMEM;
634 ses->pages = pages;
635 sg = krealloc(ses->sg, array_size * sizeof(struct scatterlist),
636 GFP_KERNEL);
637 if (unlikely(!sg))
638 return -ENOMEM;
639 ses->sg = sg;
640 ses->array_size = array_size;
643 rc = __get_userbuf(cop->src, cop->len, write_src, src_pagecount,
644 ses->pages, ses->sg, kcop->task, kcop->mm);
645 if (unlikely(rc)) {
646 dprintk(1, KERN_ERR,
647 "failed to get user pages for data input\n");
648 return -EINVAL;
650 (*src_sg) = (*dst_sg) = ses->sg;
652 if (!dst_pagecount)
653 return 0;
655 (*dst_sg) = ses->sg + src_pagecount;
657 rc = __get_userbuf(cop->dst, cop->len, 1, dst_pagecount,
658 ses->pages + src_pagecount, *dst_sg,
659 kcop->task, kcop->mm);
660 if (unlikely(rc)) {
661 dprintk(1, KERN_ERR,
662 "failed to get user pages for data output\n");
663 release_user_pages(ses->pages, src_pagecount);
664 return -EINVAL;
666 return 0;
669 /* This is the main crypto function - zero-copy edition */
670 static int
671 __crypto_run_zc(struct csession *ses_ptr, struct kernel_crypt_op *kcop)
673 struct scatterlist *src_sg, *dst_sg;
674 struct crypt_op *cop = &kcop->cop;
675 int ret = 0, pagecount;
677 ret = get_userbuf(ses_ptr, kcop, &src_sg, &dst_sg, &pagecount);
678 if (unlikely(ret)) {
679 dprintk(1, KERN_ERR, "Error getting user pages. "
680 "Falling back to non zero copy.\n");
681 return __crypto_run_std(ses_ptr, cop);
684 ret = hash_n_crypt(ses_ptr, cop, src_sg, dst_sg, cop->len);
686 release_user_pages(ses_ptr->pages, pagecount);
687 return ret;
690 static int crypto_run(struct fcrypt *fcr, struct kernel_crypt_op *kcop)
692 struct csession *ses_ptr;
693 struct crypt_op *cop = &kcop->cop;
694 int ret;
696 if (unlikely(cop->op != COP_ENCRYPT && cop->op != COP_DECRYPT)) {
697 dprintk(1, KERN_DEBUG, "invalid operation op=%u\n", cop->op);
698 return -EINVAL;
701 /* this also enters ses_ptr->sem */
702 ses_ptr = crypto_get_session_by_sid(fcr, cop->ses);
703 if (unlikely(!ses_ptr)) {
704 dprintk(1, KERN_ERR, "invalid session ID=0x%08X\n", cop->ses);
705 return -EINVAL;
708 if (ses_ptr->hdata.init != 0 && !(cop->flags & COP_FLAG_UPDATE) &&
709 !(cop->flags & COP_FLAG_FINAL)) {
710 ret = cryptodev_hash_reset(&ses_ptr->hdata);
711 if (unlikely(ret)) {
712 dprintk(1, KERN_ERR,
713 "error in cryptodev_hash_reset()\n");
714 goto out_unlock;
718 if (ses_ptr->cdata.init != 0) {
719 int blocksize = ses_ptr->cdata.blocksize;
721 if (unlikely(cop->len % blocksize)) {
722 dprintk(1, KERN_ERR,
723 "data size (%u) isn't a multiple "
724 "of block size (%u)\n",
725 cop->len, blocksize);
726 ret = -EINVAL;
727 goto out_unlock;
730 cryptodev_cipher_set_iv(&ses_ptr->cdata, kcop->iv,
731 min(ses_ptr->cdata.ivsize, kcop->ivlen));
734 if (cop->len != 0) {
735 ret = __crypto_run_zc(ses_ptr, kcop);
736 if (unlikely(ret))
737 goto out_unlock;
740 if (ses_ptr->hdata.init != 0 &&
741 ((cop->flags & COP_FLAG_FINAL) ||
742 (!(cop->flags & COP_FLAG_UPDATE) || cop->len == 0))) {
744 ret = cryptodev_hash_final(&ses_ptr->hdata, kcop->hash_output);
745 if (unlikely(ret)) {
746 dprintk(0, KERN_ERR, "CryptoAPI failure: %d\n", ret);
747 goto out_unlock;
749 kcop->digestsize = ses_ptr->hdata.digestsize;
752 #if defined(CRYPTODEV_STATS)
753 if (enable_stats) {
754 /* this is safe - we check cop->op at the function entry */
755 ses_ptr->stat[cop->op] += cop->len;
756 if (ses_ptr->stat_max_size < cop->len)
757 ses_ptr->stat_max_size = cop->len;
758 ses_ptr->stat_count++;
760 #endif
762 out_unlock:
763 mutex_unlock(&ses_ptr->sem);
764 return ret;
767 static void cryptask_routine(struct work_struct *work)
769 struct crypt_priv *pcr = container_of(work, struct crypt_priv, cryptask);
770 struct todo_list_item *item;
771 LIST_HEAD(tmp);
773 /* fetch all pending jobs into the temporary list */
774 mutex_lock(&pcr->todo.lock);
775 list_cut_position(&tmp, &pcr->todo.list, pcr->todo.list.prev);
776 mutex_unlock(&pcr->todo.lock);
778 /* handle each job locklessly */
779 list_for_each_entry(item, &tmp, __hook) {
780 item->result = crypto_run(&pcr->fcrypt, &item->kcop);
781 if (unlikely(item->result))
782 dprintk(0, KERN_ERR, "%s: crypto_run() failed: %d\n",
783 __func__, item->result);
786 /* push all handled jobs to the done list at once */
787 mutex_lock(&pcr->done.lock);
788 list_splice_tail(&tmp, &pcr->done.list);
789 mutex_unlock(&pcr->done.lock);
791 /* wake for POLLIN */
792 wake_up_interruptible(&pcr->user_waiter);
795 /* ====== /dev/crypto ====== */
797 static int
798 cryptodev_open(struct inode *inode, struct file *filp)
800 struct todo_list_item *tmp;
801 struct crypt_priv *pcr;
802 int i;
804 pcr = kmalloc(sizeof(*pcr), GFP_KERNEL);
805 if (!pcr)
806 return -ENOMEM;
808 memset(pcr, 0, sizeof(*pcr));
809 mutex_init(&pcr->fcrypt.sem);
810 INIT_LIST_HEAD(&pcr->fcrypt.list);
812 INIT_LIST_HEAD(&pcr->free.list);
813 INIT_LIST_HEAD(&pcr->todo.list);
814 INIT_LIST_HEAD(&pcr->done.list);
815 INIT_WORK(&pcr->cryptask, cryptask_routine);
816 mutex_init(&pcr->free.lock);
817 mutex_init(&pcr->todo.lock);
818 mutex_init(&pcr->done.lock);
819 init_waitqueue_head(&pcr->user_waiter);
821 for (i = 0; i < DEF_COP_RINGSIZE; i++) {
822 tmp = kzalloc(sizeof(struct todo_list_item), GFP_KERNEL);
823 pcr->itemcount++;
824 dprintk(2, KERN_DEBUG, "%s: allocated new item at %lx\n",
825 __func__, (unsigned long)tmp);
826 list_add(&tmp->__hook, &pcr->free.list);
829 filp->private_data = pcr;
830 dprintk(2, KERN_DEBUG,
831 "Cryptodev handle initialised, %d elements in queue\n",
832 DEF_COP_RINGSIZE);
833 return 0;
836 static int
837 cryptodev_release(struct inode *inode, struct file *filp)
839 struct crypt_priv *pcr = filp->private_data;
840 struct todo_list_item *item, *item_safe;
841 int items_freed = 0;
843 if (!pcr)
844 return 0;
846 cancel_work_sync(&pcr->cryptask);
848 mutex_destroy(&pcr->todo.lock);
849 mutex_destroy(&pcr->done.lock);
850 mutex_destroy(&pcr->free.lock);
852 list_splice_tail(&pcr->todo.list, &pcr->free.list);
853 list_splice_tail(&pcr->done.list, &pcr->free.list);
855 list_for_each_entry_safe(item, item_safe, &pcr->free.list, __hook) {
856 dprintk(2, KERN_DEBUG, "%s: freeing item at %lx\n",
857 __func__, (unsigned long)item);
858 list_del(&item->__hook);
859 kfree(item);
860 items_freed++;
863 if (items_freed != pcr->itemcount) {
864 dprintk(0, KERN_ERR,
865 "%s: freed %d items, but %d should exist!\n",
866 __func__, items_freed, pcr->itemcount);
869 crypto_finish_all_sessions(&pcr->fcrypt);
870 kfree(pcr);
871 filp->private_data = NULL;
873 dprintk(2, KERN_DEBUG,
874 "Cryptodev handle deinitialised, %d elements freed\n",
875 items_freed);
876 return 0;
/* Duplicate filp into a fresh file descriptor; returns the new fd
 * or a negative errno from get_unused_fd(). */
static int
clonefd(struct file *filp)
{
	int fd = get_unused_fd();

	if (fd >= 0) {
		get_file(filp);
		fd_install(fd, filp);
	}

	return fd;
}
892 /* enqueue a job for asynchronous completion
894 * returns:
895 * -EBUSY when there are no free queue slots left
896 * (and the number of slots has reached it MAX_COP_RINGSIZE)
897 * -EFAULT when there was a memory allocation error
898 * 0 on success */
899 static int crypto_async_run(struct crypt_priv *pcr, struct kernel_crypt_op *kcop)
901 struct todo_list_item *item = NULL;
903 mutex_lock(&pcr->free.lock);
904 if (likely(!list_empty(&pcr->free.list))) {
905 item = list_first_entry(&pcr->free.list,
906 struct todo_list_item, __hook);
907 list_del(&item->__hook);
908 } else if (pcr->itemcount < MAX_COP_RINGSIZE) {
909 pcr->itemcount++;
910 } else {
911 mutex_unlock(&pcr->free.lock);
912 return -EBUSY;
914 mutex_unlock(&pcr->free.lock);
916 if (unlikely(!item)) {
917 item = kzalloc(sizeof(struct todo_list_item), GFP_KERNEL);
918 if (unlikely(!item))
919 return -EFAULT;
920 dprintk(1, KERN_INFO, "%s: increased item count to %d\n",
921 __func__, pcr->itemcount);
924 memcpy(&item->kcop, kcop, sizeof(struct kernel_crypt_op));
926 mutex_lock(&pcr->todo.lock);
927 list_add_tail(&item->__hook, &pcr->todo.list);
928 mutex_unlock(&pcr->todo.lock);
930 queue_work(cryptodev_wq, &pcr->cryptask);
931 return 0;
934 /* get the first completed job from the "done" queue
936 * returns:
937 * -EBUSY if no completed jobs are ready (yet)
938 * the return value of crypto_run() otherwise */
939 static int crypto_async_fetch(struct crypt_priv *pcr,
940 struct kernel_crypt_op *kcop)
942 struct todo_list_item *item;
943 int retval;
945 mutex_lock(&pcr->done.lock);
946 if (list_empty(&pcr->done.list)) {
947 mutex_unlock(&pcr->done.lock);
948 return -EBUSY;
950 item = list_first_entry(&pcr->done.list, struct todo_list_item, __hook);
951 list_del(&item->__hook);
952 mutex_unlock(&pcr->done.lock);
954 memcpy(kcop, &item->kcop, sizeof(struct kernel_crypt_op));
955 retval = item->result;
957 mutex_lock(&pcr->free.lock);
958 list_add_tail(&item->__hook, &pcr->free.list);
959 mutex_unlock(&pcr->free.lock);
961 /* wake for POLLOUT */
962 wake_up_interruptible(&pcr->user_waiter);
964 return retval;
967 /* this function has to be called from process context */
968 static int fill_kcop_from_cop(struct kernel_crypt_op *kcop, struct fcrypt *fcr)
970 struct crypt_op *cop = &kcop->cop;
971 struct csession *ses_ptr;
972 int rc;
974 /* this also enters ses_ptr->sem */
975 ses_ptr = crypto_get_session_by_sid(fcr, cop->ses);
976 if (unlikely(!ses_ptr)) {
977 dprintk(1, KERN_ERR, "invalid session ID=0x%08X\n", cop->ses);
978 return -EINVAL;
980 kcop->ivlen = cop->iv ? ses_ptr->cdata.ivsize : 0;
981 kcop->digestsize = 0; /* will be updated during operation */
983 mutex_unlock(&ses_ptr->sem);
985 kcop->task = current;
986 kcop->mm = current->mm;
988 if (cop->iv) {
989 rc = copy_from_user(kcop->iv, cop->iv, kcop->ivlen);
990 if (unlikely(rc)) {
991 dprintk(1, KERN_ERR,
992 "error copying IV (%d bytes), copy_from_user returned %d for address %lx\n",
993 kcop->ivlen, rc, (unsigned long)cop->iv);
994 return -EFAULT;
998 return 0;
1001 static int kcop_from_user(struct kernel_crypt_op *kcop,
1002 struct fcrypt *fcr, void __user *arg)
1004 if (unlikely(copy_from_user(&kcop->cop, arg, sizeof(kcop->cop))))
1005 return -EFAULT;
1007 return fill_kcop_from_cop(kcop, fcr);
1010 static long
1011 cryptodev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg_)
1013 void __user *arg = (void __user *)arg_;
1014 int __user *p = arg;
1015 struct session_op sop;
1016 struct kernel_crypt_op kcop;
1017 struct crypt_priv *pcr = filp->private_data;
1018 struct fcrypt *fcr;
1019 uint32_t ses;
1020 int ret, fd;
1022 if (unlikely(!pcr))
1023 BUG();
1025 fcr = &pcr->fcrypt;
1027 switch (cmd) {
1028 case CIOCASYMFEAT:
1029 return put_user(0, p);
1030 case CRIOGET:
1031 fd = clonefd(filp);
1032 ret = put_user(fd, p);
1033 if (unlikely(ret)) {
1034 sys_close(fd);
1035 return ret;
1037 return ret;
1038 case CIOCGSESSION:
1039 if (unlikely(copy_from_user(&sop, arg, sizeof(sop))))
1040 return -EFAULT;
1042 ret = crypto_create_session(fcr, &sop);
1043 if (unlikely(ret))
1044 return ret;
1045 ret = copy_to_user(arg, &sop, sizeof(sop));
1046 if (unlikely(ret)) {
1047 crypto_finish_session(fcr, sop.ses);
1048 return -EFAULT;
1050 return ret;
1051 case CIOCFSESSION:
1052 ret = get_user(ses, (uint32_t __user *)arg);
1053 if (unlikely(ret))
1054 return ret;
1055 ret = crypto_finish_session(fcr, ses);
1056 return ret;
1057 case CIOCCRYPT:
1058 if (unlikely(ret = kcop_from_user(&kcop, fcr, arg)))
1059 return ret;
1061 ret = crypto_run(fcr, &kcop);
1062 if (unlikely(ret))
1063 return ret;
1065 if (kcop.digestsize) {
1066 ret = copy_to_user(kcop.cop.mac,
1067 kcop.hash_output, kcop.digestsize);
1068 if (unlikely(ret))
1069 return -EFAULT;
1071 if (unlikely(copy_to_user(arg, &kcop.cop, sizeof(kcop.cop))))
1072 return -EFAULT;
1073 return 0;
1074 case CIOCASYNCCRYPT:
1075 if (unlikely(ret = kcop_from_user(&kcop, fcr, arg)))
1076 return ret;
1078 return crypto_async_run(pcr, &kcop);
1079 case CIOCASYNCFETCH:
1080 ret = crypto_async_fetch(pcr, &kcop);
1081 if (unlikely(ret))
1082 return ret;
1084 if (kcop.digestsize) {
1085 ret = copy_to_user(kcop.cop.mac,
1086 kcop.hash_output, kcop.digestsize);
1087 if (unlikely(ret))
1088 return -EFAULT;
1091 return copy_to_user(arg, &kcop.cop, sizeof(kcop.cop));
1093 default:
1094 return -EINVAL;
1098 /* compatibility code for 32bit userlands */
1099 #ifdef CONFIG_COMPAT
1101 static inline void
1102 compat_to_session_op(struct compat_session_op *compat, struct session_op *sop)
1104 sop->cipher = compat->cipher;
1105 sop->mac = compat->mac;
1106 sop->keylen = compat->keylen;
1108 sop->key = compat_ptr(compat->key);
1109 sop->mackeylen = compat->mackeylen;
1110 sop->mackey = compat_ptr(compat->mackey);
1111 sop->ses = compat->ses;
1114 static inline void
1115 session_op_to_compat(struct session_op *sop, struct compat_session_op *compat)
1117 compat->cipher = sop->cipher;
1118 compat->mac = sop->mac;
1119 compat->keylen = sop->keylen;
1121 compat->key = ptr_to_compat(sop->key);
1122 compat->mackeylen = sop->mackeylen;
1123 compat->mackey = ptr_to_compat(sop->mackey);
1124 compat->ses = sop->ses;
1127 static inline void
1128 compat_to_crypt_op(struct compat_crypt_op *compat, struct crypt_op *cop)
1130 cop->ses = compat->ses;
1131 cop->op = compat->op;
1132 cop->flags = compat->flags;
1133 cop->len = compat->len;
1135 cop->src = compat_ptr(compat->src);
1136 cop->dst = compat_ptr(compat->dst);
1137 cop->mac = compat_ptr(compat->mac);
1138 cop->iv = compat_ptr(compat->iv);
1141 static inline void
1142 crypt_op_to_compat(struct crypt_op *cop, struct compat_crypt_op *compat)
1144 compat->ses = cop->ses;
1145 compat->op = cop->op;
1146 compat->flags = cop->flags;
1147 compat->len = cop->len;
1149 compat->src = ptr_to_compat(cop->src);
1150 compat->dst = ptr_to_compat(cop->dst);
1151 compat->mac = ptr_to_compat(cop->mac);
1152 compat->iv = ptr_to_compat(cop->iv);
1155 static int compat_kcop_from_user(struct kernel_crypt_op *kcop,
1156 struct fcrypt *fcr, void __user *arg)
1158 struct compat_crypt_op compat_cop;
1160 if (unlikely(copy_from_user(&compat_cop, arg, sizeof(compat_cop))))
1161 return -EFAULT;
1162 compat_to_crypt_op(&compat_cop, &kcop->cop);
1164 return fill_kcop_from_cop(kcop, fcr);
1167 static long
1168 cryptodev_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg_)
1170 void __user *arg = (void __user *)arg_;
1171 struct crypt_priv *pcr = file->private_data;
1172 struct fcrypt *fcr;
1173 struct session_op sop;
1174 struct compat_session_op compat_sop;
1175 struct kernel_crypt_op kcop;
1176 struct compat_crypt_op compat_cop;
1177 int ret;
1179 if (unlikely(!pcr))
1180 BUG();
1182 fcr = &pcr->fcrypt;
1184 switch (cmd) {
1185 case CIOCASYMFEAT:
1186 case CRIOGET:
1187 case CIOCFSESSION:
1188 return cryptodev_ioctl(file, cmd, arg_);
1190 case COMPAT_CIOCGSESSION:
1191 if (unlikely(copy_from_user(&compat_sop, arg,
1192 sizeof(compat_sop))))
1193 return -EFAULT;
1194 compat_to_session_op(&compat_sop, &sop);
1196 ret = crypto_create_session(fcr, &sop);
1197 if (unlikely(ret))
1198 return ret;
1200 session_op_to_compat(&sop, &compat_sop);
1201 ret = copy_to_user(arg, &compat_sop, sizeof(compat_sop));
1202 if (unlikely(ret)) {
1203 crypto_finish_session(fcr, sop.ses);
1204 return -EFAULT;
1206 return ret;
1208 case COMPAT_CIOCCRYPT:
1209 ret = compat_kcop_from_user(&kcop, fcr, arg);
1210 if (unlikely(ret))
1211 return ret;
1213 ret = crypto_run(fcr, &kcop);
1214 if (unlikely(ret))
1215 return ret;
1217 if (kcop.digestsize) {
1218 ret = copy_to_user(kcop.cop.mac,
1219 kcop.hash_output, kcop.digestsize);
1220 if (unlikely(ret))
1221 return -EFAULT;
1224 crypt_op_to_compat(&kcop.cop, &compat_cop);
1225 if (unlikely(copy_to_user(arg, &compat_cop,
1226 sizeof(compat_cop))))
1227 return -EFAULT;
1228 return 0;
1229 case COMPAT_CIOCASYNCCRYPT:
1230 if (unlikely(ret = compat_kcop_from_user(&kcop, fcr, arg)))
1231 return ret;
1233 return crypto_async_run(pcr, &kcop);
1234 case COMPAT_CIOCASYNCFETCH:
1235 ret = crypto_async_fetch(pcr, &kcop);
1236 if (unlikely(ret))
1237 return ret;
1239 if (kcop.digestsize) {
1240 ret = copy_to_user(kcop.cop.mac,
1241 kcop.hash_output, kcop.digestsize);
1242 if (unlikely(ret))
1243 return -EFAULT;
1246 crypt_op_to_compat(&kcop.cop, &compat_cop);
1247 return copy_to_user(arg, &compat_cop, sizeof(compat_cop));
1249 default:
1250 return -EINVAL;
1254 #endif /* CONFIG_COMPAT */
1256 static unsigned int cryptodev_poll(struct file *file, poll_table *wait)
1258 struct crypt_priv *pcr = file->private_data;
1259 int ret = 0;
1261 poll_wait(file, &pcr->user_waiter, wait);
1263 if (!list_empty_careful(&pcr->done.list))
1264 ret |= POLLIN | POLLRDNORM;
1265 if (!list_empty_careful(&pcr->free.list) || pcr->itemcount < MAX_COP_RINGSIZE)
1266 ret |= POLLOUT | POLLWRNORM;
1268 return ret;
1271 static const struct file_operations cryptodev_fops = {
1272 .owner = THIS_MODULE,
1273 .open = cryptodev_open,
1274 .release = cryptodev_release,
1275 .unlocked_ioctl = cryptodev_ioctl,
1276 #ifdef CONFIG_COMPAT
1277 .compat_ioctl = cryptodev_compat_ioctl,
1278 #endif /* CONFIG_COMPAT */
1279 .poll = cryptodev_poll,
1282 static struct miscdevice cryptodev = {
1283 .minor = MISC_DYNAMIC_MINOR,
1284 .name = "crypto",
1285 .fops = &cryptodev_fops,
1288 static int __init
1289 cryptodev_register(void)
1291 int rc;
1293 rc = misc_register(&cryptodev);
1294 if (unlikely(rc)) {
1295 printk(KERN_ERR PFX "registration of /dev/crypto failed\n");
1296 return rc;
1299 return 0;
1302 static void __exit
1303 cryptodev_deregister(void)
1305 misc_deregister(&cryptodev);
1308 /* ====== Module init/exit ====== */
1309 static int __init init_cryptodev(void)
1311 int rc;
1313 cryptodev_wq = create_workqueue("cryptodev_queue");
1314 if (unlikely(!cryptodev_wq)) {
1315 printk(KERN_ERR PFX "failed to allocate the cryptodev workqueue\n");
1316 return -EFAULT;
1319 rc = cryptodev_register();
1320 if (unlikely(rc)) {
1321 destroy_workqueue(cryptodev_wq);
1322 return rc;
1325 printk(KERN_INFO PFX "driver %s loaded.\n", VERSION);
1327 return 0;
1330 static void __exit exit_cryptodev(void)
1332 flush_workqueue(cryptodev_wq);
1333 destroy_workqueue(cryptodev_wq);
1335 cryptodev_deregister();
1336 printk(KERN_INFO PFX "driver unloaded.\n");
1339 module_init(init_cryptodev);
1340 module_exit(exit_cryptodev);