add support for CRYPTO_AES_ECB
[cryptodev-linux.git] / cryptodev_main.c
/*
 * Driver for /dev/crypto device (aka CryptoDev)
 *
 * Copyright (c) 2004 Michal Ludvig <mludvig@logix.net.nz>, SuSE Labs
 * Copyright (c) 2009,2010 Nikos Mavrogiannopoulos <nmav@gnutls.org>
 *
 * This file is part of linux cryptodev.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

/*
 * Device /dev/crypto provides an interface for
 * accessing kernel CryptoAPI algorithms (ciphers,
 * hashes) from userspace programs.
 *
 * The /dev/crypto interface was originally introduced in
 * OpenBSD, and this module attempts to keep that API.
 */
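/*
 * For orientation, a minimal sketch of how a userspace program is expected
 * to drive this interface. It is not part of the module; the key material,
 * buffer sizes and the absence of error handling are illustrative
 * assumptions, and the usual <fcntl.h>, <sys/ioctl.h> and cryptodev
 * userspace headers are assumed to be included:
 *
 *	int cfd = open("/dev/crypto", O_RDWR);
 *	struct session_op sop = { 0 };
 *	struct crypt_op cop = { 0 };
 *	unsigned char key[16] = { 0 };		// real key material goes here
 *	unsigned char plain[16] = { 0 }, enc[16];
 *
 *	// negotiate a session for the cipher added by this change
 *	sop.cipher = CRYPTO_AES_ECB;
 *	sop.key = key;
 *	sop.keylen = sizeof(key);
 *	ioctl(cfd, CIOCGSESSION, &sop);
 *
 *	// run one block through the session; len must be a multiple
 *	// of the cipher block size
 *	cop.ses = sop.ses;
 *	cop.op = COP_ENCRYPT;
 *	cop.len = sizeof(plain);
 *	cop.src = plain;
 *	cop.dst = enc;
 *	ioctl(cfd, CIOCCRYPT, &cop);
 *
 *	ioctl(cfd, CIOCFSESSION, &sop.ses);
 *	close(cfd);
 */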
#include <linux/crypto.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/ioctl.h>
#include <linux/random.h>
#include <linux/syscalls.h>
#include <linux/pagemap.h>
#include <linux/poll.h>
#include <linux/uaccess.h>
#include "cryptodev.h"
#include <linux/scatterlist.h>
#include "cryptodev_int.h"
#include "version.h"

MODULE_AUTHOR("Nikos Mavrogiannopoulos <nmav@gnutls.org>");
MODULE_DESCRIPTION("CryptoDev driver");
MODULE_LICENSE("GPL");

/* ====== Compile-time config ====== */

#define CRYPTODEV_STATS

/* Default (pre-allocated) and maximum size of the job queue.
 * These are free, pending and done items all together. */
#define DEF_COP_RINGSIZE 16
#define MAX_COP_RINGSIZE 64
/* ====== Module parameters ====== */

int cryptodev_verbosity;
module_param(cryptodev_verbosity, int, 0644);
MODULE_PARM_DESC(cryptodev_verbosity, "0: normal, 1: verbose, 2: debug");

#ifdef CRYPTODEV_STATS
static int enable_stats;
module_param(enable_stats, int, 0644);
MODULE_PARM_DESC(enable_stats, "collect statistics about cryptodev usage");
#endif
/* ====== CryptoAPI ====== */
struct fcrypt {
	struct list_head list;
	struct mutex sem;
};

struct todo_list_item {
	struct list_head __hook;
	struct kernel_crypt_op kcop;
	int result;
};

struct locked_list {
	struct list_head list;
	struct mutex lock;
};

struct crypt_priv {
	struct fcrypt fcrypt;
	struct locked_list free, todo, done;
	int itemcount;
	struct work_struct cryptask;
	wait_queue_head_t user_waiter;
};

#define FILL_SG(sg, ptr, len)				\
	do {						\
		(sg)->page = virt_to_page(ptr);		\
		(sg)->offset = offset_in_page(ptr);	\
		(sg)->length = len;			\
		(sg)->dma_address = 0;			\
	} while (0)

struct csession {
	struct list_head entry;
	struct mutex sem;
	struct cipher_data cdata;
	struct hash_data hdata;
	uint32_t sid;
#ifdef CRYPTODEV_STATS
#if !((COP_ENCRYPT < 2) && (COP_DECRYPT < 2))
#error Struct csession.stat uses COP_{ENCRYPT,DECRYPT} as indices. Do something!
#endif
	unsigned long long stat[2];
	size_t stat_max_size, stat_count;
#endif
	int array_size;
	struct page **pages;
	struct scatterlist *sg;
};

/* cryptodev's own workqueue, keeps crypto tasks from disturbing the force */
static struct workqueue_struct *cryptodev_wq;
/* Prepare session for future use. */
static int
crypto_create_session(struct fcrypt *fcr, struct session_op *sop)
{
	struct csession *ses_new = NULL, *ses_ptr;
	int ret = 0;
	const char *alg_name = NULL;
	const char *hash_name = NULL;
	int hmac_mode = 1;

	/* Does the request make sense? */
	if (unlikely(!sop->cipher && !sop->mac)) {
		dprintk(1, KERN_DEBUG, "Both 'cipher' and 'mac' unset.\n");
		return -EINVAL;
	}

	switch (sop->cipher) {
	case 0:
		break;
	case CRYPTO_DES_CBC:
		alg_name = "cbc(des)";
		break;
	case CRYPTO_3DES_CBC:
		alg_name = "cbc(des3_ede)";
		break;
	case CRYPTO_BLF_CBC:
		alg_name = "cbc(blowfish)";
		break;
	case CRYPTO_AES_CBC:
		alg_name = "cbc(aes)";
		break;
	case CRYPTO_AES_ECB:
		alg_name = "ecb(aes)";
		break;
	case CRYPTO_CAMELLIA_CBC:
		alg_name = "cbc(camellia)";
		break;
	case CRYPTO_AES_CTR:
		alg_name = "ctr(aes)";
		break;
	case CRYPTO_NULL:
		alg_name = "ecb(cipher_null)";
		break;
	default:
		dprintk(1, KERN_DEBUG, "%s: bad cipher: %d\n", __func__,
			sop->cipher);
		return -EINVAL;
	}

	switch (sop->mac) {
	case 0:
		break;
	case CRYPTO_MD5_HMAC:
		hash_name = "hmac(md5)";
		break;
	case CRYPTO_RIPEMD160_HMAC:
		hash_name = "hmac(rmd160)";
		break;
	case CRYPTO_SHA1_HMAC:
		hash_name = "hmac(sha1)";
		break;
	case CRYPTO_SHA2_256_HMAC:
		hash_name = "hmac(sha256)";
		break;
	case CRYPTO_SHA2_384_HMAC:
		hash_name = "hmac(sha384)";
		break;
	case CRYPTO_SHA2_512_HMAC:
		hash_name = "hmac(sha512)";
		break;

	/* non-hmac cases */
	case CRYPTO_MD5:
		hash_name = "md5";
		hmac_mode = 0;
		break;
	case CRYPTO_RIPEMD160:
		hash_name = "rmd160";
		hmac_mode = 0;
		break;
	case CRYPTO_SHA1:
		hash_name = "sha1";
		hmac_mode = 0;
		break;
	case CRYPTO_SHA2_256:
		hash_name = "sha256";
		hmac_mode = 0;
		break;
	case CRYPTO_SHA2_384:
		hash_name = "sha384";
		hmac_mode = 0;
		break;
	case CRYPTO_SHA2_512:
		hash_name = "sha512";
		hmac_mode = 0;
		break;

	default:
		dprintk(1, KERN_DEBUG, "%s: bad mac: %d\n", __func__,
			sop->mac);
		return -EINVAL;
	}

	/* Create a session and put it to the list. */
	ses_new = kzalloc(sizeof(*ses_new), GFP_KERNEL);
	if (!ses_new)
		return -ENOMEM;

	/* Set up the crypto transform. */
	if (alg_name) {
		uint8_t keyp[CRYPTO_CIPHER_MAX_KEY_LEN];

		if (unlikely(sop->keylen > CRYPTO_CIPHER_MAX_KEY_LEN)) {
			dprintk(1, KERN_DEBUG,
				"Setting key failed for %s-%zu.\n",
				alg_name, (size_t)sop->keylen*8);
			ret = -EINVAL;
			goto error_cipher;
		}

		if (unlikely(copy_from_user(keyp, sop->key, sop->keylen))) {
			ret = -EFAULT;
			goto error_cipher;
		}

		ret = cryptodev_cipher_init(&ses_new->cdata, alg_name, keyp,
						sop->keylen);
		if (ret < 0) {
			dprintk(1, KERN_DEBUG,
				"%s: Failed to load cipher for %s\n",
				__func__, alg_name);
			ret = -EINVAL;
			goto error_cipher;
		}
	}

	if (hash_name) {
		uint8_t keyp[CRYPTO_HMAC_MAX_KEY_LEN];

		if (unlikely(sop->mackeylen > CRYPTO_HMAC_MAX_KEY_LEN)) {
			dprintk(1, KERN_DEBUG,
				"Setting key failed for %s-%zu.\n",
				hash_name, (size_t)sop->mackeylen*8);
			ret = -EINVAL;
			goto error_hash;
		}

		if (unlikely(copy_from_user(keyp, sop->mackey,
					    sop->mackeylen))) {
			ret = -EFAULT;
			goto error_hash;
		}

		ret = cryptodev_hash_init(&ses_new->hdata, hash_name, hmac_mode,
						keyp, sop->mackeylen);
		if (ret != 0) {
			dprintk(1, KERN_DEBUG,
				"%s: Failed to load hash for %s\n",
				__func__, hash_name);
			ret = -EINVAL;
			goto error_hash;
		}
	}

	ses_new->array_size = DEFAULT_PREALLOC_PAGES;
	dprintk(2, KERN_DEBUG, "%s: preallocating for %d user pages\n",
		__func__, ses_new->array_size);
	ses_new->pages = kzalloc(ses_new->array_size *
			sizeof(struct page *), GFP_KERNEL);
	ses_new->sg = kzalloc(ses_new->array_size *
			sizeof(struct scatterlist), GFP_KERNEL);
	if (ses_new->sg == NULL || ses_new->pages == NULL) {
		dprintk(0, KERN_DEBUG, "Memory error\n");
		ret = -ENOMEM;
		goto error_hash;
	}

	/* put the new session to the list */
	get_random_bytes(&ses_new->sid, sizeof(ses_new->sid));
	mutex_init(&ses_new->sem);

	mutex_lock(&fcr->sem);
restart:
	list_for_each_entry(ses_ptr, &fcr->list, entry) {
		/* Check for duplicate SID */
		if (unlikely(ses_new->sid == ses_ptr->sid)) {
			get_random_bytes(&ses_new->sid, sizeof(ses_new->sid));
			/* Unless we have a broken RNG this
			   shouldn't loop forever... ;-) */
			goto restart;
		}
	}

	list_add(&ses_new->entry, &fcr->list);
	mutex_unlock(&fcr->sem);

	/* Fill in some values for the user. */
	sop->ses = ses_new->sid;

	return 0;

error_hash:
	cryptodev_cipher_deinit(&ses_new->cdata);
	kfree(ses_new->sg);
	kfree(ses_new->pages);
error_cipher:
	kfree(ses_new);

	return ret;
}
/* Everything that needs to be done when removing a session. */
static inline void
crypto_destroy_session(struct csession *ses_ptr)
{
	if (!mutex_trylock(&ses_ptr->sem)) {
		dprintk(2, KERN_DEBUG, "Waiting for semaphore of sid=0x%08X\n",
			ses_ptr->sid);
		mutex_lock(&ses_ptr->sem);
	}
	dprintk(2, KERN_DEBUG, "Removed session 0x%08X\n", ses_ptr->sid);
#if defined(CRYPTODEV_STATS)
	if (enable_stats)
		dprintk(2, KERN_DEBUG,
			"Usage in Bytes: enc=%llu, dec=%llu, "
			"max=%zu, avg=%lu, cnt=%zu\n",
			ses_ptr->stat[COP_ENCRYPT], ses_ptr->stat[COP_DECRYPT],
			ses_ptr->stat_max_size, ses_ptr->stat_count > 0
				? ((unsigned long)(ses_ptr->stat[COP_ENCRYPT]+
						   ses_ptr->stat[COP_DECRYPT]) /
				   ses_ptr->stat_count) : 0,
			ses_ptr->stat_count);
#endif
	cryptodev_cipher_deinit(&ses_ptr->cdata);
	cryptodev_hash_deinit(&ses_ptr->hdata);
	dprintk(2, KERN_DEBUG, "%s: freeing space for %d user pages\n",
		__func__, ses_ptr->array_size);
	kfree(ses_ptr->pages);
	kfree(ses_ptr->sg);
	mutex_unlock(&ses_ptr->sem);
	kfree(ses_ptr);
}
/* Look up a session by ID and remove. */
static int
crypto_finish_session(struct fcrypt *fcr, uint32_t sid)
{
	struct csession *tmp, *ses_ptr;
	struct list_head *head;
	int ret = -ENOENT;

	mutex_lock(&fcr->sem);
	head = &fcr->list;
	list_for_each_entry_safe(ses_ptr, tmp, head, entry) {
		if (ses_ptr->sid == sid) {
			list_del(&ses_ptr->entry);
			crypto_destroy_session(ses_ptr);
			ret = 0;
			break;
		}
	}

	if (unlikely(ret))
		dprintk(1, KERN_ERR, "Session with sid=0x%08X not found!\n",
			sid);

	mutex_unlock(&fcr->sem);
	return ret;
}
/* Remove all sessions when closing the file */
static int
crypto_finish_all_sessions(struct fcrypt *fcr)
{
	struct csession *tmp, *ses_ptr;
	struct list_head *head;

	mutex_lock(&fcr->sem);

	head = &fcr->list;
	list_for_each_entry_safe(ses_ptr, tmp, head, entry) {
		list_del(&ses_ptr->entry);
		crypto_destroy_session(ses_ptr);
	}
	mutex_unlock(&fcr->sem);

	return 0;
}
/* Look up a session by session ID. The returned session is locked. */
static struct csession *
crypto_get_session_by_sid(struct fcrypt *fcr, uint32_t sid)
{
	struct csession *ses_ptr, *retval = NULL;

	mutex_lock(&fcr->sem);
	list_for_each_entry(ses_ptr, &fcr->list, entry) {
		if (ses_ptr->sid == sid) {
			mutex_lock(&ses_ptr->sem);
			retval = ses_ptr;
			break;
		}
	}
	mutex_unlock(&fcr->sem);

	return retval;
}
static int
hash_n_crypt(struct csession *ses_ptr, struct crypt_op *cop,
	     struct scatterlist *src_sg, struct scatterlist *dst_sg,
	     uint32_t len)
{
	int ret;

	/* Always hash before encryption and after decryption. Maybe
	 * we should introduce a flag to switch... TBD later on.
	 */
	if (cop->op == COP_ENCRYPT) {
		if (ses_ptr->hdata.init != 0) {
			ret = cryptodev_hash_update(&ses_ptr->hdata,
						    src_sg, len);
			if (unlikely(ret))
				goto out_err;
		}
		if (ses_ptr->cdata.init != 0) {
			ret = cryptodev_cipher_encrypt(&ses_ptr->cdata,
						       src_sg, dst_sg, len);

			if (unlikely(ret))
				goto out_err;
		}
	} else {
		if (ses_ptr->cdata.init != 0) {
			ret = cryptodev_cipher_decrypt(&ses_ptr->cdata,
						       src_sg, dst_sg, len);

			if (unlikely(ret))
				goto out_err;
		}

		if (ses_ptr->hdata.init != 0) {
			ret = cryptodev_hash_update(&ses_ptr->hdata,
						    dst_sg, len);
			if (unlikely(ret))
				goto out_err;
		}
	}
	return 0;
out_err:
	dprintk(0, KERN_ERR, "CryptoAPI failure: %d\n", ret);
	return ret;
}
/* This is the main crypto function - feed it with plaintext
   and get a ciphertext (or vice versa :-) */
static int
__crypto_run_std(struct csession *ses_ptr, struct crypt_op *cop)
{
	char *data;
	char __user *src, *dst;
	struct scatterlist sg;
	size_t nbytes, bufsize;
	int ret = 0;

	nbytes = cop->len;
	data = (char *)__get_free_page(GFP_KERNEL);

	if (unlikely(!data))
		return -ENOMEM;

	bufsize = PAGE_SIZE < nbytes ? PAGE_SIZE : nbytes;

	src = cop->src;
	dst = cop->dst;

	while (nbytes > 0) {
		size_t current_len = nbytes > bufsize ? bufsize : nbytes;

		if (unlikely(copy_from_user(data, src, current_len))) {
			ret = -EFAULT;
			break;
		}

		sg_init_one(&sg, data, current_len);

		ret = hash_n_crypt(ses_ptr, cop, &sg, &sg, current_len);

		if (unlikely(ret))
			break;

		if (ses_ptr->cdata.init != 0) {
			if (unlikely(copy_to_user(dst, data, current_len))) {
				ret = -EFAULT;
				break;
			}
		}

		dst += current_len;
		nbytes -= current_len;
		src += current_len;
	}

	free_page((unsigned long)data);
	return ret;
}
void release_user_pages(struct page **pg, int pagecount)
{
	while (pagecount--) {
		if (!PageReserved(pg[pagecount]))
			SetPageDirty(pg[pagecount]);
		page_cache_release(pg[pagecount]);
	}
}
/* offset of buf in its first page */
#define PAGEOFFSET(buf) ((unsigned long)buf & ~PAGE_MASK)

/* fetch the pages addr resides in into pg and initialise sg with them */
int __get_userbuf(uint8_t __user *addr, uint32_t len, int write,
		  int pgcount, struct page **pg, struct scatterlist *sg,
		  struct task_struct *task, struct mm_struct *mm)
{
	int ret, pglen, i = 0;
	struct scatterlist *sgp;

	down_write(&mm->mmap_sem);
	ret = get_user_pages(task, mm,
			(unsigned long)addr, pgcount, write, 0, pg, NULL);
	up_write(&mm->mmap_sem);
	if (ret != pgcount)
		return -EINVAL;

	sg_init_table(sg, pgcount);

	pglen = min((ptrdiff_t)(PAGE_SIZE - PAGEOFFSET(addr)), (ptrdiff_t)len);
	sg_set_page(sg, pg[i++], pglen, PAGEOFFSET(addr));

	len -= pglen;
	for (sgp = sg_next(sg); len; sgp = sg_next(sgp)) {
		pglen = min((uint32_t)PAGE_SIZE, len);
		sg_set_page(sgp, pg[i++], pglen, 0);
		len -= pglen;
	}
	sg_mark_end(sg_last(sg, pgcount));
	return 0;
}
/* make cop->src and cop->dst available in scatterlists */
static int get_userbuf(struct csession *ses, struct kernel_crypt_op *kcop,
		       struct scatterlist **src_sg, struct scatterlist **dst_sg,
		       int *tot_pages)
{
	int src_pagecount, dst_pagecount = 0, pagecount, write_src = 1;
	struct crypt_op *cop = &kcop->cop;
	int rc;

	if (cop->src == NULL)
		return -EINVAL;

	src_pagecount = PAGECOUNT(cop->src, cop->len);
	if (!ses->cdata.init) {		/* hashing only */
		write_src = 0;
	} else if (cop->src != cop->dst) {	/* non-in-situ transformation */
		if (cop->dst == NULL)
			return -EINVAL;

		dst_pagecount = PAGECOUNT(cop->dst, cop->len);
		write_src = 0;
	}

	(*tot_pages) = pagecount = src_pagecount + dst_pagecount;

	if (pagecount > ses->array_size) {
		struct scatterlist *sg;
		struct page **pages;
		int array_size;

		for (array_size = ses->array_size; array_size < pagecount;
		     array_size *= 2)
			;

		dprintk(2, KERN_DEBUG, "%s: reallocating to %d elements\n",
			__func__, array_size);
		pages = krealloc(ses->pages, array_size * sizeof(struct page *),
				 GFP_KERNEL);
		if (unlikely(!pages))
			return -ENOMEM;
		ses->pages = pages;
		sg = krealloc(ses->sg, array_size * sizeof(struct scatterlist),
			      GFP_KERNEL);
		if (unlikely(!sg))
			return -ENOMEM;
		ses->sg = sg;
		ses->array_size = array_size;
	}

	rc = __get_userbuf(cop->src, cop->len, write_src, src_pagecount,
			   ses->pages, ses->sg, kcop->task, kcop->mm);
	if (unlikely(rc)) {
		dprintk(1, KERN_ERR,
			"failed to get user pages for data input\n");
		return -EINVAL;
	}
	(*src_sg) = (*dst_sg) = ses->sg;

	if (!dst_pagecount)
		return 0;

	(*dst_sg) = ses->sg + src_pagecount;

	rc = __get_userbuf(cop->dst, cop->len, 1, dst_pagecount,
			   ses->pages + src_pagecount, *dst_sg,
			   kcop->task, kcop->mm);
	if (unlikely(rc)) {
		dprintk(1, KERN_ERR,
			"failed to get user pages for data output\n");
		release_user_pages(ses->pages, src_pagecount);
		return -EINVAL;
	}
	return 0;
}
/* This is the main crypto function - zero-copy edition */
static int
__crypto_run_zc(struct csession *ses_ptr, struct kernel_crypt_op *kcop)
{
	struct scatterlist *src_sg, *dst_sg;
	struct crypt_op *cop = &kcop->cop;
	int ret = 0, pagecount;

	ret = get_userbuf(ses_ptr, kcop, &src_sg, &dst_sg, &pagecount);
	if (unlikely(ret)) {
		dprintk(1, KERN_ERR, "Error getting user pages. "
			"Falling back to non zero copy.\n");
		return __crypto_run_std(ses_ptr, cop);
	}

	ret = hash_n_crypt(ses_ptr, cop, src_sg, dst_sg, cop->len);

	release_user_pages(ses_ptr->pages, pagecount);
	return ret;
}
static int crypto_run(struct fcrypt *fcr, struct kernel_crypt_op *kcop)
{
	struct csession *ses_ptr;
	struct crypt_op *cop = &kcop->cop;
	int ret = 0;

	if (unlikely(cop->op != COP_ENCRYPT && cop->op != COP_DECRYPT)) {
		dprintk(1, KERN_DEBUG, "invalid operation op=%u\n", cop->op);
		return -EINVAL;
	}

	/* this also enters ses_ptr->sem */
	ses_ptr = crypto_get_session_by_sid(fcr, cop->ses);
	if (unlikely(!ses_ptr)) {
		dprintk(1, KERN_ERR, "invalid session ID=0x%08X\n", cop->ses);
		return -EINVAL;
	}

	if (ses_ptr->hdata.init != 0 && !(cop->flags & COP_FLAG_UPDATE) &&
	    !(cop->flags & COP_FLAG_FINAL)) {
		ret = cryptodev_hash_reset(&ses_ptr->hdata);
		if (unlikely(ret)) {
			dprintk(1, KERN_ERR,
				"error in cryptodev_hash_reset()\n");
			goto out_unlock;
		}
	}

	if (ses_ptr->cdata.init != 0) {
		int blocksize = ses_ptr->cdata.blocksize;

		if (unlikely(cop->len % blocksize)) {
			dprintk(1, KERN_ERR,
				"data size (%u) isn't a multiple "
				"of block size (%u)\n",
				cop->len, blocksize);
			ret = -EINVAL;
			goto out_unlock;
		}

		cryptodev_cipher_set_iv(&ses_ptr->cdata, kcop->iv,
				min(ses_ptr->cdata.ivsize, kcop->ivlen));
	}

	if (cop->len != 0) {
		ret = __crypto_run_zc(ses_ptr, kcop);
		if (unlikely(ret))
			goto out_unlock;
	}

	if (ses_ptr->hdata.init != 0 &&
	    ((cop->flags & COP_FLAG_FINAL) ||
	     (!(cop->flags & COP_FLAG_UPDATE) || cop->len == 0))) {

		ret = cryptodev_hash_final(&ses_ptr->hdata, kcop->hash_output);
		if (unlikely(ret)) {
			dprintk(0, KERN_ERR, "CryptoAPI failure: %d\n", ret);
			goto out_unlock;
		}
		kcop->digestsize = ses_ptr->hdata.digestsize;
	}

#if defined(CRYPTODEV_STATS)
	if (enable_stats) {
		/* this is safe - we check cop->op at the function entry */
		ses_ptr->stat[cop->op] += cop->len;
		if (ses_ptr->stat_max_size < cop->len)
			ses_ptr->stat_max_size = cop->len;
		ses_ptr->stat_count++;
	}
#endif

out_unlock:
	mutex_unlock(&ses_ptr->sem);
	return ret;
}
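/*
 * The COP_FLAG_UPDATE/COP_FLAG_FINAL handling above lets a digest be built
 * up over several CIOCCRYPT calls. A hedged sketch of the expected userspace
 * sequence for a hash-only session (buffer names, the chunking and the
 * digest length are illustrative assumptions; "sop" and "cfd" are as in the
 * example near the top of this file, with sop.mac set instead of sop.cipher):
 *
 *	struct crypt_op cop = { 0 };
 *	unsigned char digest[20];		// e.g. SHA1
 *
 *	cop.ses = sop.ses;
 *	cop.op = COP_ENCRYPT;
 *	cop.flags = COP_FLAG_UPDATE;
 *	cop.len = chunk_len;
 *	cop.src = chunk;
 *	ioctl(cfd, CIOCCRYPT, &cop);		// repeat once per chunk
 *
 *	cop.flags = COP_FLAG_FINAL;		// last call returns the digest
 *	cop.len = 0;
 *	cop.mac = digest;
 *	ioctl(cfd, CIOCCRYPT, &cop);
 */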
static void cryptask_routine(struct work_struct *work)
{
	struct crypt_priv *pcr = container_of(work, struct crypt_priv, cryptask);
	struct todo_list_item *item;
	LIST_HEAD(tmp);

	/* fetch all pending jobs into the temporary list */
	mutex_lock(&pcr->todo.lock);
	list_cut_position(&tmp, &pcr->todo.list, pcr->todo.list.prev);
	mutex_unlock(&pcr->todo.lock);

	/* handle each job locklessly */
	list_for_each_entry(item, &tmp, __hook) {
		item->result = crypto_run(&pcr->fcrypt, &item->kcop);
		if (unlikely(item->result))
			dprintk(0, KERN_ERR, "%s: crypto_run() failed: %d\n",
				__func__, item->result);
	}

	/* push all handled jobs to the done list at once */
	mutex_lock(&pcr->done.lock);
	list_splice_tail(&tmp, &pcr->done.list);
	mutex_unlock(&pcr->done.lock);

	/* wake for POLLIN */
	wake_up_interruptible(&pcr->user_waiter);
}
/* ====== /dev/crypto ====== */

static int
cryptodev_open(struct inode *inode, struct file *filp)
{
	struct todo_list_item *tmp;
	struct crypt_priv *pcr;
	int i;

	pcr = kmalloc(sizeof(*pcr), GFP_KERNEL);
	if (!pcr)
		return -ENOMEM;

	memset(pcr, 0, sizeof(*pcr));
	mutex_init(&pcr->fcrypt.sem);
	INIT_LIST_HEAD(&pcr->fcrypt.list);

	INIT_LIST_HEAD(&pcr->free.list);
	INIT_LIST_HEAD(&pcr->todo.list);
	INIT_LIST_HEAD(&pcr->done.list);
	INIT_WORK(&pcr->cryptask, cryptask_routine);
	mutex_init(&pcr->free.lock);
	mutex_init(&pcr->todo.lock);
	mutex_init(&pcr->done.lock);
	init_waitqueue_head(&pcr->user_waiter);

	for (i = 0; i < DEF_COP_RINGSIZE; i++) {
		tmp = kzalloc(sizeof(struct todo_list_item), GFP_KERNEL);
		pcr->itemcount++;
		dprintk(2, KERN_DEBUG, "%s: allocated new item at %lx\n",
			__func__, (unsigned long)tmp);
		list_add(&tmp->__hook, &pcr->free.list);
	}

	filp->private_data = pcr;
	dprintk(2, KERN_DEBUG,
		"Cryptodev handle initialised, %d elements in queue\n",
		DEF_COP_RINGSIZE);
	return 0;
}
static int
cryptodev_release(struct inode *inode, struct file *filp)
{
	struct crypt_priv *pcr = filp->private_data;
	struct todo_list_item *item, *item_safe;
	int items_freed = 0;

	if (!pcr)
		return 0;

	cancel_work_sync(&pcr->cryptask);

	mutex_destroy(&pcr->todo.lock);
	mutex_destroy(&pcr->done.lock);
	mutex_destroy(&pcr->free.lock);

	list_splice_tail(&pcr->todo.list, &pcr->free.list);
	list_splice_tail(&pcr->done.list, &pcr->free.list);

	list_for_each_entry_safe(item, item_safe, &pcr->free.list, __hook) {
		dprintk(2, KERN_DEBUG, "%s: freeing item at %lx\n",
			__func__, (unsigned long)item);
		list_del(&item->__hook);
		kfree(item);
		items_freed++;
	}

	if (items_freed != pcr->itemcount) {
		dprintk(0, KERN_ERR,
			"%s: freed %d items, but %d should exist!\n",
			__func__, items_freed, pcr->itemcount);
	}

	crypto_finish_all_sessions(&pcr->fcrypt);
	kfree(pcr);
	filp->private_data = NULL;

	dprintk(2, KERN_DEBUG,
		"Cryptodev handle deinitialised, %d elements freed\n",
		items_freed);
	return 0;
}
static int
clonefd(struct file *filp)
{
	int ret;
	ret = get_unused_fd();
	if (ret >= 0) {
		get_file(filp);
		fd_install(ret, filp);
	}

	return ret;
}
/* enqueue a job for asynchronous completion
 *
 * returns:
 * -EBUSY when there are no free queue slots left
 *        (and the number of slots has reached its MAX_COP_RINGSIZE)
 * -EFAULT when there was a memory allocation error
 * 0 on success */
static int crypto_async_run(struct crypt_priv *pcr, struct kernel_crypt_op *kcop)
{
	struct todo_list_item *item = NULL;

	mutex_lock(&pcr->free.lock);
	if (likely(!list_empty(&pcr->free.list))) {
		item = list_first_entry(&pcr->free.list,
				struct todo_list_item, __hook);
		list_del(&item->__hook);
	} else if (pcr->itemcount < MAX_COP_RINGSIZE) {
		pcr->itemcount++;
	} else {
		mutex_unlock(&pcr->free.lock);
		return -EBUSY;
	}
	mutex_unlock(&pcr->free.lock);

	if (unlikely(!item)) {
		item = kzalloc(sizeof(struct todo_list_item), GFP_KERNEL);
		if (unlikely(!item))
			return -EFAULT;
		dprintk(1, KERN_INFO, "%s: increased item count to %d\n",
			__func__, pcr->itemcount);
	}

	memcpy(&item->kcop, kcop, sizeof(struct kernel_crypt_op));

	mutex_lock(&pcr->todo.lock);
	list_add_tail(&item->__hook, &pcr->todo.list);
	mutex_unlock(&pcr->todo.lock);

	queue_work(cryptodev_wq, &pcr->cryptask);
	return 0;
}
/* get the first completed job from the "done" queue
 *
 * returns:
 * -EBUSY if no completed jobs are ready (yet)
 * the return value of crypto_run() otherwise */
static int crypto_async_fetch(struct crypt_priv *pcr,
		struct kernel_crypt_op *kcop)
{
	struct todo_list_item *item;
	int retval;

	mutex_lock(&pcr->done.lock);
	if (list_empty(&pcr->done.list)) {
		mutex_unlock(&pcr->done.lock);
		return -EBUSY;
	}
	item = list_first_entry(&pcr->done.list, struct todo_list_item, __hook);
	list_del(&item->__hook);
	mutex_unlock(&pcr->done.lock);

	memcpy(kcop, &item->kcop, sizeof(struct kernel_crypt_op));
	retval = item->result;

	mutex_lock(&pcr->free.lock);
	list_add_tail(&item->__hook, &pcr->free.list);
	mutex_unlock(&pcr->free.lock);

	/* wake for POLLOUT */
	wake_up_interruptible(&pcr->user_waiter);

	return retval;
}
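/*
 * Together with the poll() hook further down, the two helpers above give
 * userspace a simple asynchronous pattern. A hedged sketch, with the same
 * caveats as the synchronous example near the top of this file ("cop" and
 * "cfd" are assumed to be set up as shown there):
 *
 *	struct pollfd pfd = { .fd = cfd, .events = POLLIN | POLLOUT };
 *
 *	ioctl(cfd, CIOCASYNCCRYPT, &cop);	// enqueue, returns immediately
 *	poll(&pfd, 1, -1);			// wait for a completed job
 *	if (pfd.revents & POLLIN)
 *		ioctl(cfd, CIOCASYNCFETCH, &cop); // collect the finished crypt_op
 */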
/* this function has to be called from process context */
static int fill_kcop_from_cop(struct kernel_crypt_op *kcop, struct fcrypt *fcr)
{
	struct crypt_op *cop = &kcop->cop;
	struct csession *ses_ptr;
	int rc;

	/* this also enters ses_ptr->sem */
	ses_ptr = crypto_get_session_by_sid(fcr, cop->ses);
	if (unlikely(!ses_ptr)) {
		dprintk(1, KERN_ERR, "invalid session ID=0x%08X\n", cop->ses);
		return -EINVAL;
	}
	kcop->ivlen = cop->iv ? ses_ptr->cdata.ivsize : 0;
	kcop->digestsize = 0; /* will be updated during operation */

	mutex_unlock(&ses_ptr->sem);

	kcop->task = current;
	kcop->mm = current->mm;

	if (cop->iv) {
		rc = copy_from_user(kcop->iv, cop->iv, kcop->ivlen);
		if (unlikely(rc)) {
			dprintk(1, KERN_ERR,
				"error copying IV (%d bytes), copy_from_user returned %d for address %lx\n",
				kcop->ivlen, rc, (unsigned long)cop->iv);
			return -EFAULT;
		}
	}

	return 0;
}
static int kcop_from_user(struct kernel_crypt_op *kcop,
			struct fcrypt *fcr, void __user *arg)
{
	if (unlikely(copy_from_user(&kcop->cop, arg, sizeof(kcop->cop))))
		return -EFAULT;

	return fill_kcop_from_cop(kcop, fcr);
}
static long
cryptodev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg_)
{
	void __user *arg = (void __user *)arg_;
	int __user *p = arg;
	struct session_op sop;
	struct kernel_crypt_op kcop;
	struct crypt_priv *pcr = filp->private_data;
	struct fcrypt *fcr;
	uint32_t ses;
	int ret, fd;

	if (unlikely(!pcr))
		BUG();

	fcr = &pcr->fcrypt;

	switch (cmd) {
	case CIOCASYMFEAT:
		return put_user(0, p);
	case CRIOGET:
		fd = clonefd(filp);
		ret = put_user(fd, p);
		if (unlikely(ret)) {
			sys_close(fd);
			return ret;
		}
		return ret;
	case CIOCGSESSION:
		if (unlikely(copy_from_user(&sop, arg, sizeof(sop))))
			return -EFAULT;

		ret = crypto_create_session(fcr, &sop);
		if (unlikely(ret))
			return ret;
		ret = copy_to_user(arg, &sop, sizeof(sop));
		if (unlikely(ret)) {
			crypto_finish_session(fcr, sop.ses);
			return -EFAULT;
		}
		return ret;
	case CIOCFSESSION:
		ret = get_user(ses, (uint32_t __user *)arg);
		if (unlikely(ret))
			return ret;
		ret = crypto_finish_session(fcr, ses);
		return ret;
	case CIOCCRYPT:
		if (unlikely(ret = kcop_from_user(&kcop, fcr, arg)))
			return ret;

		ret = crypto_run(fcr, &kcop);
		if (unlikely(ret))
			return ret;

		if (kcop.digestsize) {
			ret = copy_to_user(kcop.cop.mac,
					kcop.hash_output, kcop.digestsize);
			if (unlikely(ret))
				return -EFAULT;
		}
		if (unlikely(copy_to_user(arg, &kcop.cop, sizeof(kcop.cop))))
			return -EFAULT;
		return 0;
	case CIOCASYNCCRYPT:
		if (unlikely(ret = kcop_from_user(&kcop, fcr, arg)))
			return ret;

		return crypto_async_run(pcr, &kcop);
	case CIOCASYNCFETCH:
		ret = crypto_async_fetch(pcr, &kcop);
		if (unlikely(ret))
			return ret;

		if (kcop.digestsize) {
			ret = copy_to_user(kcop.cop.mac,
					kcop.hash_output, kcop.digestsize);
			if (unlikely(ret))
				return -EFAULT;
		}

		return copy_to_user(arg, &kcop.cop, sizeof(kcop.cop));

	default:
		return -EINVAL;
	}
}
/* compatibility code for 32bit userlands */
#ifdef CONFIG_COMPAT

static inline void
compat_to_session_op(struct compat_session_op *compat, struct session_op *sop)
{
	sop->cipher = compat->cipher;
	sop->mac = compat->mac;
	sop->keylen = compat->keylen;

	sop->key = compat_ptr(compat->key);
	sop->mackeylen = compat->mackeylen;
	sop->mackey = compat_ptr(compat->mackey);
	sop->ses = compat->ses;
}

static inline void
session_op_to_compat(struct session_op *sop, struct compat_session_op *compat)
{
	compat->cipher = sop->cipher;
	compat->mac = sop->mac;
	compat->keylen = sop->keylen;

	compat->key = ptr_to_compat(sop->key);
	compat->mackeylen = sop->mackeylen;
	compat->mackey = ptr_to_compat(sop->mackey);
	compat->ses = sop->ses;
}

static inline void
compat_to_crypt_op(struct compat_crypt_op *compat, struct crypt_op *cop)
{
	cop->ses = compat->ses;
	cop->op = compat->op;
	cop->flags = compat->flags;
	cop->len = compat->len;

	cop->src = compat_ptr(compat->src);
	cop->dst = compat_ptr(compat->dst);
	cop->mac = compat_ptr(compat->mac);
	cop->iv = compat_ptr(compat->iv);
}

static inline void
crypt_op_to_compat(struct crypt_op *cop, struct compat_crypt_op *compat)
{
	compat->ses = cop->ses;
	compat->op = cop->op;
	compat->flags = cop->flags;
	compat->len = cop->len;

	compat->src = ptr_to_compat(cop->src);
	compat->dst = ptr_to_compat(cop->dst);
	compat->mac = ptr_to_compat(cop->mac);
	compat->iv = ptr_to_compat(cop->iv);
}

static int compat_kcop_from_user(struct kernel_crypt_op *kcop,
				 struct fcrypt *fcr, void __user *arg)
{
	struct compat_crypt_op compat_cop;

	if (unlikely(copy_from_user(&compat_cop, arg, sizeof(compat_cop))))
		return -EFAULT;
	compat_to_crypt_op(&compat_cop, &kcop->cop);

	return fill_kcop_from_cop(kcop, fcr);
}

static long
cryptodev_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg_)
{
	void __user *arg = (void __user *)arg_;
	struct crypt_priv *pcr = file->private_data;
	struct fcrypt *fcr;
	struct session_op sop;
	struct compat_session_op compat_sop;
	struct kernel_crypt_op kcop;
	struct compat_crypt_op compat_cop;
	int ret;

	if (unlikely(!pcr))
		BUG();

	fcr = &pcr->fcrypt;

	switch (cmd) {
	case CIOCASYMFEAT:
	case CRIOGET:
	case CIOCFSESSION:
		return cryptodev_ioctl(file, cmd, arg_);

	case COMPAT_CIOCGSESSION:
		if (unlikely(copy_from_user(&compat_sop, arg,
				sizeof(compat_sop))))
			return -EFAULT;
		compat_to_session_op(&compat_sop, &sop);

		ret = crypto_create_session(fcr, &sop);
		if (unlikely(ret))
			return ret;

		session_op_to_compat(&sop, &compat_sop);
		ret = copy_to_user(arg, &compat_sop, sizeof(compat_sop));
		if (unlikely(ret)) {
			crypto_finish_session(fcr, sop.ses);
			return -EFAULT;
		}
		return ret;

	case COMPAT_CIOCCRYPT:
		ret = compat_kcop_from_user(&kcop, fcr, arg);
		if (unlikely(ret))
			return ret;

		ret = crypto_run(fcr, &kcop);
		if (unlikely(ret))
			return ret;

		if (kcop.digestsize) {
			ret = copy_to_user(kcop.cop.mac,
					kcop.hash_output, kcop.digestsize);
			if (unlikely(ret))
				return -EFAULT;
		}

		crypt_op_to_compat(&kcop.cop, &compat_cop);
		if (unlikely(copy_to_user(arg, &compat_cop,
				sizeof(compat_cop))))
			return -EFAULT;
		return 0;
	case COMPAT_CIOCASYNCCRYPT:
		if (unlikely(ret = compat_kcop_from_user(&kcop, fcr, arg)))
			return ret;

		return crypto_async_run(pcr, &kcop);
	case COMPAT_CIOCASYNCFETCH:
		ret = crypto_async_fetch(pcr, &kcop);
		if (unlikely(ret))
			return ret;

		if (kcop.digestsize) {
			ret = copy_to_user(kcop.cop.mac,
					kcop.hash_output, kcop.digestsize);
			if (unlikely(ret))
				return -EFAULT;
		}

		crypt_op_to_compat(&kcop.cop, &compat_cop);
		return copy_to_user(arg, &compat_cop, sizeof(compat_cop));

	default:
		return -EINVAL;
	}
}

#endif /* CONFIG_COMPAT */
static unsigned int cryptodev_poll(struct file *file, poll_table *wait)
{
	struct crypt_priv *pcr = file->private_data;
	int ret = 0;

	poll_wait(file, &pcr->user_waiter, wait);

	if (!list_empty_careful(&pcr->done.list))
		ret |= POLLIN | POLLRDNORM;
	if (!list_empty_careful(&pcr->free.list) || pcr->itemcount < MAX_COP_RINGSIZE)
		ret |= POLLOUT | POLLWRNORM;

	return ret;
}
static const struct file_operations cryptodev_fops = {
	.owner = THIS_MODULE,
	.open = cryptodev_open,
	.release = cryptodev_release,
	.unlocked_ioctl = cryptodev_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = cryptodev_compat_ioctl,
#endif /* CONFIG_COMPAT */
	.poll = cryptodev_poll,
};

static struct miscdevice cryptodev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "crypto",
	.fops = &cryptodev_fops,
};
static int __init
cryptodev_register(void)
{
	int rc;

	rc = misc_register(&cryptodev);
	if (unlikely(rc)) {
		printk(KERN_ERR PFX "registration of /dev/crypto failed\n");
		return rc;
	}

	return 0;
}

static void __exit
cryptodev_deregister(void)
{
	misc_deregister(&cryptodev);
}
/* ====== Module init/exit ====== */
static int __init init_cryptodev(void)
{
	int rc;

	cryptodev_wq = create_workqueue("cryptodev_queue");
	if (unlikely(!cryptodev_wq)) {
		printk(KERN_ERR PFX "failed to allocate the cryptodev workqueue\n");
		return -EFAULT;
	}

	rc = cryptodev_register();
	if (unlikely(rc)) {
		destroy_workqueue(cryptodev_wq);
		return rc;
	}

	printk(KERN_INFO PFX "driver %s loaded.\n", VERSION);

	return 0;
}

static void __exit exit_cryptodev(void)
{
	flush_workqueue(cryptodev_wq);
	destroy_workqueue(cryptodev_wq);

	cryptodev_deregister();
	printk(KERN_INFO PFX "driver unloaded.\n");
}

module_init(init_cryptodev);
module_exit(exit_cryptodev);