Added flag to prevent zero copy.
[cryptodev-linux.git] / cryptodev_main.c
1 /*
2 * Driver for /dev/crypto device (aka CryptoDev)
4 * Copyright (c) 2004 Michal Ludvig <mludvig@logix.net.nz>, SuSE Labs
5 * Copyright (c) 2009,2010 Nikos Mavrogiannopoulos <nmav@gnutls.org>
7 * This file is part of linux cryptodev.
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version 2
12 * of the License, or (at your option) any later version.
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc.,
22 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
26 * Device /dev/crypto provides an interface for
27 * accessing kernel CryptoAPI algorithms (ciphers,
28 * hashes) from userspace programs.
30 * /dev/crypto interface was originally introduced in
31 * OpenBSD and this module attempts to keep the API.
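 *
 * Example usage from userspace (an illustrative sketch only: error
 * handling is omitted, and aes_key, plaintext, ciphertext, iv_buf and
 * data_len are placeholder variables; the ioctls and structures are the
 * ones declared in <crypto/cryptodev.h> and dispatched below in
 * cryptodev_ioctl()):
 *
 *	int cfd = open("/dev/crypto", O_RDWR);
 *
 *	struct session_op sop = {
 *		.cipher = CRYPTO_AES_CBC,
 *		.key    = aes_key,
 *		.keylen = 16,
 *	};
 *	ioctl(cfd, CIOCGSESSION, &sop);
 *
 *	struct crypt_op cop = {
 *		.ses = sop.ses,
 *		.op  = COP_ENCRYPT,
 *		.len = data_len,
 *		.src = plaintext,
 *		.dst = ciphertext,
 *		.iv  = iv_buf,
 *	};
 *	ioctl(cfd, CIOCCRYPT, &cop);
 *
 *	ioctl(cfd, CIOCFSESSION, &sop.ses);
 *	close(cfd);
 *
 * CIOCGSESSION fills in sop.ses with the new session id, and CIOCCRYPT
 * requires cop.len to be a multiple of the cipher block size (see
 * crypto_run() below).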
35 #include <crypto/hash.h>
36 #include <linux/crypto.h>
37 #include <linux/mm.h>
38 #include <linux/highmem.h>
39 #include <linux/ioctl.h>
40 #include <linux/random.h>
41 #include <linux/syscalls.h>
42 #include <linux/pagemap.h>
43 #include <linux/poll.h>
44 #include <linux/uaccess.h>
45 #include <crypto/cryptodev.h>
46 #include <linux/scatterlist.h>
47 #include "cryptodev_int.h"
48 #include "version.h"
50 MODULE_AUTHOR("Nikos Mavrogiannopoulos <nmav@gnutls.org>");
51 MODULE_DESCRIPTION("CryptoDev driver");
52 MODULE_LICENSE("GPL");
54 /* ====== Compile-time config ====== */
56 #define CRYPTODEV_STATS
58 /* Default (pre-allocated) and maximum size of the job queue.
59 * These are free, pending and done items all together. */
60 #define DEF_COP_RINGSIZE 16
61 #define MAX_COP_RINGSIZE 64
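/* The free item list is filled with DEF_COP_RINGSIZE entries in
 * cryptodev_open(); crypto_async_run() allocates further items on demand
 * until itemcount reaches MAX_COP_RINGSIZE, after which it returns -EBUSY. */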
63 /* ====== Module parameters ====== */
65 int cryptodev_verbosity;
66 module_param(cryptodev_verbosity, int, 0644);
67 MODULE_PARM_DESC(cryptodev_verbosity, "0: normal, 1: verbose, 2: debug");
69 #ifdef CRYPTODEV_STATS
70 static int enable_stats;
71 module_param(enable_stats, int, 0644);
72 MODULE_PARM_DESC(enable_stats, "collect statistics about cryptodev usage");
73 #endif
75 /* ====== CryptoAPI ====== */
76 struct fcrypt {
77 struct list_head list;
78 struct mutex sem;
81 struct todo_list_item {
82 struct list_head __hook;
83 struct kernel_crypt_op kcop;
84 int result;
87 struct locked_list {
88 struct list_head list;
89 struct mutex lock;
92 struct crypt_priv {
93 struct fcrypt fcrypt;
94 struct locked_list free, todo, done;
95 int itemcount;
96 struct work_struct cryptask;
97 wait_queue_head_t user_waiter;
100 #define FILL_SG(sg, ptr, len) \
101 do { \
102 (sg)->page = virt_to_page(ptr); \
103 (sg)->offset = offset_in_page(ptr); \
104 (sg)->length = len; \
105 (sg)->dma_address = 0; \
106 } while (0)
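/* Legacy helper kept for reference: the code below builds its scatterlists
 * with sg_init_one()/sg_set_page(), and FILL_SG does not appear to be used
 * anywhere else in this file. */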
108 struct csession {
109 struct list_head entry;
110 struct mutex sem;
111 struct cipher_data cdata;
112 struct hash_data hdata;
113 uint32_t sid;
114 uint32_t alignmask;
115 #ifdef CRYPTODEV_STATS
116 #if !((COP_ENCRYPT < 2) && (COP_DECRYPT < 2))
117 #error Struct csession.stat uses COP_{ENCRYPT,DECRYPT} as indices. Do something!
118 #endif
119 unsigned long long stat[2];
120 size_t stat_max_size, stat_count;
121 #endif
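/* Preallocated page and scatterlist arrays (array_size entries each) used by
 * the zero-copy path; get_userbuf() grows them with krealloc() whenever an
 * operation spans more pages than currently fit. */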
122 int array_size;
123 struct page **pages;
124 struct scatterlist *sg;
127 /* cryptodev's own workqueue, keeps crypto tasks from disturbing the force */
128 static struct workqueue_struct *cryptodev_wq;
130 /* Prepare session for future use. */
131 static int
132 crypto_create_session(struct fcrypt *fcr, struct session_op *sop)
134 struct csession *ses_new = NULL, *ses_ptr;
135 int ret = 0;
136 const char *alg_name = NULL;
137 const char *hash_name = NULL;
138 int hmac_mode = 1;
140 /* Does the request make sense? */
141 if (unlikely(!sop->cipher && !sop->mac)) {
142 dprintk(1, KERN_DEBUG, "Both 'cipher' and 'mac' unset.\n");
143 return -EINVAL;
146 switch (sop->cipher) {
147 case 0:
148 break;
149 case CRYPTO_DES_CBC:
150 alg_name = "cbc(des)";
151 break;
152 case CRYPTO_3DES_CBC:
153 alg_name = "cbc(des3_ede)";
154 break;
155 case CRYPTO_BLF_CBC:
156 alg_name = "cbc(blowfish)";
157 break;
158 case CRYPTO_AES_CBC:
159 alg_name = "cbc(aes)";
160 break;
161 case CRYPTO_AES_ECB:
162 alg_name = "ecb(aes)";
163 break;
164 case CRYPTO_CAMELLIA_CBC:
165 alg_name = "cbc(camelia)";
166 break;
167 case CRYPTO_AES_CTR:
168 alg_name = "ctr(aes)";
169 break;
170 case CRYPTO_NULL:
171 alg_name = "ecb(cipher_null)";
172 break;
173 default:
174 dprintk(1, KERN_DEBUG, "%s: bad cipher: %d\n", __func__,
175 sop->cipher);
176 return -EINVAL;
179 switch (sop->mac) {
180 case 0:
181 break;
182 case CRYPTO_MD5_HMAC:
183 hash_name = "hmac(md5)";
184 break;
185 case CRYPTO_RIPEMD160_HMAC:
186 hash_name = "hmac(rmd160)";
187 break;
188 case CRYPTO_SHA1_HMAC:
189 hash_name = "hmac(sha1)";
190 break;
191 case CRYPTO_SHA2_256_HMAC:
192 hash_name = "hmac(sha256)";
193 break;
194 case CRYPTO_SHA2_384_HMAC:
195 hash_name = "hmac(sha384)";
196 break;
197 case CRYPTO_SHA2_512_HMAC:
198 hash_name = "hmac(sha512)";
199 break;
201 /* non-hmac cases */
202 case CRYPTO_MD5:
203 hash_name = "md5";
204 hmac_mode = 0;
205 break;
206 case CRYPTO_RIPEMD160:
207 hash_name = "rmd160";
208 hmac_mode = 0;
209 break;
210 case CRYPTO_SHA1:
211 hash_name = "sha1";
212 hmac_mode = 0;
213 break;
214 case CRYPTO_SHA2_256:
215 hash_name = "sha256";
216 hmac_mode = 0;
217 break;
218 case CRYPTO_SHA2_384:
219 hash_name = "sha384";
220 hmac_mode = 0;
221 break;
222 case CRYPTO_SHA2_512:
223 hash_name = "sha512";
224 hmac_mode = 0;
225 break;
227 default:
228 dprintk(1, KERN_DEBUG, "%s: bad mac: %d\n", __func__,
229 sop->mac);
230 return -EINVAL;
233 /* Create a session and put it to the list. */
234 ses_new = kzalloc(sizeof(*ses_new), GFP_KERNEL);
235 if (!ses_new)
236 return -ENOMEM;
238 /* Set-up crypto transform. */
239 if (alg_name) {
240 uint8_t keyp[CRYPTO_CIPHER_MAX_KEY_LEN];
242 if (unlikely(sop->keylen > CRYPTO_CIPHER_MAX_KEY_LEN)) {
243 dprintk(1, KERN_DEBUG,
244 "Setting key failed for %s-%zu.\n",
245 alg_name, (size_t)sop->keylen*8);
246 ret = -EINVAL;
247 goto error_cipher;
250 if (unlikely(copy_from_user(keyp, sop->key, sop->keylen))) {
251 ret = -EFAULT;
252 goto error_cipher;
255 ret = cryptodev_cipher_init(&ses_new->cdata, alg_name, keyp,
256 sop->keylen);
257 if (ret < 0) {
258 dprintk(1, KERN_DEBUG,
259 "%s: Failed to load cipher for %s\n",
260 __func__, alg_name);
261 ret = -EINVAL;
262 goto error_cipher;
266 if (hash_name) {
267 uint8_t keyp[CRYPTO_HMAC_MAX_KEY_LEN];
269 if (unlikely(sop->mackeylen > CRYPTO_HMAC_MAX_KEY_LEN)) {
270 dprintk(1, KERN_DEBUG,
271 "Setting key failed for %s-%zu.\n",
272 hash_name, (size_t)sop->mackeylen*8);
273 ret = -EINVAL;
274 goto error_hash;
277 if (sop->mackey && unlikely(copy_from_user(keyp, sop->mackey,
278 sop->mackeylen))) {
279 ret = -EFAULT;
280 goto error_hash;
283 ret = cryptodev_hash_init(&ses_new->hdata, hash_name, hmac_mode,
284 keyp, sop->mackeylen);
285 if (ret != 0) {
286 dprintk(1, KERN_DEBUG,
287 "%s: Failed to load hash for %s\n",
288 __func__, hash_name);
289 ret = -EINVAL;
290 goto error_hash;
294 ses_new->alignmask = max(ses_new->cdata.alignmask,
295 ses_new->hdata.alignmask);
296 dprintk(2, KERN_DEBUG, "%s: got alignmask %d\n", __func__, ses_new->alignmask);
298 ses_new->array_size = DEFAULT_PREALLOC_PAGES;
299 dprintk(2, KERN_DEBUG, "%s: preallocating for %d user pages\n",
300 __func__, ses_new->array_size);
301 ses_new->pages = kzalloc(ses_new->array_size *
302 sizeof(struct page *), GFP_KERNEL);
303 ses_new->sg = kzalloc(ses_new->array_size *
304 sizeof(struct scatterlist), GFP_KERNEL);
305 if (ses_new->sg == NULL || ses_new->pages == NULL) {
306 dprintk(0, KERN_DEBUG, "Memory error\n");
307 ret = -ENOMEM;
308 goto error_hash;
311 /* put the new session to the list */
312 get_random_bytes(&ses_new->sid, sizeof(ses_new->sid));
313 mutex_init(&ses_new->sem);
315 mutex_lock(&fcr->sem);
316 restart:
317 list_for_each_entry(ses_ptr, &fcr->list, entry) {
318 /* Check for duplicate SID */
319 if (unlikely(ses_new->sid == ses_ptr->sid)) {
320 get_random_bytes(&ses_new->sid, sizeof(ses_new->sid));
321 /* Unless we have a broken RNG this
322 shouldn't loop forever... ;-) */
323 goto restart;
327 list_add(&ses_new->entry, &fcr->list);
328 mutex_unlock(&fcr->sem);
330 /* Fill in some values for the user. */
331 sop->ses = ses_new->sid;
333 return 0;
335 error_hash:
336 cryptodev_cipher_deinit(&ses_new->cdata);
337 kfree(ses_new->sg);
338 kfree(ses_new->pages);
339 error_cipher:
340 kfree(ses_new);
342 return ret;
346 /* Everything that needs to be done when removing a session. */
347 static inline void
348 crypto_destroy_session(struct csession *ses_ptr)
350 if (!mutex_trylock(&ses_ptr->sem)) {
351 dprintk(2, KERN_DEBUG, "Waiting for semaphore of sid=0x%08X\n",
352 ses_ptr->sid);
353 mutex_lock(&ses_ptr->sem);
355 dprintk(2, KERN_DEBUG, "Removed session 0x%08X\n", ses_ptr->sid);
356 #if defined(CRYPTODEV_STATS)
357 if (enable_stats)
358 dprintk(2, KERN_DEBUG,
359 "Usage in Bytes: enc=%llu, dec=%llu, "
360 "max=%zu, avg=%lu, cnt=%zu\n",
361 ses_ptr->stat[COP_ENCRYPT], ses_ptr->stat[COP_DECRYPT],
362 ses_ptr->stat_max_size, ses_ptr->stat_count > 0
363 ? ((unsigned long)(ses_ptr->stat[COP_ENCRYPT]+
364 ses_ptr->stat[COP_DECRYPT]) /
365 ses_ptr->stat_count) : 0,
366 ses_ptr->stat_count);
367 #endif
368 cryptodev_cipher_deinit(&ses_ptr->cdata);
369 cryptodev_hash_deinit(&ses_ptr->hdata);
370 dprintk(2, KERN_DEBUG, "%s: freeing space for %d user pages\n",
371 __func__, ses_ptr->array_size);
372 kfree(ses_ptr->pages);
373 kfree(ses_ptr->sg);
374 mutex_unlock(&ses_ptr->sem);
375 kfree(ses_ptr);
378 /* Look up a session by ID and remove. */
379 static int
380 crypto_finish_session(struct fcrypt *fcr, uint32_t sid)
382 struct csession *tmp, *ses_ptr;
383 struct list_head *head;
384 int ret = 0;
386 mutex_lock(&fcr->sem);
387 head = &fcr->list;
388 list_for_each_entry_safe(ses_ptr, tmp, head, entry) {
389 if (ses_ptr->sid == sid) {
390 list_del(&ses_ptr->entry);
391 crypto_destroy_session(ses_ptr);
392 break;
396 if (unlikely(!ses_ptr)) {
397 dprintk(1, KERN_ERR, "Session with sid=0x%08X not found!\n",
398 sid);
399 ret = -ENOENT;
401 mutex_unlock(&fcr->sem);
403 return ret;
406 /* Remove all sessions when closing the file */
407 static int
408 crypto_finish_all_sessions(struct fcrypt *fcr)
410 struct csession *tmp, *ses_ptr;
411 struct list_head *head;
413 mutex_lock(&fcr->sem);
415 head = &fcr->list;
416 list_for_each_entry_safe(ses_ptr, tmp, head, entry) {
417 list_del(&ses_ptr->entry);
418 crypto_destroy_session(ses_ptr);
420 mutex_unlock(&fcr->sem);
422 return 0;
425 /* Look up session by session ID. The returned session is locked. */
426 static struct csession *
427 crypto_get_session_by_sid(struct fcrypt *fcr, uint32_t sid)
429 struct csession *ses_ptr, *retval = NULL;
431 mutex_lock(&fcr->sem);
432 list_for_each_entry(ses_ptr, &fcr->list, entry) {
433 if (ses_ptr->sid == sid) {
434 mutex_lock(&ses_ptr->sem);
435 retval = ses_ptr;
436 break;
439 mutex_unlock(&fcr->sem);
441 return retval;
444 static int
445 hash_n_crypt(struct csession *ses_ptr, struct crypt_op *cop,
446 struct scatterlist *src_sg, struct scatterlist *dst_sg,
447 uint32_t len)
449 int ret;
451 /* Always hash before encryption and after decryption. Maybe
452 * we should introduce a flag to switch... TBD later on.
454 if (cop->op == COP_ENCRYPT) {
455 if (ses_ptr->hdata.init != 0) {
456 ret = cryptodev_hash_update(&ses_ptr->hdata,
457 src_sg, len);
458 if (unlikely(ret))
459 goto out_err;
461 if (ses_ptr->cdata.init != 0) {
462 ret = cryptodev_cipher_encrypt(&ses_ptr->cdata,
463 src_sg, dst_sg, len);
465 if (unlikely(ret))
466 goto out_err;
468 } else {
469 if (ses_ptr->cdata.init != 0) {
470 ret = cryptodev_cipher_decrypt(&ses_ptr->cdata,
471 src_sg, dst_sg, len);
473 if (unlikely(ret))
474 goto out_err;
477 if (ses_ptr->hdata.init != 0) {
478 ret = cryptodev_hash_update(&ses_ptr->hdata,
479 dst_sg, len);
480 if (unlikely(ret))
481 goto out_err;
484 return 0;
485 out_err:
486 dprintk(0, KERN_ERR, "CryptoAPI failure: %d\n", ret);
487 return ret;
491 /* This is the main crypto function - feed it with plaintext
492 and get a ciphertext (or vice versa :-) */
493 static int
494 __crypto_run_std(struct csession *ses_ptr, struct crypt_op *cop)
496 char *data;
497 char __user *src, *dst;
498 struct scatterlist sg;
499 size_t nbytes, bufsize;
500 int ret = 0;
502 nbytes = cop->len;
503 data = (char *)__get_free_page(GFP_KERNEL);
505 if (unlikely(!data))
506 return -ENOMEM;
508 bufsize = PAGE_SIZE < nbytes ? PAGE_SIZE : nbytes;
510 src = cop->src;
511 dst = cop->dst;
513 while (nbytes > 0) {
514 size_t current_len = nbytes > bufsize ? bufsize : nbytes;
516 if (unlikely(copy_from_user(data, src, current_len))) {
517 ret = -EFAULT;
518 break;
521 sg_init_one(&sg, data, current_len);
523 ret = hash_n_crypt(ses_ptr, cop, &sg, &sg, current_len);
525 if (unlikely(ret))
526 break;
528 if (ses_ptr->cdata.init != 0) {
529 if (unlikely(copy_to_user(dst, data, current_len))) {
530 ret = -EFAULT;
531 break;
535 dst += current_len;
536 nbytes -= current_len;
537 src += current_len;
540 free_page((unsigned long)data);
541 return ret;
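/* Undo __get_userbuf(): mark each writable (non-reserved) page dirty and
 * drop the reference taken on it by get_user_pages(). */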
544 void release_user_pages(struct page **pg, int pagecount)
546 while (pagecount--) {
547 if (!PageReserved(pg[pagecount]))
548 SetPageDirty(pg[pagecount]);
549 page_cache_release(pg[pagecount]);
553 /* offset of buf in its first page */
554 #define PAGEOFFSET(buf) ((unsigned long)buf & ~PAGE_MASK)
556 /* fetch the pages that addr resides in into pg and initialise sg with them */
557 int __get_userbuf(uint8_t __user *addr, uint32_t len, int write,
558 int pgcount, struct page **pg, struct scatterlist *sg,
559 struct task_struct *task, struct mm_struct *mm)
561 int ret, pglen, i = 0;
562 struct scatterlist *sgp;
564 down_write(&mm->mmap_sem);
565 ret = get_user_pages(task, mm,
566 (unsigned long)addr, pgcount, write, 0, pg, NULL);
567 up_write(&mm->mmap_sem);
568 if (ret != pgcount)
569 return -EINVAL;
571 sg_init_table(sg, pgcount);
573 pglen = min((ptrdiff_t)(PAGE_SIZE - PAGEOFFSET(addr)), (ptrdiff_t)len);
574 sg_set_page(sg, pg[i++], pglen, PAGEOFFSET(addr));
576 len -= pglen;
577 for (sgp = sg_next(sg); len; sgp = sg_next(sgp)) {
578 pglen = min((uint32_t)PAGE_SIZE, len);
579 sg_set_page(sgp, pg[i++], pglen, 0);
580 len -= pglen;
582 sg_mark_end(sg_last(sg, pgcount));
583 return 0;
586 /* make cop->src and cop->dst available in scatterlists */
587 static int get_userbuf(struct csession *ses, struct kernel_crypt_op *kcop,
588 struct scatterlist **src_sg, struct scatterlist **dst_sg,
589 int *tot_pages)
591 int src_pagecount, dst_pagecount = 0, pagecount, write_src = 1;
592 struct crypt_op *cop = &kcop->cop;
593 int rc;
595 if (cop->src == NULL)
596 return -EINVAL;
598 if (ses->alignmask && !IS_ALIGNED((unsigned long)cop->src, ses->alignmask)) {
599 dprintk(2, KERN_WARNING, "%s: careful - source address %lx is not %d byte aligned\n",
600 __func__, (unsigned long)cop->src, ses->alignmask + 1);
603 src_pagecount = PAGECOUNT(cop->src, cop->len);
604 if (!ses->cdata.init) { /* hashing only */
605 write_src = 0;
606 } else if (cop->src != cop->dst) { /* non-in-situ transformation */
607 if (cop->dst == NULL)
608 return -EINVAL;
610 dst_pagecount = PAGECOUNT(cop->dst, cop->len);
611 write_src = 0;
613 if (ses->alignmask && !IS_ALIGNED((unsigned long)cop->dst, ses->alignmask)) {
614 dprintk(2, KERN_WARNING, "%s: careful - destination address %lx is not %d byte aligned\n",
615 __func__, (unsigned long)cop->dst, ses->alignmask + 1);
619 (*tot_pages) = pagecount = src_pagecount + dst_pagecount;
621 if (pagecount > ses->array_size) {
622 struct scatterlist *sg;
623 struct page **pages;
624 int array_size;
626 for (array_size = ses->array_size; array_size < pagecount;
627 array_size *= 2)
630 dprintk(2, KERN_DEBUG, "%s: reallocating to %d elements\n",
631 __func__, array_size);
632 pages = krealloc(ses->pages, array_size * sizeof(struct page *),
633 GFP_KERNEL);
634 if (unlikely(!pages))
635 return -ENOMEM;
636 ses->pages = pages;
637 sg = krealloc(ses->sg, array_size * sizeof(struct scatterlist),
638 GFP_KERNEL);
639 if (unlikely(!sg))
640 return -ENOMEM;
641 ses->sg = sg;
642 ses->array_size = array_size;
645 rc = __get_userbuf(cop->src, cop->len, write_src, src_pagecount,
646 ses->pages, ses->sg, kcop->task, kcop->mm);
647 if (unlikely(rc)) {
648 dprintk(1, KERN_ERR,
649 "failed to get user pages for data input\n");
650 return -EINVAL;
652 (*src_sg) = (*dst_sg) = ses->sg;
654 if (!dst_pagecount)
655 return 0;
657 (*dst_sg) = ses->sg + src_pagecount;
659 rc = __get_userbuf(cop->dst, cop->len, 1, dst_pagecount,
660 ses->pages + src_pagecount, *dst_sg,
661 kcop->task, kcop->mm);
662 if (unlikely(rc)) {
663 dprintk(1, KERN_ERR,
664 "failed to get user pages for data output\n");
665 release_user_pages(ses->pages, src_pagecount);
666 return -EINVAL;
668 return 0;
671 /* This is the main crypto function - zero-copy edition */
672 static int
673 __crypto_run_zc(struct csession *ses_ptr, struct kernel_crypt_op *kcop)
675 struct scatterlist *src_sg, *dst_sg;
676 struct crypt_op *cop = &kcop->cop;
677 int ret = 0, pagecount;
679 ret = get_userbuf(ses_ptr, kcop, &src_sg, &dst_sg, &pagecount);
680 if (unlikely(ret)) {
681 dprintk(1, KERN_ERR, "Error getting user pages. "
682 "Falling back to non zero copy.\n");
683 return __crypto_run_std(ses_ptr, cop);
686 ret = hash_n_crypt(ses_ptr, cop, src_sg, dst_sg, cop->len);
688 release_user_pages(ses_ptr->pages, pagecount);
689 return ret;
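/* Run a single crypt_op synchronously: look up and lock the session, reset
 * the hash state unless COP_FLAG_UPDATE or COP_FLAG_FINAL is set, check that
 * the length is a multiple of the cipher block size, load the IV, push the
 * data through the zero-copy path (or the copying path when COP_FLAG_NO_ZC is
 * set or page pinning fails), then read back the IV and, when the hash is
 * finalised, the digest size for the caller. */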
692 static int crypto_run(struct fcrypt *fcr, struct kernel_crypt_op *kcop)
694 struct csession *ses_ptr;
695 struct crypt_op *cop = &kcop->cop;
696 int ret;
698 if (unlikely(cop->op != COP_ENCRYPT && cop->op != COP_DECRYPT)) {
699 dprintk(1, KERN_DEBUG, "invalid operation op=%u\n", cop->op);
700 return -EINVAL;
703 /* this also enters ses_ptr->sem */
704 ses_ptr = crypto_get_session_by_sid(fcr, cop->ses);
705 if (unlikely(!ses_ptr)) {
706 dprintk(1, KERN_ERR, "invalid session ID=0x%08X\n", cop->ses);
707 return -EINVAL;
710 if (ses_ptr->hdata.init != 0 && !(cop->flags & (COP_FLAG_UPDATE | COP_FLAG_FINAL))) {
711 ret = cryptodev_hash_reset(&ses_ptr->hdata);
712 if (unlikely(ret)) {
713 dprintk(1, KERN_ERR,
714 "error in cryptodev_hash_reset()\n");
715 goto out_unlock;
719 if (ses_ptr->cdata.init != 0) {
720 int blocksize = ses_ptr->cdata.blocksize;
722 if (unlikely(cop->len % blocksize)) {
723 dprintk(1, KERN_ERR,
724 "data size (%u) isn't a multiple "
725 "of block size (%u)\n",
726 cop->len, blocksize);
727 ret = -EINVAL;
728 goto out_unlock;
731 cryptodev_cipher_set_iv(&ses_ptr->cdata, kcop->iv,
732 min(ses_ptr->cdata.ivsize, kcop->ivlen));
735 if (likely(cop->len)) {
736 if (cop->flags & COP_FLAG_NO_ZC)
737 ret = __crypto_run_std(ses_ptr, &kcop->cop);
738 else
739 ret = __crypto_run_zc(ses_ptr, kcop);
740 if (unlikely(ret))
741 goto out_unlock;
744 if (ses_ptr->cdata.init != 0) {
745 cryptodev_cipher_get_iv(&ses_ptr->cdata, kcop->iv,
746 min(ses_ptr->cdata.ivsize, kcop->ivlen));
749 if (ses_ptr->hdata.init != 0 &&
750 ((cop->flags & COP_FLAG_FINAL) ||
751 (!(cop->flags & COP_FLAG_UPDATE) || cop->len == 0))) {
753 ret = cryptodev_hash_final(&ses_ptr->hdata, kcop->hash_output);
754 if (unlikely(ret)) {
755 dprintk(0, KERN_ERR, "CryptoAPI failure: %d\n", ret);
756 goto out_unlock;
758 kcop->digestsize = ses_ptr->hdata.digestsize;
761 #if defined(CRYPTODEV_STATS)
762 if (enable_stats) {
763 /* this is safe - we check cop->op at the function entry */
764 ses_ptr->stat[cop->op] += cop->len;
765 if (ses_ptr->stat_max_size < cop->len)
766 ses_ptr->stat_max_size = cop->len;
767 ses_ptr->stat_count++;
769 #endif
771 out_unlock:
772 mutex_unlock(&ses_ptr->sem);
773 return ret;
776 static void cryptask_routine(struct work_struct *work)
778 struct crypt_priv *pcr = container_of(work, struct crypt_priv, cryptask);
779 struct todo_list_item *item;
780 LIST_HEAD(tmp);
782 /* fetch all pending jobs into the temporary list */
783 mutex_lock(&pcr->todo.lock);
784 list_cut_position(&tmp, &pcr->todo.list, pcr->todo.list.prev);
785 mutex_unlock(&pcr->todo.lock);
787 /* handle each job locklessly */
788 list_for_each_entry(item, &tmp, __hook) {
789 item->result = crypto_run(&pcr->fcrypt, &item->kcop);
790 if (unlikely(item->result))
791 dprintk(0, KERN_ERR, "%s: crypto_run() failed: %d\n",
792 __func__, item->result);
795 /* push all handled jobs to the done list at once */
796 mutex_lock(&pcr->done.lock);
797 list_splice_tail(&tmp, &pcr->done.list);
798 mutex_unlock(&pcr->done.lock);
800 /* wake for POLLIN */
801 wake_up_interruptible(&pcr->user_waiter);
804 /* ====== /dev/crypto ====== */
806 static int
807 cryptodev_open(struct inode *inode, struct file *filp)
809 struct todo_list_item *tmp;
810 struct crypt_priv *pcr;
811 int i;
813 pcr = kmalloc(sizeof(*pcr), GFP_KERNEL);
814 if (!pcr)
815 return -ENOMEM;
817 memset(pcr, 0, sizeof(*pcr));
818 mutex_init(&pcr->fcrypt.sem);
819 INIT_LIST_HEAD(&pcr->fcrypt.list);
821 INIT_LIST_HEAD(&pcr->free.list);
822 INIT_LIST_HEAD(&pcr->todo.list);
823 INIT_LIST_HEAD(&pcr->done.list);
824 INIT_WORK(&pcr->cryptask, cryptask_routine);
825 mutex_init(&pcr->free.lock);
826 mutex_init(&pcr->todo.lock);
827 mutex_init(&pcr->done.lock);
828 init_waitqueue_head(&pcr->user_waiter);
830 for (i = 0; i < DEF_COP_RINGSIZE; i++) {
831 tmp = kzalloc(sizeof(struct todo_list_item), GFP_KERNEL);
832 pcr->itemcount++;
833 dprintk(2, KERN_DEBUG, "%s: allocated new item at %lx\n",
834 __func__, (unsigned long)tmp);
835 list_add(&tmp->__hook, &pcr->free.list);
838 filp->private_data = pcr;
839 dprintk(2, KERN_DEBUG,
840 "Cryptodev handle initialised, %d elements in queue\n",
841 DEF_COP_RINGSIZE);
842 return 0;
845 static int
846 cryptodev_release(struct inode *inode, struct file *filp)
848 struct crypt_priv *pcr = filp->private_data;
849 struct todo_list_item *item, *item_safe;
850 int items_freed = 0;
852 if (!pcr)
853 return 0;
855 cancel_work_sync(&pcr->cryptask);
857 mutex_destroy(&pcr->todo.lock);
858 mutex_destroy(&pcr->done.lock);
859 mutex_destroy(&pcr->free.lock);
861 list_splice_tail(&pcr->todo.list, &pcr->free.list);
862 list_splice_tail(&pcr->done.list, &pcr->free.list);
864 list_for_each_entry_safe(item, item_safe, &pcr->free.list, __hook) {
865 dprintk(2, KERN_DEBUG, "%s: freeing item at %lx\n",
866 __func__, (unsigned long)item);
867 list_del(&item->__hook);
868 kfree(item);
869 items_freed++;
872 if (items_freed != pcr->itemcount) {
873 dprintk(0, KERN_ERR,
874 "%s: freed %d items, but %d should exist!\n",
875 __func__, items_freed, pcr->itemcount);
878 crypto_finish_all_sessions(&pcr->fcrypt);
879 kfree(pcr);
880 filp->private_data = NULL;
882 dprintk(2, KERN_DEBUG,
883 "Cryptodev handle deinitialised, %d elements freed\n",
884 items_freed);
885 return 0;
888 static int
889 clonefd(struct file *filp)
891 int ret;
892 ret = get_unused_fd();
893 if (ret >= 0) {
894 get_file(filp);
895 fd_install(ret, filp);
898 return ret;
901 /* enqueue a job for asynchronous completion
903 * returns:
904 * -EBUSY when there are no free queue slots left
905 * (and the number of slots has reached MAX_COP_RINGSIZE)
906 * -EFAULT when there was a memory allocation error
907 * 0 on success */
908 static int crypto_async_run(struct crypt_priv *pcr, struct kernel_crypt_op *kcop)
910 struct todo_list_item *item = NULL;
912 mutex_lock(&pcr->free.lock);
913 if (likely(!list_empty(&pcr->free.list))) {
914 item = list_first_entry(&pcr->free.list,
915 struct todo_list_item, __hook);
916 list_del(&item->__hook);
917 } else if (pcr->itemcount < MAX_COP_RINGSIZE) {
918 pcr->itemcount++;
919 } else {
920 mutex_unlock(&pcr->free.lock);
921 return -EBUSY;
923 mutex_unlock(&pcr->free.lock);
925 if (unlikely(!item)) {
926 item = kzalloc(sizeof(struct todo_list_item), GFP_KERNEL);
927 if (unlikely(!item))
928 return -EFAULT;
929 dprintk(1, KERN_INFO, "%s: increased item count to %d\n",
930 __func__, pcr->itemcount);
933 memcpy(&item->kcop, kcop, sizeof(struct kernel_crypt_op));
935 mutex_lock(&pcr->todo.lock);
936 list_add_tail(&item->__hook, &pcr->todo.list);
937 mutex_unlock(&pcr->todo.lock);
939 queue_work(cryptodev_wq, &pcr->cryptask);
940 return 0;
943 /* get the first completed job from the "done" queue
945 * returns:
946 * -EBUSY if no completed jobs are ready (yet)
947 * the return value of crypto_run() otherwise */
948 static int crypto_async_fetch(struct crypt_priv *pcr,
949 struct kernel_crypt_op *kcop)
951 struct todo_list_item *item;
952 int retval;
954 mutex_lock(&pcr->done.lock);
955 if (list_empty(&pcr->done.list)) {
956 mutex_unlock(&pcr->done.lock);
957 return -EBUSY;
959 item = list_first_entry(&pcr->done.list, struct todo_list_item, __hook);
960 list_del(&item->__hook);
961 mutex_unlock(&pcr->done.lock);
963 memcpy(kcop, &item->kcop, sizeof(struct kernel_crypt_op));
964 retval = item->result;
966 mutex_lock(&pcr->free.lock);
967 list_add_tail(&item->__hook, &pcr->free.list);
968 mutex_unlock(&pcr->free.lock);
970 /* wake for POLLOUT */
971 wake_up_interruptible(&pcr->user_waiter);
973 return retval;
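/* From userspace the asynchronous path is driven roughly as follows (an
 * illustrative sketch only, error handling omitted; cfd is the /dev/crypto
 * descriptor and cop a struct crypt_op prepared as for CIOCCRYPT):
 *
 *	ioctl(cfd, CIOCASYNCCRYPT, &cop);
 *
 *	struct pollfd pfd = { .fd = cfd, .events = POLLIN };
 *	poll(&pfd, 1, -1);
 *
 *	ioctl(cfd, CIOCASYNCFETCH, &cop);
 *
 * CIOCASYNCCRYPT only enqueues the job, poll() blocks until at least one job
 * has completed, and CIOCASYNCFETCH returns the result of crypto_run() for
 * the oldest completed job. */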
976 /* this function has to be called from process context */
977 static int fill_kcop_from_cop(struct kernel_crypt_op *kcop, struct fcrypt *fcr)
979 struct crypt_op *cop = &kcop->cop;
980 struct csession *ses_ptr;
981 int rc;
983 /* this also enters ses_ptr->sem */
984 ses_ptr = crypto_get_session_by_sid(fcr, cop->ses);
985 if (unlikely(!ses_ptr)) {
986 dprintk(1, KERN_ERR, "invalid session ID=0x%08X\n", cop->ses);
987 return -EINVAL;
989 kcop->ivlen = cop->iv ? ses_ptr->cdata.ivsize : 0;
990 kcop->digestsize = 0; /* will be updated during operation */
992 mutex_unlock(&ses_ptr->sem);
994 kcop->task = current;
995 kcop->mm = current->mm;
997 if (cop->iv) {
998 rc = copy_from_user(kcop->iv, cop->iv, kcop->ivlen);
999 if (unlikely(rc)) {
1000 dprintk(1, KERN_ERR,
1001 "error copying IV (%d bytes), copy_from_user returned %d for address %lx\n",
1002 kcop->ivlen, rc, (unsigned long)cop->iv);
1003 return -EFAULT;
1007 return 0;
1010 /* this function has to be called from process context */
1011 static int fill_cop_from_kcop(struct kernel_crypt_op *kcop, struct fcrypt *fcr)
1013 int ret;
1015 if (kcop->digestsize) {
1016 ret = copy_to_user(kcop->cop.mac,
1017 kcop->hash_output, kcop->digestsize);
1018 if (unlikely(ret))
1019 return -EFAULT;
1021 if (kcop->ivlen && kcop->cop.flags & COP_FLAG_WRITE_IV) {
1022 ret = copy_to_user(kcop->cop.iv,
1023 kcop->iv, kcop->ivlen);
1024 if (unlikely(ret))
1025 return -EFAULT;
1027 return 0;
1030 static int kcop_from_user(struct kernel_crypt_op *kcop,
1031 struct fcrypt *fcr, void __user *arg)
1033 if (unlikely(copy_from_user(&kcop->cop, arg, sizeof(kcop->cop))))
1034 return -EFAULT;
1036 return fill_kcop_from_cop(kcop, fcr);
1039 static int kcop_to_user(struct kernel_crypt_op *kcop,
1040 struct fcrypt *fcr, void __user *arg)
1042 int ret;
1044 ret = fill_cop_from_kcop(kcop, fcr);
1045 if (unlikely(ret))
1046 return ret;
1048 if (unlikely(copy_to_user(arg, &kcop->cop, sizeof(kcop->cop))))
1049 return -EFAULT;
1050 return 0;
1053 static inline void tfm_info_to_alg_info(struct alg_info *dst, struct crypto_tfm *tfm)
1055 snprintf(dst->cra_name, CRYPTODEV_MAX_ALG_NAME,
1056 "%s", crypto_tfm_alg_name(tfm));
1057 snprintf(dst->cra_driver_name, CRYPTODEV_MAX_ALG_NAME,
1058 "%s", crypto_tfm_alg_driver_name(tfm));
1061 static int get_session_info(struct fcrypt *fcr, struct session_info_op *siop)
1063 struct csession *ses_ptr;
1065 /* this also enters ses_ptr->sem */
1066 ses_ptr = crypto_get_session_by_sid(fcr, siop->ses);
1067 if (unlikely(!ses_ptr)) {
1068 dprintk(1, KERN_ERR, "invalid session ID=0x%08X\n", siop->ses);
1069 return -EINVAL;
1072 if (ses_ptr->cdata.init) {
1073 tfm_info_to_alg_info(&siop->cipher_info,
1074 crypto_ablkcipher_tfm(ses_ptr->cdata.async.s));
1076 if (ses_ptr->hdata.init) {
1077 tfm_info_to_alg_info(&siop->hash_info,
1078 crypto_ahash_tfm(ses_ptr->hdata.async.s));
1081 siop->alignmask = ses_ptr->alignmask;
1083 mutex_unlock(&ses_ptr->sem);
1084 return 0;
1087 static long
1088 cryptodev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg_)
1090 void __user *arg = (void __user *)arg_;
1091 int __user *p = arg;
1092 struct session_op sop;
1093 struct kernel_crypt_op kcop;
1094 struct crypt_priv *pcr = filp->private_data;
1095 struct fcrypt *fcr;
1096 struct session_info_op siop;
1097 uint32_t ses;
1098 int ret, fd;
1100 if (unlikely(!pcr))
1101 BUG();
1103 fcr = &pcr->fcrypt;
1105 switch (cmd) {
1106 case CIOCASYMFEAT:
1107 return put_user(0, p);
1108 case CRIOGET:
1109 fd = clonefd(filp);
1110 ret = put_user(fd, p);
1111 if (unlikely(ret)) {
1112 sys_close(fd);
1113 return ret;
1115 return ret;
1116 case CIOCGSESSION:
1117 if (unlikely(copy_from_user(&sop, arg, sizeof(sop))))
1118 return -EFAULT;
1120 ret = crypto_create_session(fcr, &sop);
1121 if (unlikely(ret))
1122 return ret;
1123 ret = copy_to_user(arg, &sop, sizeof(sop));
1124 if (unlikely(ret)) {
1125 crypto_finish_session(fcr, sop.ses);
1126 return -EFAULT;
1128 return ret;
1129 case CIOCFSESSION:
1130 ret = get_user(ses, (uint32_t __user *)arg);
1131 if (unlikely(ret))
1132 return ret;
1133 ret = crypto_finish_session(fcr, ses);
1134 return ret;
1135 case CIOCGSESSINFO:
1136 if (unlikely(copy_from_user(&siop, arg, sizeof(siop))))
1137 return -EFAULT;
1139 ret = get_session_info(fcr, &siop);
1140 if (unlikely(ret))
1141 return ret;
1142 return copy_to_user(arg, &siop, sizeof(siop));
1143 case CIOCCRYPT:
1144 if (unlikely(ret = kcop_from_user(&kcop, fcr, arg)))
1145 return ret;
1147 ret = crypto_run(fcr, &kcop);
1148 if (unlikely(ret))
1149 return ret;
1151 return kcop_to_user(&kcop, fcr, arg);
1152 case CIOCASYNCCRYPT:
1153 if (unlikely(ret = kcop_from_user(&kcop, fcr, arg)))
1154 return ret;
1156 return crypto_async_run(pcr, &kcop);
1157 case CIOCASYNCFETCH:
1158 ret = crypto_async_fetch(pcr, &kcop);
1159 if (unlikely(ret))
1160 return ret;
1162 return kcop_to_user(&kcop, fcr, arg);
1163 default:
1164 return -EINVAL;
1168 /* compatibility code for 32bit userlands */
1169 #ifdef CONFIG_COMPAT
1171 static inline void
1172 compat_to_session_op(struct compat_session_op *compat, struct session_op *sop)
1174 sop->cipher = compat->cipher;
1175 sop->mac = compat->mac;
1176 sop->keylen = compat->keylen;
1178 sop->key = compat_ptr(compat->key);
1179 sop->mackeylen = compat->mackeylen;
1180 sop->mackey = compat_ptr(compat->mackey);
1181 sop->ses = compat->ses;
1184 static inline void
1185 session_op_to_compat(struct session_op *sop, struct compat_session_op *compat)
1187 compat->cipher = sop->cipher;
1188 compat->mac = sop->mac;
1189 compat->keylen = sop->keylen;
1191 compat->key = ptr_to_compat(sop->key);
1192 compat->mackeylen = sop->mackeylen;
1193 compat->mackey = ptr_to_compat(sop->mackey);
1194 compat->ses = sop->ses;
1197 static inline void
1198 compat_to_crypt_op(struct compat_crypt_op *compat, struct crypt_op *cop)
1200 cop->ses = compat->ses;
1201 cop->op = compat->op;
1202 cop->flags = compat->flags;
1203 cop->len = compat->len;
1205 cop->src = compat_ptr(compat->src);
1206 cop->dst = compat_ptr(compat->dst);
1207 cop->mac = compat_ptr(compat->mac);
1208 cop->iv = compat_ptr(compat->iv);
1211 static inline void
1212 crypt_op_to_compat(struct crypt_op *cop, struct compat_crypt_op *compat)
1214 compat->ses = cop->ses;
1215 compat->op = cop->op;
1216 compat->flags = cop->flags;
1217 compat->len = cop->len;
1219 compat->src = ptr_to_compat(cop->src);
1220 compat->dst = ptr_to_compat(cop->dst);
1221 compat->mac = ptr_to_compat(cop->mac);
1222 compat->iv = ptr_to_compat(cop->iv);
1225 static int compat_kcop_from_user(struct kernel_crypt_op *kcop,
1226 struct fcrypt *fcr, void __user *arg)
1228 struct compat_crypt_op compat_cop;
1230 if (unlikely(copy_from_user(&compat_cop, arg, sizeof(compat_cop))))
1231 return -EFAULT;
1232 compat_to_crypt_op(&compat_cop, &kcop->cop);
1234 return fill_kcop_from_cop(kcop, fcr);
1237 static int compat_kcop_to_user(struct kernel_crypt_op *kcop,
1238 struct fcrypt *fcr, void __user *arg)
1240 int ret;
1241 struct compat_crypt_op compat_cop;
1243 ret = fill_cop_from_kcop(kcop, fcr);
1244 if (unlikely(ret))
1245 return ret;
1246 crypt_op_to_compat(&kcop->cop, &compat_cop);
1248 if (unlikely(copy_to_user(arg, &compat_cop, sizeof(compat_cop))))
1249 return -EFAULT;
1250 return 0;
1253 static long
1254 cryptodev_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg_)
1256 void __user *arg = (void __user *)arg_;
1257 struct crypt_priv *pcr = file->private_data;
1258 struct fcrypt *fcr;
1259 struct session_op sop;
1260 struct compat_session_op compat_sop;
1261 struct kernel_crypt_op kcop;
1262 int ret;
1264 if (unlikely(!pcr))
1265 BUG();
1267 fcr = &pcr->fcrypt;
1269 switch (cmd) {
1270 case CIOCASYMFEAT:
1271 case CRIOGET:
1272 case CIOCFSESSION:
1273 case CIOCGSESSINFO:
1274 return cryptodev_ioctl(file, cmd, arg_);
1276 case COMPAT_CIOCGSESSION:
1277 if (unlikely(copy_from_user(&compat_sop, arg,
1278 sizeof(compat_sop))))
1279 return -EFAULT;
1280 compat_to_session_op(&compat_sop, &sop);
1282 ret = crypto_create_session(fcr, &sop);
1283 if (unlikely(ret))
1284 return ret;
1286 session_op_to_compat(&sop, &compat_sop);
1287 ret = copy_to_user(arg, &compat_sop, sizeof(compat_sop));
1288 if (unlikely(ret)) {
1289 crypto_finish_session(fcr, sop.ses);
1290 return -EFAULT;
1292 return ret;
1294 case COMPAT_CIOCCRYPT:
1295 ret = compat_kcop_from_user(&kcop, fcr, arg);
1296 if (unlikely(ret))
1297 return ret;
1299 ret = crypto_run(fcr, &kcop);
1300 if (unlikely(ret))
1301 return ret;
1303 return compat_kcop_to_user(&kcop, fcr, arg);
1304 case COMPAT_CIOCASYNCCRYPT:
1305 if (unlikely(ret = compat_kcop_from_user(&kcop, fcr, arg)))
1306 return ret;
1308 return crypto_async_run(pcr, &kcop);
1309 case COMPAT_CIOCASYNCFETCH:
1310 ret = crypto_async_fetch(pcr, &kcop);
1311 if (unlikely(ret))
1312 return ret;
1314 return compat_kcop_to_user(&kcop, fcr, arg);
1316 default:
1317 return -EINVAL;
1321 #endif /* CONFIG_COMPAT */
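/* POLLIN/POLLRDNORM is signalled once at least one asynchronous job has
 * completed and can be collected with CIOCASYNCFETCH; POLLOUT/POLLWRNORM is
 * signalled while another job can still be queued, i.e. while a free item
 * exists or the ring may still grow towards MAX_COP_RINGSIZE. */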
1323 static unsigned int cryptodev_poll(struct file *file, poll_table *wait)
1325 struct crypt_priv *pcr = file->private_data;
1326 int ret = 0;
1328 poll_wait(file, &pcr->user_waiter, wait);
1330 if (!list_empty_careful(&pcr->done.list))
1331 ret |= POLLIN | POLLRDNORM;
1332 if (!list_empty_careful(&pcr->free.list) || pcr->itemcount < MAX_COP_RINGSIZE)
1333 ret |= POLLOUT | POLLWRNORM;
1335 return ret;
1338 static const struct file_operations cryptodev_fops = {
1339 .owner = THIS_MODULE,
1340 .open = cryptodev_open,
1341 .release = cryptodev_release,
1342 .unlocked_ioctl = cryptodev_ioctl,
1343 #ifdef CONFIG_COMPAT
1344 .compat_ioctl = cryptodev_compat_ioctl,
1345 #endif /* CONFIG_COMPAT */
1346 .poll = cryptodev_poll,
1349 static struct miscdevice cryptodev = {
1350 .minor = MISC_DYNAMIC_MINOR,
1351 .name = "crypto",
1352 .fops = &cryptodev_fops,
1355 static int __init
1356 cryptodev_register(void)
1358 int rc;
1360 rc = misc_register(&cryptodev);
1361 if (unlikely(rc)) {
1362 printk(KERN_ERR PFX "registration of /dev/crypto failed\n");
1363 return rc;
1366 return 0;
1369 static void __exit
1370 cryptodev_deregister(void)
1372 misc_deregister(&cryptodev);
1375 /* ====== Module init/exit ====== */
1376 static int __init init_cryptodev(void)
1378 int rc;
1380 cryptodev_wq = create_workqueue("cryptodev_queue");
1381 if (unlikely(!cryptodev_wq)) {
1382 printk(KERN_ERR PFX "failed to allocate the cryptodev workqueue\n");
1383 return -EFAULT;
1386 rc = cryptodev_register();
1387 if (unlikely(rc)) {
1388 destroy_workqueue(cryptodev_wq);
1389 return rc;
1392 printk(KERN_INFO PFX "driver %s loaded.\n", VERSION);
1394 return 0;
1397 static void __exit exit_cryptodev(void)
1399 flush_workqueue(cryptodev_wq);
1400 destroy_workqueue(cryptodev_wq);
1402 cryptodev_deregister();
1403 printk(KERN_INFO PFX "driver unloaded.\n");
1406 module_init(init_cryptodev);
1407 module_exit(exit_cryptodev);