/*
 * Driver for /dev/crypto device (aka CryptoDev)
 *
 * Copyright (c) 2004 Michal Ludvig <mludvig@logix.net.nz>, SuSE Labs
 * Copyright (c) 2009,2010,2011 Nikos Mavrogiannopoulos <nmav@gnutls.org>
 * Copyright (c) 2010 Phil Sutter
 *
 * This file is part of linux cryptodev.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

/*
 * Device /dev/crypto provides an interface for
 * accessing kernel CryptoAPI algorithms (ciphers,
 * hashes) from userspace programs.
 *
 * The /dev/crypto interface was originally introduced in
 * OpenBSD, and this module attempts to keep that API compatible.
 */
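
/*
 * Illustrative userspace usage (sketch only, not part of this module;
 * assumes the CIOCGSESSION/CIOCCRYPT/CIOCFSESSION ioctls declared in
 * crypto/cryptodev.h and a key/iv/plaintext prepared by the caller):
 *
 *	int cfd = open("/dev/crypto", O_RDWR);
 *	struct session_op sess = {
 *		.cipher = CRYPTO_AES_CBC,
 *		.keylen = 16,
 *		.key    = key,
 *	};
 *	ioctl(cfd, CIOCGSESSION, &sess);	// create a session, returns sess.ses
 *
 *	struct crypt_op op = {
 *		.ses = sess.ses,
 *		.op  = COP_ENCRYPT,
 *		.len = sizeof(plaintext),
 *		.src = plaintext,
 *		.dst = ciphertext,
 *		.iv  = iv,
 *	};
 *	ioctl(cfd, CIOCCRYPT, &op);		// run one operation synchronously
 *	ioctl(cfd, CIOCFSESSION, &sess.ses);	// tear the session down
 */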

#include <crypto/hash.h>
#include <linux/crypto.h>
#include <linux/highmem.h>
#include <linux/ioctl.h>
#include <linux/random.h>
#include <linux/syscalls.h>
#include <linux/pagemap.h>
#include <linux/poll.h>
#include <linux/uaccess.h>
#include <crypto/cryptodev.h>
#include <linux/scatterlist.h>
/* Needed by the code below; may also be pulled in via cryptodev_int.h. */
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include "cryptodev_int.h"

MODULE_AUTHOR("Nikos Mavrogiannopoulos <nmav@gnutls.org>");
MODULE_DESCRIPTION("CryptoDev driver");
MODULE_LICENSE("GPL");

/* ====== Compile-time config ====== */

/* Default (pre-allocated) and maximum size of the job queue.
 * The count covers free, pending and done items together. */
#define DEF_COP_RINGSIZE 16
#define MAX_COP_RINGSIZE 64
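
/*
 * Note: the ring starts with DEF_COP_RINGSIZE pre-allocated items and, as
 * crypto_async_run() below shows, grows on demand (one allocation at a
 * time) until itemcount reaches MAX_COP_RINGSIZE; after that an async
 * submission returns -EBUSY until a slot is recycled through "done".
 */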

/* ====== Module parameters ====== */

int cryptodev_verbosity;
module_param(cryptodev_verbosity, int, 0644);
MODULE_PARM_DESC(cryptodev_verbosity, "0: normal, 1: verbose, 2: debug");

/* ====== CryptoAPI ====== */
struct todo_list_item {
	struct list_head __hook;
	struct kernel_crypt_op kcop;
	int result;
};

struct locked_list {
	struct list_head list;
	struct mutex lock;
};

struct crypt_priv {
	struct fcrypt fcrypt;
	struct locked_list free, todo, done;
	int itemcount;
	struct work_struct cryptask;
	wait_queue_head_t user_waiter;
};

#define FILL_SG(sg, ptr, len)					\
	do {							\
		(sg)->page = virt_to_page(ptr);			\
		(sg)->offset = offset_in_page(ptr);		\
		(sg)->length = len;				\
		(sg)->dma_address = 0;				\
	} while (0)

/* cryptodev's own workqueue, keeps crypto tasks from disturbing the force */
static struct workqueue_struct *cryptodev_wq;

/* Prepare session for future use. */
static int
crypto_create_session(struct fcrypt *fcr, struct session_op *sop)
{
	struct csession *ses_new = NULL, *ses_ptr;
	int ret = 0;
	const char *alg_name = NULL;
	const char *hash_name = NULL;
	int hmac_mode = 1, stream = 0, aead = 0;

	/* Does the request make sense? */
	if (unlikely(!sop->cipher && !sop->mac)) {
		dprintk(1, KERN_DEBUG, "Both 'cipher' and 'mac' unset.\n");
		return -EINVAL;
	}

	switch (sop->cipher) {
	case 0:
		break;
	case CRYPTO_DES_CBC:
		alg_name = "cbc(des)";
		break;
	case CRYPTO_3DES_CBC:
		alg_name = "cbc(des3_ede)";
		break;
	case CRYPTO_BLF_CBC:
		alg_name = "cbc(blowfish)";
		break;
	case CRYPTO_AES_CBC:
		alg_name = "cbc(aes)";
		break;
	case CRYPTO_AES_ECB:
		alg_name = "ecb(aes)";
		break;
	case CRYPTO_CAMELLIA_CBC:
		alg_name = "cbc(camellia)";
		break;
	case CRYPTO_AES_CTR:
		alg_name = "ctr(aes)";
		stream = 1;
		break;
	case CRYPTO_AES_GCM:
		alg_name = "gcm(aes)";
		stream = 1;
		aead = 1;
		break;
	case CRYPTO_NULL:
		alg_name = "ecb(cipher_null)";
		stream = 1;
		break;
	default:
		dprintk(1, KERN_DEBUG, "%s: bad cipher: %d\n", __func__,
			sop->cipher);
		return -EINVAL;
	}

	switch (sop->mac) {
	case 0:
		break;
	case CRYPTO_MD5_HMAC:
		hash_name = "hmac(md5)";
		break;
	case CRYPTO_RIPEMD160_HMAC:
		hash_name = "hmac(rmd160)";
		break;
	case CRYPTO_SHA1_HMAC:
		hash_name = "hmac(sha1)";
		break;
	case CRYPTO_SHA2_256_HMAC:
		hash_name = "hmac(sha256)";
		break;
	case CRYPTO_SHA2_384_HMAC:
		hash_name = "hmac(sha384)";
		break;
	case CRYPTO_SHA2_512_HMAC:
		hash_name = "hmac(sha512)";
		break;

	/* plain (non-keyed) hashes */
	case CRYPTO_RIPEMD160:
		hash_name = "rmd160";
		hmac_mode = 0;
		break;
	case CRYPTO_SHA2_256:
		hash_name = "sha256";
		hmac_mode = 0;
		break;
	case CRYPTO_SHA2_384:
		hash_name = "sha384";
		hmac_mode = 0;
		break;
	case CRYPTO_SHA2_512:
		hash_name = "sha512";
		hmac_mode = 0;
		break;
	default:
		dprintk(1, KERN_DEBUG, "%s: bad mac: %d\n", __func__,
			sop->mac);
		return -EINVAL;
	}
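
	/*
	 * Example (illustrative note only; the mapping is exactly the two
	 * switches above): a session_op with .cipher = CRYPTO_AES_CBC and
	 * .mac = CRYPTO_SHA1_HMAC requests the kernel CryptoAPI algorithms
	 * "cbc(aes)" and "hmac(sha1)" below.
	 */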

	/* Create a session and add it to the list. */
	ses_new = kzalloc(sizeof(*ses_new), GFP_KERNEL);
	if (!ses_new)
		return -ENOMEM;

	/* Set up the crypto transform. */
	if (alg_name) {
		uint8_t keyp[CRYPTO_CIPHER_MAX_KEY_LEN];

		if (unlikely(sop->keylen > CRYPTO_CIPHER_MAX_KEY_LEN)) {
			dprintk(1, KERN_DEBUG,
				"Setting key failed for %s-%zu.\n",
				alg_name, (size_t)sop->keylen*8);
			ret = -EINVAL;
			goto error_cipher;
		}

		if (unlikely(copy_from_user(keyp, sop->key, sop->keylen))) {
			ret = -EFAULT;
			goto error_cipher;
		}

		ret = cryptodev_cipher_init(&ses_new->cdata, alg_name, keyp,
						sop->keylen, stream, aead);
		if (ret < 0) {
			dprintk(1, KERN_DEBUG,
				"%s: Failed to load cipher for %s\n",
				__func__, alg_name);
			ret = -EINVAL;
			goto error_cipher;
		}
	}

	if (hash_name && aead == 0) {
		uint8_t keyp[CRYPTO_HMAC_MAX_KEY_LEN];

		if (unlikely(sop->mackeylen > CRYPTO_HMAC_MAX_KEY_LEN)) {
			dprintk(1, KERN_DEBUG,
				"Setting key failed for %s-%zu.\n",
				alg_name, (size_t)sop->mackeylen*8);
			ret = -EINVAL;
			goto error_hash;
		}

		if (sop->mackey && unlikely(copy_from_user(keyp, sop->mackey,
						sop->mackeylen))) {
			ret = -EFAULT;
			goto error_hash;
		}

		ret = cryptodev_hash_init(&ses_new->hdata, hash_name, hmac_mode,
						keyp, sop->mackeylen);
		if (ret != 0) {
			dprintk(1, KERN_DEBUG,
				"%s: Failed to load hash for %s\n",
				__func__, hash_name);
			ret = -EINVAL;
			goto error_hash;
		}
	}

	ses_new->alignmask = max(ses_new->cdata.alignmask,
	                         ses_new->hdata.alignmask);
	dprintk(2, KERN_DEBUG, "%s: got alignmask %d\n", __func__,
		ses_new->alignmask);

	ses_new->array_size = DEFAULT_PREALLOC_PAGES;
	dprintk(2, KERN_DEBUG, "%s: preallocating for %d user pages\n",
		__func__, ses_new->array_size);
	ses_new->pages = kzalloc(ses_new->array_size *
			sizeof(struct page *), GFP_KERNEL);
	ses_new->sg = kzalloc(ses_new->array_size *
			sizeof(struct scatterlist), GFP_KERNEL);
	if (ses_new->sg == NULL || ses_new->pages == NULL) {
		dprintk(0, KERN_DEBUG, "Memory error\n");
		ret = -ENOMEM;
		goto error_hash;
	}

	/* put the new session to the list */
	get_random_bytes(&ses_new->sid, sizeof(ses_new->sid));
	mutex_init(&ses_new->sem);

	mutex_lock(&fcr->sem);
restart:
	list_for_each_entry(ses_ptr, &fcr->list, entry) {
		/* Check for duplicate SID */
		if (unlikely(ses_new->sid == ses_ptr->sid)) {
			get_random_bytes(&ses_new->sid, sizeof(ses_new->sid));
			/* Unless we have a broken RNG this
			   shouldn't loop forever... ;-) */
			goto restart;
		}
	}

	list_add(&ses_new->entry, &fcr->list);
	mutex_unlock(&fcr->sem);

	/* Fill in some values for the user. */
	sop->ses = ses_new->sid;
	return 0;

error_hash:
	cryptodev_cipher_deinit(&ses_new->cdata);
	kfree(ses_new->sg);
	kfree(ses_new->pages);
error_cipher:
	kfree(ses_new);

	return ret;
}

/* Everything that needs to be done when removing a session. */
static inline void
crypto_destroy_session(struct csession *ses_ptr)
{
	if (!mutex_trylock(&ses_ptr->sem)) {
		dprintk(2, KERN_DEBUG, "Waiting for semaphore of sid=0x%08X\n",
			ses_ptr->sid);
		mutex_lock(&ses_ptr->sem);
	}
	dprintk(2, KERN_DEBUG, "Removed session 0x%08X\n", ses_ptr->sid);
	cryptodev_cipher_deinit(&ses_ptr->cdata);
	cryptodev_hash_deinit(&ses_ptr->hdata);
	dprintk(2, KERN_DEBUG, "%s: freeing space for %d user pages\n",
		__func__, ses_ptr->array_size);
	kfree(ses_ptr->pages);
	kfree(ses_ptr->sg);
	mutex_unlock(&ses_ptr->sem);
	mutex_destroy(&ses_ptr->sem);
	kfree(ses_ptr);
}
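
/*
 * Locking note (descriptive only): sessions are looked up under fcr->sem
 * and then individually locked via ses_ptr->sem, so destruction first
 * waits for any operation still holding the per-session mutex before
 * tearing the transforms down and freeing the preallocated page arrays.
 */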

/* Look up a session by ID and remove it. */
static int
crypto_finish_session(struct fcrypt *fcr, uint32_t sid)
{
	struct csession *tmp, *ses_ptr;
	struct list_head *head;
	int ret = 0;

	mutex_lock(&fcr->sem);
	head = &fcr->list;
	list_for_each_entry_safe(ses_ptr, tmp, head, entry) {
		if (ses_ptr->sid == sid) {
			list_del(&ses_ptr->entry);
			crypto_destroy_session(ses_ptr);
			break;
		}
	}

	if (unlikely(!ses_ptr)) {
		dprintk(1, KERN_ERR, "Session with sid=0x%08X not found!\n",
			sid);
		ret = -ENOENT;
	}
	mutex_unlock(&fcr->sem);

	return ret;
}

/* Remove all sessions when closing the file */
static int
crypto_finish_all_sessions(struct fcrypt *fcr)
{
	struct csession *tmp, *ses_ptr;
	struct list_head *head;

	mutex_lock(&fcr->sem);

	head = &fcr->list;
	list_for_each_entry_safe(ses_ptr, tmp, head, entry) {
		list_del(&ses_ptr->entry);
		crypto_destroy_session(ses_ptr);
	}
	mutex_unlock(&fcr->sem);

	return 0;
}

/* Look up session by session ID. The returned session is locked. */
static struct csession *
crypto_get_session_by_sid(struct fcrypt *fcr, uint32_t sid)
{
	struct csession *ses_ptr, *retval = NULL;

	if (unlikely(fcr == NULL))
		return NULL;

	mutex_lock(&fcr->sem);
	list_for_each_entry(ses_ptr, &fcr->list, entry) {
		if (ses_ptr->sid == sid) {
			mutex_lock(&ses_ptr->sem);
			retval = ses_ptr;
			break;
		}
	}
	mutex_unlock(&fcr->sem);

	return retval;
}

static void cryptask_routine(struct work_struct *work)
{
	struct crypt_priv *pcr = container_of(work, struct crypt_priv, cryptask);
	struct todo_list_item *item;
	LIST_HEAD(tmp);

	/* fetch all pending jobs into the temporary list */
	mutex_lock(&pcr->todo.lock);
	list_cut_position(&tmp, &pcr->todo.list, pcr->todo.list.prev);
	mutex_unlock(&pcr->todo.lock);

	/* handle each job locklessly */
	list_for_each_entry(item, &tmp, __hook) {
		item->result = crypto_run(&pcr->fcrypt, &item->kcop);
		if (unlikely(item->result))
			dprintk(0, KERN_ERR, "%s: crypto_run() failed: %d\n",
				__func__, item->result);
	}

	/* push all handled jobs to the done list at once */
	mutex_lock(&pcr->done.lock);
	list_splice_tail(&tmp, &pcr->done.list);
	mutex_unlock(&pcr->done.lock);

	/* wake for POLLIN */
	wake_up_interruptible(&pcr->user_waiter);
}
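
/*
 * Note: cryptask_routine() runs on cryptodev_wq in process context, so the
 * queued kernel_crypt_op executes outside the submitting thread.  Only the
 * list heads are protected by the per-ring mutexes, which is why pending
 * jobs are first cut onto a private list and then processed without locks.
 */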

/* ====== /dev/crypto ====== */

static int
cryptodev_open(struct inode *inode, struct file *filp)
{
	struct todo_list_item *tmp;
	struct crypt_priv *pcr;
	int i;

	pcr = kmalloc(sizeof(*pcr), GFP_KERNEL);
	if (!pcr)
		return -ENOMEM;

	memset(pcr, 0, sizeof(*pcr));
	mutex_init(&pcr->fcrypt.sem);
	INIT_LIST_HEAD(&pcr->fcrypt.list);

	INIT_LIST_HEAD(&pcr->free.list);
	INIT_LIST_HEAD(&pcr->todo.list);
	INIT_LIST_HEAD(&pcr->done.list);
	INIT_WORK(&pcr->cryptask, cryptask_routine);
	mutex_init(&pcr->free.lock);
	mutex_init(&pcr->todo.lock);
	mutex_init(&pcr->done.lock);
	init_waitqueue_head(&pcr->user_waiter);

	for (i = 0; i < DEF_COP_RINGSIZE; i++) {
		tmp = kzalloc(sizeof(struct todo_list_item), GFP_KERNEL);
		pcr->itemcount++;
		dprintk(2, KERN_DEBUG, "%s: allocated new item at %lx\n",
			__func__, (unsigned long)tmp);
		list_add(&tmp->__hook, &pcr->free.list);
	}

	filp->private_data = pcr;
	dprintk(2, KERN_DEBUG,
		"Cryptodev handle initialised, %d elements in queue\n",
		pcr->itemcount);
	return 0;
}

static int
cryptodev_release(struct inode *inode, struct file *filp)
{
	struct crypt_priv *pcr = filp->private_data;
	struct todo_list_item *item, *item_safe;
	int items_freed = 0;

	if (!pcr)
		return 0;

	cancel_work_sync(&pcr->cryptask);

	mutex_destroy(&pcr->todo.lock);
	mutex_destroy(&pcr->done.lock);
	mutex_destroy(&pcr->free.lock);

	list_splice_tail(&pcr->todo.list, &pcr->free.list);
	list_splice_tail(&pcr->done.list, &pcr->free.list);

	list_for_each_entry_safe(item, item_safe, &pcr->free.list, __hook) {
		dprintk(2, KERN_DEBUG, "%s: freeing item at %lx\n",
			__func__, (unsigned long)item);
		list_del(&item->__hook);
		kfree(item);
		items_freed++;
	}

	if (items_freed != pcr->itemcount) {
		dprintk(0, KERN_ERR,
			"%s: freed %d items, but %d should exist!\n",
			__func__, items_freed, pcr->itemcount);
	}

	crypto_finish_all_sessions(&pcr->fcrypt);
	kfree(pcr);
	filp->private_data = NULL;

	dprintk(2, KERN_DEBUG,
		"Cryptodev handle deinitialised, %d elements freed\n",
		items_freed);
	return 0;
}

static int
clonefd(struct file *filp)
{
	int ret;

	ret = get_unused_fd();
	if (ret >= 0) {
		get_file(filp);
		fd_install(ret, filp);
	}

	return ret;
}

/* enqueue a job for asynchronous completion
 *
 * returns:
 * 0 on success
 * -EBUSY when there are no free queue slots left
 *        (and the number of slots has reached its MAX_COP_RINGSIZE)
 * -EFAULT when there was a memory allocation error
 */
static int crypto_async_run(struct crypt_priv *pcr, struct kernel_crypt_op *kcop)
{
	struct todo_list_item *item = NULL;

	mutex_lock(&pcr->free.lock);
	if (likely(!list_empty(&pcr->free.list))) {
		item = list_first_entry(&pcr->free.list,
				struct todo_list_item, __hook);
		list_del(&item->__hook);
	} else if (pcr->itemcount < MAX_COP_RINGSIZE) {
		pcr->itemcount++;
	} else {
		mutex_unlock(&pcr->free.lock);
		return -EBUSY;
	}
	mutex_unlock(&pcr->free.lock);

	if (unlikely(!item)) {
		item = kzalloc(sizeof(struct todo_list_item), GFP_KERNEL);
		if (unlikely(!item))
			return -EFAULT;
		dprintk(1, KERN_INFO, "%s: increased item count to %d\n",
			__func__, pcr->itemcount);
	}

	memcpy(&item->kcop, kcop, sizeof(struct kernel_crypt_op));

	mutex_lock(&pcr->todo.lock);
	list_add_tail(&item->__hook, &pcr->todo.list);
	mutex_unlock(&pcr->todo.lock);

	queue_work(cryptodev_wq, &pcr->cryptask);
	return 0;
}
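
/*
 * Illustrative userspace pattern for the async path (sketch only, outside
 * the scope of this module): submit with CIOCASYNCCRYPT, wait for POLLIN,
 * then collect the result with CIOCASYNCFETCH:
 *
 *	struct crypt_op op = { ... };
 *	ioctl(cfd, CIOCASYNCCRYPT, &op);
 *
 *	struct pollfd pfd = { .fd = cfd, .events = POLLIN };
 *	poll(&pfd, 1, -1);
 *	ioctl(cfd, CIOCASYNCFETCH, &op);	// fails with EBUSY if nothing is done yet
 */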

/* get the first completed job from the "done" queue
 *
 * returns:
 * -EBUSY if no completed jobs are ready (yet)
 * the return value of crypto_run() otherwise */
static int crypto_async_fetch(struct crypt_priv *pcr,
		struct kernel_crypt_op *kcop)
{
	struct todo_list_item *item;
	int retval;

	mutex_lock(&pcr->done.lock);
	if (list_empty(&pcr->done.list)) {
		mutex_unlock(&pcr->done.lock);
		return -EBUSY;
	}
	item = list_first_entry(&pcr->done.list, struct todo_list_item, __hook);
	list_del(&item->__hook);
	mutex_unlock(&pcr->done.lock);

	memcpy(kcop, &item->kcop, sizeof(struct kernel_crypt_op));
	retval = item->result;

	mutex_lock(&pcr->free.lock);
	list_add_tail(&item->__hook, &pcr->free.list);
	mutex_unlock(&pcr->free.lock);

	/* wake for POLLOUT */
	wake_up_interruptible(&pcr->user_waiter);

	return retval;
}

/* this function has to be called from process context */
static int fill_kcop_from_cop(struct kernel_crypt_op *kcop, struct fcrypt *fcr)
{
	struct crypt_op *cop = &kcop->cop;
	struct csession *ses_ptr;
	int rc;

	/* this also enters ses_ptr->sem */
	ses_ptr = crypto_get_session_by_sid(fcr, cop->ses);
	if (unlikely(!ses_ptr)) {
		dprintk(1, KERN_ERR, "invalid session ID=0x%08X\n", cop->ses);
		return -EINVAL;
	}
	kcop->ivlen = cop->iv ? ses_ptr->cdata.ivsize : 0;
	kcop->digestsize = 0; /* will be updated during operation */

	crypto_put_session(ses_ptr);

	kcop->task = current;
	kcop->mm = current->mm;

	if (cop->iv) {
		rc = copy_from_user(kcop->iv, cop->iv, kcop->ivlen);
		if (unlikely(rc)) {
			dprintk(1, KERN_ERR,
				"error copying IV (%d bytes), copy_from_user returned %d for address %lx\n",
				kcop->ivlen, rc, (unsigned long)cop->iv);
			return -EFAULT;
		}
	}

	return 0;
}

/* this function has to be called from process context */
static int fill_cop_from_kcop(struct kernel_crypt_op *kcop, struct fcrypt *fcr)
{
	int ret;

	if (kcop->digestsize) {
		ret = copy_to_user(kcop->cop.mac,
				kcop->hash_output, kcop->digestsize);
		if (unlikely(ret))
			return -EFAULT;
	}
	if (kcop->ivlen && kcop->cop.flags & COP_FLAG_WRITE_IV) {
		ret = copy_to_user(kcop->cop.iv,
				kcop->iv, kcop->ivlen);
		if (unlikely(ret))
			return -EFAULT;
	}
	return 0;
}

static int kcop_from_user(struct kernel_crypt_op *kcop,
			struct fcrypt *fcr, void __user *arg)
{
	if (unlikely(copy_from_user(&kcop->cop, arg, sizeof(kcop->cop))))
		return -EFAULT;

	return fill_kcop_from_cop(kcop, fcr);
}

static int kcop_to_user(struct kernel_crypt_op *kcop,
		struct fcrypt *fcr, void __user *arg)
{
	int ret;

	ret = fill_cop_from_kcop(kcop, fcr);
	if (unlikely(ret))
		return ret;

	if (unlikely(copy_to_user(arg, &kcop->cop, sizeof(kcop->cop))))
		return -EFAULT;
	return 0;
}

static inline void tfm_info_to_alg_info(struct alg_info *dst, struct crypto_tfm *tfm)
{
	snprintf(dst->cra_name, CRYPTODEV_MAX_ALG_NAME,
			"%s", crypto_tfm_alg_name(tfm));
	snprintf(dst->cra_driver_name, CRYPTODEV_MAX_ALG_NAME,
			"%s", crypto_tfm_alg_driver_name(tfm));
}

static int get_session_info(struct fcrypt *fcr, struct session_info_op *siop)
{
	struct csession *ses_ptr;

	/* this also enters ses_ptr->sem */
	ses_ptr = crypto_get_session_by_sid(fcr, siop->ses);
	if (unlikely(!ses_ptr)) {
		dprintk(1, KERN_ERR, "invalid session ID=0x%08X\n", siop->ses);
		return -EINVAL;
	}

	if (ses_ptr->cdata.init) {
		if (ses_ptr->cdata.aead == 0)
			tfm_info_to_alg_info(&siop->cipher_info,
				crypto_ablkcipher_tfm(ses_ptr->cdata.async.s));
		else
			tfm_info_to_alg_info(&siop->cipher_info,
				crypto_aead_tfm(ses_ptr->cdata.async.as));
	}
	if (ses_ptr->hdata.init) {
		tfm_info_to_alg_info(&siop->hash_info,
				crypto_ahash_tfm(ses_ptr->hdata.async.s));
	}

	siop->alignmask = ses_ptr->alignmask;

	crypto_put_session(ses_ptr);
	return 0;
}

static long
cryptodev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg_)
{
	void __user *arg = (void __user *)arg_;
	int __user *p = arg;
	struct session_op sop;
	struct kernel_crypt_op kcop;
	struct kernel_crypt_auth_op kcaop;
	struct crypt_priv *pcr = filp->private_data;
	struct fcrypt *fcr;
	struct session_info_op siop;
	uint32_t ses;
	int ret, fd;

	if (unlikely(!pcr))
		BUG();

	fcr = &pcr->fcrypt;

	switch (cmd) {
	case CIOCASYMFEAT:
		return put_user(0, p);
	case CRIOGET:
		fd = clonefd(filp);
		ret = put_user(fd, p);
		if (unlikely(ret)) {
			sys_close(fd);
			return ret;
		}
		return ret;
	case CIOCGSESSION:
		if (unlikely(copy_from_user(&sop, arg, sizeof(sop))))
			return -EFAULT;

		ret = crypto_create_session(fcr, &sop);
		if (unlikely(ret))
			return ret;
		ret = copy_to_user(arg, &sop, sizeof(sop));
		if (unlikely(ret)) {
			crypto_finish_session(fcr, sop.ses);
			return -EFAULT;
		}
		return ret;
	case CIOCFSESSION:
		ret = get_user(ses, (uint32_t __user *)arg);
		if (unlikely(ret))
			return ret;
		ret = crypto_finish_session(fcr, ses);
		return ret;
	case CIOCGSESSINFO:
		if (unlikely(copy_from_user(&siop, arg, sizeof(siop))))
			return -EFAULT;

		ret = get_session_info(fcr, &siop);
		if (unlikely(ret))
			return ret;
		return copy_to_user(arg, &siop, sizeof(siop));
	case CIOCCRYPT:
		if (unlikely(ret = kcop_from_user(&kcop, fcr, arg))) {
			dprintk(1, KERN_WARNING, "Error copying from user");
			return ret;
		}

		ret = crypto_run(fcr, &kcop);
		if (unlikely(ret)) {
			dprintk(1, KERN_WARNING, "Error in crypto_run");
			return ret;
		}

		return kcop_to_user(&kcop, fcr, arg);
	case CIOCAUTHCRYPT:
		if (unlikely(ret = kcaop_from_user(&kcaop, fcr, arg))) {
			dprintk(1, KERN_WARNING, "Error copying from user");
			return ret;
		}

		ret = crypto_auth_run(fcr, &kcaop);
		if (unlikely(ret)) {
			dprintk(1, KERN_WARNING, "Error in crypto_auth_run");
			return ret;
		}
		return kcaop_to_user(&kcaop, fcr, arg);
	case CIOCASYNCCRYPT:
		if (unlikely(ret = kcop_from_user(&kcop, fcr, arg)))
			return ret;

		return crypto_async_run(pcr, &kcop);
	case CIOCASYNCFETCH:
		ret = crypto_async_fetch(pcr, &kcop);
		if (unlikely(ret))
			return ret;

		return kcop_to_user(&kcop, fcr, arg);
	default:
		return -EINVAL;
	}
}

/* compatibility code for 32bit userlands */
#ifdef CONFIG_COMPAT

static inline void
compat_to_session_op(struct compat_session_op *compat, struct session_op *sop)
{
	sop->cipher = compat->cipher;
	sop->mac = compat->mac;
	sop->keylen = compat->keylen;

	sop->key = compat_ptr(compat->key);
	sop->mackeylen = compat->mackeylen;
	sop->mackey = compat_ptr(compat->mackey);
	sop->ses = compat->ses;
}

static inline void
session_op_to_compat(struct session_op *sop, struct compat_session_op *compat)
{
	compat->cipher = sop->cipher;
	compat->mac = sop->mac;
	compat->keylen = sop->keylen;

	compat->key = ptr_to_compat(sop->key);
	compat->mackeylen = sop->mackeylen;
	compat->mackey = ptr_to_compat(sop->mackey);
	compat->ses = sop->ses;
}

static inline void
compat_to_crypt_op(struct compat_crypt_op *compat, struct crypt_op *cop)
{
	cop->ses = compat->ses;
	cop->op = compat->op;
	cop->flags = compat->flags;
	cop->len = compat->len;

	cop->src = compat_ptr(compat->src);
	cop->dst = compat_ptr(compat->dst);
	cop->mac = compat_ptr(compat->mac);
	cop->iv = compat_ptr(compat->iv);
}

static inline void
crypt_op_to_compat(struct crypt_op *cop, struct compat_crypt_op *compat)
{
	compat->ses = cop->ses;
	compat->op = cop->op;
	compat->flags = cop->flags;
	compat->len = cop->len;

	compat->src = ptr_to_compat(cop->src);
	compat->dst = ptr_to_compat(cop->dst);
	compat->mac = ptr_to_compat(cop->mac);
	compat->iv = ptr_to_compat(cop->iv);
}
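
/*
 * Descriptive note: the compat_* structures carry 32-bit pointer fields
 * from a 32-bit userland, so compat_ptr()/ptr_to_compat() translate them
 * to and from native kernel pointers above, while the integer fields
 * (ses, op, flags, len, keylen, ...) are copied as-is.
 */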

static int compat_kcop_from_user(struct kernel_crypt_op *kcop,
				struct fcrypt *fcr, void __user *arg)
{
	struct compat_crypt_op compat_cop;

	if (unlikely(copy_from_user(&compat_cop, arg, sizeof(compat_cop))))
		return -EFAULT;
	compat_to_crypt_op(&compat_cop, &kcop->cop);

	return fill_kcop_from_cop(kcop, fcr);
}

static int compat_kcop_to_user(struct kernel_crypt_op *kcop,
				struct fcrypt *fcr, void __user *arg)
{
	int ret;
	struct compat_crypt_op compat_cop;

	ret = fill_cop_from_kcop(kcop, fcr);
	if (unlikely(ret)) {
		dprintk(1, KERN_WARNING, "Error in fill_cop_from_kcop");
		return ret;
	}
	crypt_op_to_compat(&kcop->cop, &compat_cop);

	if (unlikely(copy_to_user(arg, &compat_cop, sizeof(compat_cop)))) {
		dprintk(1, KERN_WARNING, "Error copying to user");
		return -EFAULT;
	}
	return 0;
}

static long
cryptodev_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg_)
{
	void __user *arg = (void __user *)arg_;
	struct crypt_priv *pcr = file->private_data;
	struct fcrypt *fcr;
	struct session_op sop;
	struct compat_session_op compat_sop;
	struct kernel_crypt_op kcop;
	int ret;

	if (unlikely(!pcr))
		BUG();

	fcr = &pcr->fcrypt;

	switch (cmd) {
	case CIOCASYMFEAT:
	case CRIOGET:
	case CIOCFSESSION:
	case CIOCGSESSINFO:
		return cryptodev_ioctl(file, cmd, arg_);

	case COMPAT_CIOCGSESSION:
		if (unlikely(copy_from_user(&compat_sop, arg,
					    sizeof(compat_sop))))
			return -EFAULT;
		compat_to_session_op(&compat_sop, &sop);

		ret = crypto_create_session(fcr, &sop);
		if (unlikely(ret))
			return ret;

		session_op_to_compat(&sop, &compat_sop);
		ret = copy_to_user(arg, &compat_sop, sizeof(compat_sop));
		if (unlikely(ret)) {
			crypto_finish_session(fcr, sop.ses);
			return -EFAULT;
		}
		return ret;

	case COMPAT_CIOCCRYPT:
		ret = compat_kcop_from_user(&kcop, fcr, arg);
		if (unlikely(ret))
			return ret;

		ret = crypto_run(fcr, &kcop);
		if (unlikely(ret))
			return ret;

		return compat_kcop_to_user(&kcop, fcr, arg);
	case COMPAT_CIOCASYNCCRYPT:
		if (unlikely(ret = compat_kcop_from_user(&kcop, fcr, arg)))
			return ret;

		return crypto_async_run(pcr, &kcop);
	case COMPAT_CIOCASYNCFETCH:
		ret = crypto_async_fetch(pcr, &kcop);
		if (unlikely(ret))
			return ret;

		return compat_kcop_to_user(&kcop, fcr, arg);
	default:
		return -EINVAL;
	}
}

#endif /* CONFIG_COMPAT */

static unsigned int cryptodev_poll(struct file *file, poll_table *wait)
{
	struct crypt_priv *pcr = file->private_data;
	unsigned int ret = 0;

	poll_wait(file, &pcr->user_waiter, wait);

	if (!list_empty_careful(&pcr->done.list))
		ret |= POLLIN | POLLRDNORM;
	if (!list_empty_careful(&pcr->free.list) || pcr->itemcount < MAX_COP_RINGSIZE)
		ret |= POLLOUT | POLLWRNORM;

	return ret;
}
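
/*
 * Poll semantics above: POLLIN/POLLRDNORM means at least one async job sits
 * on the "done" list, so CIOCASYNCFETCH will not return -EBUSY;
 * POLLOUT/POLLWRNORM means a free slot exists (or the ring can still grow),
 * so a CIOCASYNCCRYPT submission should be accepted.  A userspace event loop
 * can therefore multiplex the crypto fd with poll()/select() like any other fd.
 */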

static const struct file_operations cryptodev_fops = {
	.owner = THIS_MODULE,
	.open = cryptodev_open,
	.release = cryptodev_release,
	.unlocked_ioctl = cryptodev_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = cryptodev_compat_ioctl,
#endif /* CONFIG_COMPAT */
	.poll = cryptodev_poll,
};

static struct miscdevice cryptodev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "crypto",
	.fops = &cryptodev_fops,
};

static int cryptodev_register(void)
{
	int rc;

	rc = misc_register(&cryptodev);
	if (unlikely(rc)) {
		printk(KERN_ERR PFX "registration of /dev/crypto failed\n");
		return rc;
	}
	return 0;
}

static void cryptodev_deregister(void)
{
	misc_deregister(&cryptodev);
}

/* ====== Module init/exit ====== */
static int __init init_cryptodev(void)
{
	int rc;

	cryptodev_wq = create_workqueue("cryptodev_queue");
	if (unlikely(!cryptodev_wq)) {
		printk(KERN_ERR PFX "failed to allocate the cryptodev workqueue\n");
		return -EFAULT;
	}

	rc = cryptodev_register();
	if (unlikely(rc)) {
		destroy_workqueue(cryptodev_wq);
		return rc;
	}

	printk(KERN_INFO PFX "driver %s loaded.\n", VERSION);

	return 0;
}

static void __exit exit_cryptodev(void)
{
	flush_workqueue(cryptodev_wq);
	destroy_workqueue(cryptodev_wq);

	cryptodev_deregister();
	printk(KERN_INFO PFX "driver unloaded.\n");
}

module_init(init_cryptodev);
module_exit(exit_cryptodev);