/*
 * Driver for /dev/crypto device (aka CryptoDev)
 *
 * Copyright (c) 2011, 2012 OpenSSL Software Foundation, Inc.
 *
 * Author: Nikos Mavrogiannopoulos
 *
 * This file is part of linux cryptodev.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

/*
 * This file handles the AEAD part of /dev/crypto.
 */
#include <crypto/hash.h>
#include <linux/crypto.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/ioctl.h>
#include <linux/random.h>
#include <linux/syscalls.h>
#include <linux/pagemap.h>
#include <linux/poll.h>
#include <linux/uaccess.h>
#include <crypto/cryptodev.h>
#include <crypto/scatterwalk.h>
#include <linux/scatterlist.h>
#include "cryptodev_int.h"
/* make caop->dst available in scatterlist.
 * (caop->src is assumed to be equal to caop->dst)
 */
static int get_userbuf_tls(struct csession *ses, struct kernel_crypt_auth_op *kcaop,
			struct scatterlist **dst_sg)
{
	int pagecount, rc;
	struct crypt_auth_op *caop = &kcaop->caop;

	if (caop->dst == NULL)
		return -EINVAL;

	if (ses->alignmask) {
		if (!IS_ALIGNED((unsigned long)caop->dst, ses->alignmask))
			dprintk(2, KERN_WARNING, "%s: careful - destination address %lx is not %d byte aligned\n",
					__func__, (unsigned long)caop->dst, ses->alignmask + 1);
	}

	if (kcaop->dst_len == 0) {
		dprintk(1, KERN_WARNING, "Destination length cannot be zero\n");
		return -EINVAL;
	}

	pagecount = PAGECOUNT(caop->dst, kcaop->dst_len);

	ses->used_pages = pagecount;
	ses->readonly_pages = 0;

	rc = adjust_sg_array(ses, pagecount);
	if (rc)
		return rc;

	rc = __get_userbuf(caop->dst, kcaop->dst_len, 1, pagecount,
			ses->pages, ses->sg, kcaop->task, kcaop->mm);
	if (unlikely(rc)) {
		dprintk(1, KERN_ERR,
			"failed to get user pages for data input\n");
		return -EINVAL;
	}

	(*dst_sg) = ses->sg;

	return 0;
}
#define MAX_SRTP_AUTH_DATA_DIFF 256

/* Makes caop->auth_src available as scatterlist.
 * It also provides a pointer to caop->dst, which however,
 * is assumed to be within the caop->auth_src buffer. If not
 * (if their difference exceeds MAX_SRTP_AUTH_DATA_DIFF) it
 * returns an error.
 */
static int get_userbuf_srtp(struct csession *ses, struct kernel_crypt_auth_op *kcaop,
			struct scatterlist **auth_sg, struct scatterlist **dst_sg)
{
	int pagecount, diff, rc;
	int auth_pagecount = 0;
	struct crypt_auth_op *caop = &kcaop->caop;

	if (caop->dst == NULL && caop->auth_src == NULL) {
		dprintk(1, KERN_ERR, "dst and auth_src cannot be both null\n");
		return -EINVAL;
	}

	if (ses->alignmask) {
		if (!IS_ALIGNED((unsigned long)caop->dst, ses->alignmask))
			dprintk(2, KERN_WARNING, "%s: careful - destination address %lx is not %d byte aligned\n",
					__func__, (unsigned long)caop->dst, ses->alignmask + 1);
		if (!IS_ALIGNED((unsigned long)caop->auth_src, ses->alignmask))
			dprintk(2, KERN_WARNING, "%s: careful - auth source address %lx is not %d byte aligned\n",
					__func__, (unsigned long)caop->auth_src, ses->alignmask + 1);
	}

	if (unlikely(kcaop->dst_len == 0 || caop->auth_len == 0)) {
		dprintk(1, KERN_WARNING, "Destination and auth lengths cannot be zero\n");
		return -EINVAL;
	}

	/* Note that in SRTP the auth data overlap with the data to be
	 * encrypted (dst).
	 */
	auth_pagecount = PAGECOUNT(caop->auth_src, caop->auth_len);
	diff = (int)(caop->src - caop->auth_src);
	if (diff > MAX_SRTP_AUTH_DATA_DIFF || diff < 0) {
		dprintk(1, KERN_WARNING, "auth_src must overlap with src (diff: %d).\n", diff);
		return -EINVAL;
	}

	pagecount = auth_pagecount;

	rc = adjust_sg_array(ses, pagecount*2); /* double pages to have pages for dst(=auth_src) */
	if (rc) {
		dprintk(1, KERN_ERR, "cannot adjust sg array\n");
		return rc;
	}

	rc = __get_userbuf(caop->auth_src, caop->auth_len, 1, auth_pagecount,
			ses->pages, ses->sg, kcaop->task, kcaop->mm);
	if (unlikely(rc)) {
		dprintk(1, KERN_ERR,
			"failed to get user pages for data input\n");
		return -EINVAL;
	}

	ses->used_pages = pagecount;
	ses->readonly_pages = 0;

	(*auth_sg) = ses->sg;

	(*dst_sg) = ses->sg + auth_pagecount;
	sg_init_table(*dst_sg, auth_pagecount);
	sg_copy(ses->sg, (*dst_sg), caop->auth_len);
	(*dst_sg) = sg_advance(*dst_sg, diff);
	if (*dst_sg == NULL) {
		release_user_pages(ses);
		dprintk(1, KERN_ERR,
			"failed to get enough pages for auth data\n");
		return -EINVAL;
	}

	return 0;
}
static int fill_kcaop_from_caop(struct kernel_crypt_auth_op *kcaop, struct fcrypt *fcr)
{
	struct crypt_auth_op *caop = &kcaop->caop;
	struct csession *ses_ptr;
	int ret;

	/* this also enters ses_ptr->sem */
	ses_ptr = crypto_get_session_by_sid(fcr, caop->ses);
	if (unlikely(!ses_ptr)) {
		dprintk(1, KERN_ERR, "invalid session ID=0x%08X\n", caop->ses);
		return -EINVAL;
	}

	if (caop->flags & COP_FLAG_AEAD_TLS_TYPE || caop->flags & COP_FLAG_AEAD_SRTP_TYPE) {
		if (caop->src != caop->dst) {
			dprintk(1, KERN_ERR,
				"Out-of-place encryption and decryption is inefficient and not implemented\n");
			ret = -EINVAL;
			goto out_unlock;
		}
	}

	if (caop->tag_len == 0)
		caop->tag_len = ses_ptr->hdata.digestsize;

	kcaop->ivlen = caop->iv ? ses_ptr->cdata.ivsize : 0;

	if (caop->flags & COP_FLAG_AEAD_TLS_TYPE)
		kcaop->dst_len = caop->len + ses_ptr->cdata.blocksize /* pad */ + caop->tag_len;
	else
		kcaop->dst_len = caop->len;

	kcaop->task = current;
	kcaop->mm = current->mm;

	if (caop->iv) {
		ret = copy_from_user(kcaop->iv, caop->iv, kcaop->ivlen);
		if (unlikely(ret)) {
			dprintk(1, KERN_ERR,
				"error copying IV (%d bytes), copy_from_user returned %d for address %lx\n",
				kcaop->ivlen, ret, (unsigned long)caop->iv);
			ret = -EFAULT;
			goto out_unlock;
		}
	}

	ret = 0;

out_unlock:
	crypto_put_session(ses_ptr);
	return ret;
}
static int fill_caop_from_kcaop(struct kernel_crypt_auth_op *kcaop, struct fcrypt *fcr)
{
	int ret;

	kcaop->caop.len = kcaop->dst_len;

	if (kcaop->ivlen && kcaop->caop.flags & COP_FLAG_WRITE_IV) {
		ret = copy_to_user(kcaop->caop.iv,
				kcaop->iv, kcaop->ivlen);
		if (unlikely(ret)) {
			dprintk(1, KERN_ERR, "Error in copying to userspace\n");
			return -EFAULT;
		}
	}
	return 0;
}
int kcaop_from_user(struct kernel_crypt_auth_op *kcaop,
		struct fcrypt *fcr, void __user *arg)
{
	if (unlikely(copy_from_user(&kcaop->caop, arg, sizeof(kcaop->caop)))) {
		dprintk(1, KERN_ERR, "Error in copying from userspace\n");
		return -EFAULT;
	}

	return fill_kcaop_from_caop(kcaop, fcr);
}
int kcaop_to_user(struct kernel_crypt_auth_op *kcaop,
		struct fcrypt *fcr, void __user *arg)
{
	int ret;

	ret = fill_caop_from_kcaop(kcaop, fcr);
	if (unlikely(ret)) {
		dprintk(1, KERN_ERR, "error in fill_caop_from_kcaop()\n");
		return ret;
	}

	if (unlikely(copy_to_user(arg, &kcaop->caop, sizeof(kcaop->caop)))) {
		dprintk(1, KERN_ERR, "Error in copying to userspace\n");
		return -EFAULT;
	}
	return 0;
}
static void copy_tls_hash(struct scatterlist *dst_sg, int len, void *hash, int hash_len)
{
	scatterwalk_map_and_copy(hash, dst_sg, len, hash_len, 1);
}

static void read_tls_hash(struct scatterlist *dst_sg, int len, void *hash, int hash_len)
{
	scatterwalk_map_and_copy(hash, dst_sg, len - hash_len, hash_len, 0);
}
static int pad_record(struct scatterlist *dst_sg, int len, int block_size)
{
	uint8_t pad[block_size];
	int pad_size = block_size - (len % block_size);

	memset(pad, pad_size - 1, pad_size);

	scatterwalk_map_and_copy(pad, dst_sg, len, pad_size, 1);

	return pad_size;
}
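/*
 * Worked example (illustrative): with block_size == 16 and len == 25,
 * pad_size = 16 - (25 % 16) = 7, so seven bytes of value 0x06 are
 * appended, bringing the record to 32 bytes.  The last byte doubles as
 * the TLS padding_length field (number of pad bytes minus one).
 */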
static int verify_tls_record_pad(struct scatterlist *dst_sg, int len, int block_size)
{
	uint8_t pad[256]; /* the maximum allowed */
	uint8_t pad_size;
	int i;

	scatterwalk_map_and_copy(&pad_size, dst_sg, len - 1, 1, 0);

	if (pad_size + 1 > len) {
		dprintk(1, KERN_ERR, "Pad size: %d\n", pad_size);
		return -EBADMSG;
	}

	scatterwalk_map_and_copy(pad, dst_sg, len - pad_size - 1, pad_size + 1, 0);

	for (i = 0; i < pad_size; i++)
		if (pad[i] != pad_size) {
			dprintk(1, KERN_ERR, "Pad size: %d, pad: %d\n", pad_size, (int)pad[i]);
			return -EBADMSG;
		}

	return pad_size + 1;
}
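/*
 * Continuing the example above (illustrative): on decryption of that
 * 32-byte record the last byte reads 0x06, the six bytes preceding it
 * must all equal 0x06, and the function returns 7, the total number of
 * pad bytes the caller strips from the record length.
 */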
/* Authenticate and encrypt the TLS way (also perform padding).
 * During decryption it verifies the pad and tag and returns -EBADMSG on error.
 */
static int
tls_auth_n_crypt(struct csession *ses_ptr, struct kernel_crypt_auth_op *kcaop,
		struct scatterlist *auth_sg, uint32_t auth_len,
		struct scatterlist *dst_sg, uint32_t len)
{
	int ret, fail = 0;
	struct crypt_auth_op *caop = &kcaop->caop;
	uint8_t vhash[AALG_MAX_RESULT_LEN];
	uint8_t hash_output[AALG_MAX_RESULT_LEN];

	/* TLS authenticates the plaintext except for the padding.
	 */
	if (caop->op == COP_ENCRYPT) {
		if (ses_ptr->hdata.init != 0) {
			if (auth_len > 0) {
				ret = cryptodev_hash_update(&ses_ptr->hdata,
								auth_sg, auth_len);
				if (unlikely(ret)) {
					dprintk(0, KERN_ERR, "cryptodev_hash_update: %d\n", ret);
					return ret;
				}
			}

			if (len > 0) {
				ret = cryptodev_hash_update(&ses_ptr->hdata,
								dst_sg, len);
				if (unlikely(ret)) {
					dprintk(0, KERN_ERR, "cryptodev_hash_update: %d\n", ret);
					return ret;
				}
			}

			ret = cryptodev_hash_final(&ses_ptr->hdata, hash_output);
			if (unlikely(ret)) {
				dprintk(0, KERN_ERR, "cryptodev_hash_final: %d\n", ret);
				return ret;
			}

			copy_tls_hash(dst_sg, len, hash_output, caop->tag_len);
			len += caop->tag_len;
		}

		if (ses_ptr->cdata.init != 0) {
			if (ses_ptr->cdata.blocksize > 1) {
				ret = pad_record(dst_sg, len, ses_ptr->cdata.blocksize);
				len += ret;
			}

			ret = cryptodev_cipher_encrypt(&ses_ptr->cdata,
							dst_sg, dst_sg, len);
			if (unlikely(ret)) {
				dprintk(0, KERN_ERR, "cryptodev_cipher_encrypt: %d\n", ret);
				return ret;
			}
		}
	} else {
		if (ses_ptr->cdata.init != 0) {
			ret = cryptodev_cipher_decrypt(&ses_ptr->cdata,
							dst_sg, dst_sg, len);

			if (unlikely(ret)) {
				dprintk(0, KERN_ERR, "cryptodev_cipher_decrypt: %d\n", ret);
				return ret;
			}

			if (ses_ptr->cdata.blocksize > 1) {
				ret = verify_tls_record_pad(dst_sg, len, ses_ptr->cdata.blocksize);
				if (unlikely(ret < 0)) {
					dprintk(2, KERN_ERR, "verify_record_pad: %d\n", ret);
					fail = 1;
				} else {
					len -= ret;
				}
			}
		}

		if (ses_ptr->hdata.init != 0) {
			if (unlikely(caop->tag_len > sizeof(vhash) || caop->tag_len > len)) {
				dprintk(1, KERN_ERR, "Illegal tag length\n");
				return -EINVAL;
			}

			read_tls_hash(dst_sg, len, vhash, caop->tag_len);
			len -= caop->tag_len;

			if (auth_len > 0) {
				ret = cryptodev_hash_update(&ses_ptr->hdata,
								auth_sg, auth_len);
				if (unlikely(ret)) {
					dprintk(0, KERN_ERR, "cryptodev_hash_update: %d\n", ret);
					return ret;
				}
			}

			if (len > 0) {
				ret = cryptodev_hash_update(&ses_ptr->hdata,
								dst_sg, len);
				if (unlikely(ret)) {
					dprintk(0, KERN_ERR, "cryptodev_hash_update: %d\n", ret);
					return ret;
				}
			}

			ret = cryptodev_hash_final(&ses_ptr->hdata, hash_output);
			if (unlikely(ret)) {
				dprintk(0, KERN_ERR, "cryptodev_hash_final: %d\n", ret);
				return ret;
			}

			if (memcmp(vhash, hash_output, caop->tag_len) != 0 || fail != 0) {
				dprintk(2, KERN_ERR, "MAC verification failed (tag_len: %d)\n", caop->tag_len);
				return -EBADMSG;
			}
		}
	}
	kcaop->dst_len = len;
	return 0;
}
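/*
 * Worked example (illustrative): encrypting a 1000-byte plaintext with
 * HMAC-SHA1 and AES-CBC proceeds MAC-then-pad-then-encrypt: the 20-byte
 * tag extends the record to 1020 bytes, padding (four bytes of 0x03)
 * rounds it up to 1024, and the whole 1024 bytes are encrypted in place,
 * so kcaop->dst_len ends up as 1024.
 */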
/* Authenticate and encrypt the SRTP way. During decryption
 * it verifies the tag and returns -EBADMSG on error.
 */
static int
srtp_auth_n_crypt(struct csession *ses_ptr, struct kernel_crypt_auth_op *kcaop,
		struct scatterlist *auth_sg, uint32_t auth_len,
		struct scatterlist *dst_sg, uint32_t len)
{
	int ret, fail = 0;
	struct crypt_auth_op *caop = &kcaop->caop;
	uint8_t vhash[AALG_MAX_RESULT_LEN];
	uint8_t hash_output[AALG_MAX_RESULT_LEN];

	/* SRTP authenticates the encrypted data.
	 */
	if (caop->op == COP_ENCRYPT) {
		if (ses_ptr->cdata.init != 0) {
			ret = cryptodev_cipher_encrypt(&ses_ptr->cdata,
							dst_sg, dst_sg, len);
			if (unlikely(ret)) {
				dprintk(0, KERN_ERR, "cryptodev_cipher_encrypt: %d\n", ret);
				return ret;
			}
		}

		if (ses_ptr->hdata.init != 0) {
			if (auth_len > 0) {
				ret = cryptodev_hash_update(&ses_ptr->hdata,
								auth_sg, auth_len);
				if (unlikely(ret)) {
					dprintk(0, KERN_ERR, "cryptodev_hash_update: %d\n", ret);
					return ret;
				}
			}

			ret = cryptodev_hash_final(&ses_ptr->hdata, hash_output);
			if (unlikely(ret)) {
				dprintk(0, KERN_ERR, "cryptodev_hash_final: %d\n", ret);
				return ret;
			}

			if (unlikely(copy_to_user(caop->tag, hash_output, caop->tag_len)))
				return -EFAULT;
		}
	} else {
		if (ses_ptr->hdata.init != 0) {
			if (unlikely(caop->tag_len > sizeof(vhash) || caop->tag_len > len)) {
				dprintk(1, KERN_ERR, "Illegal tag length\n");
				return -EINVAL;
			}

			if (unlikely(copy_from_user(vhash, caop->tag, caop->tag_len)))
				return -EFAULT;

			ret = cryptodev_hash_update(&ses_ptr->hdata,
							auth_sg, auth_len);
			if (unlikely(ret)) {
				dprintk(0, KERN_ERR, "cryptodev_hash_update: %d\n", ret);
				return ret;
			}

			ret = cryptodev_hash_final(&ses_ptr->hdata, hash_output);
			if (unlikely(ret)) {
				dprintk(0, KERN_ERR, "cryptodev_hash_final: %d\n", ret);
				return ret;
			}

			if (memcmp(vhash, hash_output, caop->tag_len) != 0 || fail != 0) {
				dprintk(2, KERN_ERR, "MAC verification failed\n");
				return -EBADMSG;
			}
		}

		if (ses_ptr->cdata.init != 0) {
			ret = cryptodev_cipher_decrypt(&ses_ptr->cdata,
							dst_sg, dst_sg, len);

			if (unlikely(ret)) {
				dprintk(0, KERN_ERR, "cryptodev_cipher_decrypt: %d\n", ret);
				return ret;
			}
		}
	}
	kcaop->dst_len = len;
	return 0;
}
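/*
 * Note (illustrative): unlike the TLS path, SRTP is encrypt-then-MAC and
 * the tag is not appended to the record: on encryption it is copied out
 * to the separate caop->tag user pointer, and on decryption it is read
 * back from there for comparison, so dst_len is unchanged by the MAC.
 */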
/* Typical AEAD (i.e. GCM) encryption/decryption.
 * During decryption the tag is verified.
 */
static int
auth_n_crypt(struct csession *ses_ptr, struct kernel_crypt_auth_op *kcaop,
		struct scatterlist *auth_sg, uint32_t auth_len,
		struct scatterlist *src_sg,
		struct scatterlist *dst_sg, uint32_t len)
{
	int ret;
	struct crypt_auth_op *caop = &kcaop->caop;
	int max_tag_len;

	max_tag_len = cryptodev_cipher_get_tag_size(&ses_ptr->cdata);
	if (unlikely(caop->tag_len > max_tag_len)) {
		dprintk(0, KERN_ERR, "Illegal tag length: %d\n", caop->tag_len);
		return -EINVAL;
	}

	if (caop->tag_len)
		cryptodev_cipher_set_tag_size(&ses_ptr->cdata, caop->tag_len);
	else
		caop->tag_len = max_tag_len;

	if (caop->op == COP_ENCRYPT) {
		if (auth_len > 0)
			cryptodev_cipher_auth(&ses_ptr->cdata, auth_sg, auth_len);
		else /* for some reason we _have_ to call that */
			cryptodev_cipher_auth(&ses_ptr->cdata, NULL, 0);

		ret = cryptodev_cipher_encrypt(&ses_ptr->cdata,
						src_sg, dst_sg, len);
		if (unlikely(ret)) {
			dprintk(0, KERN_ERR, "cryptodev_cipher_encrypt: %d\n", ret);
			return ret;
		}
		kcaop->dst_len = len + caop->tag_len;
		caop->tag = caop->dst + len;
	} else {
		if (auth_len > 0)
			cryptodev_cipher_auth(&ses_ptr->cdata, auth_sg, auth_len);

		ret = cryptodev_cipher_decrypt(&ses_ptr->cdata,
						src_sg, dst_sg, len);

		if (unlikely(ret)) {
			dprintk(0, KERN_ERR, "cryptodev_cipher_decrypt: %d\n", ret);
			return ret;
		}
		kcaop->dst_len = len - caop->tag_len;
		caop->tag = caop->dst + len - caop->tag_len;
	}

	return 0;
}
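/*
 * Worked example (illustrative): for AES-GCM with a 16-byte tag,
 * encrypting len == 1000 bytes yields kcaop->dst_len == 1016 with the
 * tag stored inline at dst + 1000; decryption takes the 1016-byte
 * ciphertext (tag included) and reports dst_len == 1000 back.
 */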
/* This is the main crypto function - zero-copy edition */
static int
__crypto_auth_run_zc(struct csession *ses_ptr, struct kernel_crypt_auth_op *kcaop)
{
	struct scatterlist *dst_sg, *auth_sg, *src_sg;
	struct crypt_auth_op *caop = &kcaop->caop;
	int ret = 0;

	if (caop->flags & COP_FLAG_AEAD_SRTP_TYPE) {
		if (unlikely(ses_ptr->cdata.init != 0 &&
			(ses_ptr->cdata.stream == 0 || ses_ptr->cdata.aead != 0))) {
			dprintk(0, KERN_ERR, "Only stream modes are allowed in SRTP mode (but not AEAD)\n");
			return -EINVAL;
		}

		ret = get_userbuf_srtp(ses_ptr, kcaop, &auth_sg, &dst_sg);
		if (unlikely(ret)) {
			dprintk(1, KERN_ERR, "get_userbuf_srtp(): Error getting user pages.\n");
			return ret;
		}

		ret = srtp_auth_n_crypt(ses_ptr, kcaop, auth_sg, caop->auth_len,
				dst_sg, caop->len);
	} else { /* TLS and normal cases. Here auth data are usually small
		  * so we just copy them to a free page, instead of trying
		  * to map them.
		  */
		unsigned char *auth_buf = NULL;
		struct scatterlist tmp;

		if (unlikely(caop->auth_len > PAGE_SIZE)) {
			dprintk(1, KERN_ERR, "auth data len is excessive.\n");
			return -EINVAL;
		}

		auth_buf = (unsigned char *)__get_free_page(GFP_KERNEL);
		if (unlikely(!auth_buf)) {
			dprintk(1, KERN_ERR, "unable to get a free page.\n");
			return -ENOMEM;
		}

		if (caop->auth_src && caop->auth_len > 0) {
			if (unlikely(copy_from_user(auth_buf, caop->auth_src, caop->auth_len))) {
				dprintk(1, KERN_ERR, "unable to copy auth data from userspace.\n");
				ret = -EFAULT;
				goto free_auth_buf;
			}

			sg_init_one(&tmp, auth_buf, caop->auth_len);
			auth_sg = &tmp;
		} else {
			auth_sg = NULL;
		}

		if (caop->flags & COP_FLAG_AEAD_TLS_TYPE && ses_ptr->cdata.aead == 0) {
			ret = get_userbuf_tls(ses_ptr, kcaop, &dst_sg);
			if (unlikely(ret)) {
				dprintk(1, KERN_ERR, "get_userbuf_tls(): Error getting user pages.\n");
				goto free_auth_buf;
			}

			ret = tls_auth_n_crypt(ses_ptr, kcaop, auth_sg, caop->auth_len,
					dst_sg, caop->len);
		} else {
			int dst_len;

			if (unlikely(ses_ptr->cdata.init == 0 ||
					ses_ptr->cdata.stream == 0 ||
					ses_ptr->cdata.aead == 0)) {
				dprintk(0, KERN_ERR, "Only stream and AEAD ciphers are allowed for authenc\n");
				ret = -EINVAL;
				goto free_auth_buf;
			}

			if (caop->op == COP_ENCRYPT)
				dst_len = caop->len + cryptodev_cipher_get_tag_size(&ses_ptr->cdata);
			else
				dst_len = caop->len - cryptodev_cipher_get_tag_size(&ses_ptr->cdata);

			ret = get_userbuf(ses_ptr, caop->src, caop->len, caop->dst, dst_len,
					kcaop->task, kcaop->mm, &src_sg, &dst_sg);
			if (unlikely(ret)) {
				dprintk(1, KERN_ERR, "get_userbuf(): Error getting user pages.\n");
				goto free_auth_buf;
			}

			ret = auth_n_crypt(ses_ptr, kcaop, auth_sg, caop->auth_len,
					src_sg, dst_sg, caop->len);
		}

free_auth_buf:
		free_page((unsigned long)auth_buf);
	}

	release_user_pages(ses_ptr);

	return ret;
}
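/*
 * Summary (illustrative): __crypto_auth_run_zc() dispatches on flags:
 * COP_FLAG_AEAD_SRTP_TYPE -> srtp_auth_n_crypt() (stream cipher, in place),
 * COP_FLAG_AEAD_TLS_TYPE with a non-AEAD cipher -> tls_auth_n_crypt(),
 * and anything else -> auth_n_crypt() for a true AEAD mode such as GCM.
 */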
int crypto_auth_run(struct fcrypt *fcr, struct kernel_crypt_auth_op *kcaop)
{
	struct csession *ses_ptr;
	struct crypt_auth_op *caop = &kcaop->caop;
	int ret;

	if (unlikely(caop->op != COP_ENCRYPT && caop->op != COP_DECRYPT)) {
		dprintk(1, KERN_DEBUG, "invalid operation op=%u\n", caop->op);
		return -EINVAL;
	}

	/* this also enters ses_ptr->sem */
	ses_ptr = crypto_get_session_by_sid(fcr, caop->ses);
	if (unlikely(!ses_ptr)) {
		dprintk(1, KERN_ERR, "invalid session ID=0x%08X\n", caop->ses);
		return -EINVAL;
	}

	if (unlikely(ses_ptr->cdata.init == 0)) {
		dprintk(1, KERN_ERR, "cipher context not initialized\n");
		ret = -EINVAL;
		goto out_unlock;
	}

	/* If we have a hash/mac handle reset its state */
	if (ses_ptr->hdata.init != 0) {
		ret = cryptodev_hash_reset(&ses_ptr->hdata);
		if (unlikely(ret)) {
			dprintk(1, KERN_ERR,
				"error in cryptodev_hash_reset()\n");
			goto out_unlock;
		}
	}

	cryptodev_cipher_set_iv(&ses_ptr->cdata, kcaop->iv,
				min(ses_ptr->cdata.ivsize, kcaop->ivlen));

	if (likely(caop->len || caop->auth_len)) {
		ret = __crypto_auth_run_zc(ses_ptr, kcaop);
		if (unlikely(ret)) {
			dprintk(1, KERN_ERR,
				"error in __crypto_auth_run_zc()\n");
			goto out_unlock;
		}
	} else {
		dprintk(1, KERN_ERR,
			"Neither data nor auth data were provided.\n");
		ret = -EINVAL;
		goto out_unlock;
	}

	ret = 0;

	cryptodev_cipher_get_iv(&ses_ptr->cdata, kcaop->iv,
				min(ses_ptr->cdata.ivsize, kcaop->ivlen));

out_unlock:
	crypto_put_session(ses_ptr);
	return ret;
}
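/*
 * Userspace usage sketch (illustrative only, not part of the driver;
 * error handling omitted).  CIOCAUTHCRYPT and struct crypt_auth_op come
 * from crypto/cryptodev.h; "sid" is assumed to be a session previously
 * created with CIOCGSESSION for an AEAD cipher:
 *
 *	struct crypt_auth_op cao = {
 *		.ses      = sid,
 *		.op       = COP_ENCRYPT,
 *		.len      = sizeof(plaintext),
 *		.src      = plaintext,
 *		.dst      = ciphertext,      // must hold len + tag
 *		.auth_len = sizeof(aad),
 *		.auth_src = aad,
 *		.iv       = iv,
 *		.iv_len   = 12,
 *		.tag_len  = 0,               // 0 = use the default tag size
 *	};
 *
 *	ioctl(cfd, CIOCAUTHCRYPT, &cao);
 *	// on return cao.len holds the total output length (data + tag)
 */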