/*
 * Driver for /dev/crypto device (aka CryptoDev)
 *
 * Copyright (c) 2011, 2012 OpenSSL Software Foundation, Inc.
 *
 * Author: Nikos Mavrogiannopoulos
 *
 * This file is part of linux cryptodev.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

/*
 * This file handles the AEAD part of /dev/crypto.
 */
#include <crypto/hash.h>
#include <linux/crypto.h>
#include <linux/highmem.h>
#include <linux/ioctl.h>
#include <linux/random.h>
#include <linux/syscalls.h>
#include <linux/pagemap.h>
#include <linux/poll.h>
#include <linux/uaccess.h>
#include <crypto/cryptodev.h>
#include <crypto/scatterwalk.h>
#include <linux/scatterlist.h>
#include "cryptodev_int.h"
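/*
 * Illustrative only (a hedged userspace sketch, not part of this module's
 * build): the code in this file is reached through the CIOCAUTHCRYPT ioctl
 * with a struct crypt_auth_op.  The sketch assumes a session already created
 * via CIOCGSESSION on /dev/crypto; buffer sizes and flags are examples.
 *
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <crypto/cryptodev.h>
 *
 *	static int aead_encrypt(int cfd, __u32 ses, unsigned char *iv,
 *				unsigned char *aad, __u32 aad_len,
 *				unsigned char *buf, __u32 len)
 *	{
 *		struct crypt_auth_op cao;
 *
 *		memset(&cao, 0, sizeof(cao));
 *		cao.ses = ses;
 *		cao.op = COP_ENCRYPT;
 *		cao.flags = COP_FLAG_AEAD_TLS_TYPE;	// or _SRTP_TYPE, or 0
 *		cao.iv = iv;
 *		cao.auth_src = aad;
 *		cao.auth_len = aad_len;
 *		cao.src = buf;
 *		cao.dst = buf;		// TLS/SRTP require in-place (src == dst)
 *		cao.len = len;
 *		cao.tag_len = 0;	// 0: use the session MAC's digest size
 *		return ioctl(cfd, CIOCAUTHCRYPT, &cao);
 *	}
 */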
/* Make caop->dst available as a scatterlist.
 * (caop->src is assumed to be equal to caop->dst)
 */
static int get_userbuf_tls(struct csession *ses, struct kernel_crypt_auth_op *kcaop,
			   struct scatterlist **dst_sg)
{
	int pagecount;
	struct crypt_auth_op *caop = &kcaop->caop;
	int rc;

	if (caop->dst == NULL)
		return -EINVAL;

	if (ses->alignmask &&
	    !IS_ALIGNED((unsigned long)caop->dst, ses->alignmask))
		dprintk(2, KERN_WARNING, "careful - source address %lx is not %d byte aligned\n",
			(unsigned long)caop->dst, ses->alignmask + 1);

	if (kcaop->dst_len == 0) {
		dprintk(1, KERN_WARNING, "Destination length cannot be zero\n");
		return -EINVAL;
	}

	pagecount = PAGECOUNT(caop->dst, kcaop->dst_len);

	ses->used_pages = pagecount;
	ses->readonly_pages = 0;

	rc = adjust_sg_array(ses, pagecount);
	if (rc)
		return rc;

	rc = __get_userbuf(caop->dst, kcaop->dst_len, 1, pagecount,
			   ses->pages, ses->sg, kcaop->task, kcaop->mm);
	if (unlikely(rc)) {
		dprintk(1, KERN_ERR,
			"failed to get user pages for data input\n");
		return -EINVAL;
	}

	(*dst_sg) = ses->sg;

	return 0;
}
#define MAX_SRTP_AUTH_DATA_DIFF 256

/* Makes caop->auth_src available as a scatterlist.
 * It also provides a pointer to caop->dst, which however
 * is assumed to be within the caop->auth_src buffer. If not
 * (if their difference exceeds MAX_SRTP_AUTH_DATA_DIFF) it
 * returns an error.
 */
static int get_userbuf_srtp(struct csession *ses, struct kernel_crypt_auth_op *kcaop,
			    struct scatterlist **auth_sg, struct scatterlist **dst_sg)
{
	int pagecount, diff;
	int auth_pagecount = 0;
	struct crypt_auth_op *caop = &kcaop->caop;
	int rc;

	if (caop->dst == NULL && caop->auth_src == NULL) {
		dprintk(1, KERN_ERR, "dst and auth_src cannot both be null\n");
		return -EINVAL;
	}

	if (ses->alignmask) {
		if (!IS_ALIGNED((unsigned long)caop->dst, ses->alignmask))
			dprintk(2, KERN_WARNING, "careful - source address %lx is not %d byte aligned\n",
				(unsigned long)caop->dst, ses->alignmask + 1);
		if (!IS_ALIGNED((unsigned long)caop->auth_src, ses->alignmask))
			dprintk(2, KERN_WARNING, "careful - source address %lx is not %d byte aligned\n",
				(unsigned long)caop->auth_src, ses->alignmask + 1);
	}

	if (unlikely(kcaop->dst_len == 0 || caop->auth_len == 0)) {
		dprintk(1, KERN_WARNING, "Destination length cannot be zero\n");
		return -EINVAL;
	}

	/* Note that in SRTP the auth data overlap with the data to be
	 * encrypted (dst).
	 */
	auth_pagecount = PAGECOUNT(caop->auth_src, caop->auth_len);
	diff = (int)(caop->src - caop->auth_src);
	if (diff > MAX_SRTP_AUTH_DATA_DIFF || diff < 0) {
		dprintk(1, KERN_WARNING, "auth_src must overlap with src (diff: %d).\n", diff);
		return -EINVAL;
	}

	pagecount = auth_pagecount;

	rc = adjust_sg_array(ses, pagecount * 2); /* double pages to have pages for dst (= auth_src) */
	if (rc) {
		dprintk(1, KERN_ERR, "cannot adjust sg array\n");
		return rc;
	}

	rc = __get_userbuf(caop->auth_src, caop->auth_len, 1, auth_pagecount,
			   ses->pages, ses->sg, kcaop->task, kcaop->mm);
	if (unlikely(rc)) {
		dprintk(1, KERN_ERR,
			"failed to get user pages for data input\n");
		return -EINVAL;
	}

	ses->used_pages = pagecount;
	ses->readonly_pages = 0;

	(*auth_sg) = ses->sg;

	(*dst_sg) = ses->sg + auth_pagecount;
	sg_init_table(*dst_sg, auth_pagecount);
	sg_copy(ses->sg, (*dst_sg), caop->auth_len);
	(*dst_sg) = sg_advance(*dst_sg, diff);
	if (*dst_sg == NULL) {
		release_user_pages(ses);
		dprintk(1, KERN_ERR,
			"failed to get enough pages for auth data\n");
		return -EINVAL;
	}

	return 0;
}
static int fill_kcaop_from_caop(struct kernel_crypt_auth_op *kcaop, struct fcrypt *fcr)
{
	struct crypt_auth_op *caop = &kcaop->caop;
	struct csession *ses_ptr;
	int ret;

	/* this also enters ses_ptr->sem */
	ses_ptr = crypto_get_session_by_sid(fcr, caop->ses);
	if (unlikely(!ses_ptr)) {
		dprintk(1, KERN_ERR, "invalid session ID=0x%08X\n", caop->ses);
		return -EINVAL;
	}

	if (caop->flags & COP_FLAG_AEAD_TLS_TYPE || caop->flags & COP_FLAG_AEAD_SRTP_TYPE) {
		if (caop->src != caop->dst) {
			dprintk(1, KERN_ERR,
				"Non-inplace encryption and decryption is not efficient and not implemented\n");
			ret = -EINVAL;
			goto out_unlock;
		}
	}

	if (caop->tag_len == 0)
		caop->tag_len = ses_ptr->hdata.digestsize;

	kcaop->ivlen = caop->iv ? ses_ptr->cdata.ivsize : 0;

	if (caop->flags & COP_FLAG_AEAD_TLS_TYPE)
		kcaop->dst_len = caop->len + ses_ptr->cdata.blocksize /* pad */ + caop->tag_len;
	else
		kcaop->dst_len = caop->len;

	kcaop->task = current;
	kcaop->mm = current->mm;

	if (caop->iv) {
		ret = copy_from_user(kcaop->iv, caop->iv, kcaop->ivlen);
		if (unlikely(ret)) {
			dprintk(1, KERN_ERR,
				"error copying IV (%d bytes), copy_from_user returned %d for address %lx\n",
				kcaop->ivlen, ret, (unsigned long)caop->iv);
			ret = -EFAULT;
			goto out_unlock;
		}
	}

	ret = 0;

out_unlock:
	crypto_put_session(ses_ptr);
	return ret;
}
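/*
 * Worked example for the TLS dst_len reservation above (illustrative
 * numbers): with AES-CBC (blocksize 16), HMAC-SHA1 (digestsize 20) and
 * caop->len = 100, fill_kcaop_from_caop() reserves 100 + 16 + 20 = 136
 * bytes: enough for the appended MAC plus worst-case CBC padding.
 */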
static int fill_caop_from_kcaop(struct kernel_crypt_auth_op *kcaop, struct fcrypt *fcr)
{
	int ret;

	kcaop->caop.len = kcaop->dst_len;

	if (kcaop->ivlen && kcaop->caop.flags & COP_FLAG_WRITE_IV) {
		ret = copy_to_user(kcaop->caop.iv,
				   kcaop->iv, kcaop->ivlen);
		if (unlikely(ret)) {
			dprintk(1, KERN_ERR, "Error in copying to userspace\n");
			return -EFAULT;
		}
	}

	return 0;
}
int kcaop_from_user(struct kernel_crypt_auth_op *kcaop,
		    struct fcrypt *fcr, void __user *arg)
{
	if (unlikely(copy_from_user(&kcaop->caop, arg, sizeof(kcaop->caop)))) {
		dprintk(1, KERN_ERR, "Error in copying from userspace\n");
		return -EFAULT;
	}

	return fill_kcaop_from_caop(kcaop, fcr);
}

int kcaop_to_user(struct kernel_crypt_auth_op *kcaop,
		  struct fcrypt *fcr, void __user *arg)
{
	int ret;

	ret = fill_caop_from_kcaop(kcaop, fcr);
	if (unlikely(ret)) {
		dprintk(1, KERN_ERR, "error in fill_caop_from_kcaop()\n");
		return ret;
	}

	if (unlikely(copy_to_user(arg, &kcaop->caop, sizeof(kcaop->caop)))) {
		dprintk(1, KERN_ERR, "Error in copying to userspace\n");
		return -EFAULT;
	}

	return 0;
}
static void copy_tls_hash(struct scatterlist *dst_sg, int len, void *hash, int hash_len)
{
	scatterwalk_map_and_copy(hash, dst_sg, len, hash_len, 1);
}

static void read_tls_hash(struct scatterlist *dst_sg, int len, void *hash, int hash_len)
{
	scatterwalk_map_and_copy(hash, dst_sg, len - hash_len, hash_len, 0);
}

static int pad_record(struct scatterlist *dst_sg, int len, int block_size)
{
	uint8_t pad[block_size];
	int pad_size = block_size - (len % block_size);

	memset(pad, pad_size - 1, pad_size);

	scatterwalk_map_and_copy(pad, dst_sg, len, pad_size, 1);

	return pad_size;
}

static int verify_tls_record_pad(struct scatterlist *dst_sg, int len, int block_size)
{
	uint8_t pad[256]; /* the maximum allowed */
	uint8_t pad_size;
	int i;

	scatterwalk_map_and_copy(&pad_size, dst_sg, len - 1, 1, 0);

	if (pad_size + 1 > len) {
		dprintk(1, KERN_ERR, "Pad size: %d\n", pad_size);
		return -EBADMSG;
	}

	scatterwalk_map_and_copy(pad, dst_sg, len - pad_size - 1, pad_size + 1, 0);

	/* verify that every padding byte carries the pad length value */
	for (i = 0; i < pad_size; i++)
		if (pad[i] != pad_size) {
			dprintk(1, KERN_ERR, "Pad size: %d, pad: %d\n", pad_size, (int)pad[i]);
			return -EBADMSG;
		}

	return pad_size + 1;
}
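/*
 * Worked padding example (illustrative numbers): for len = 26 and
 * block_size = 16, pad_record() appends pad_size = 16 - (26 % 16) = 6
 * bytes, each of value pad_size - 1 = 0x05, giving a 32-byte record.
 * verify_tls_record_pad() then reads the last byte (5), checks that the
 * five bytes before it are also 5, and returns 6, the number of trailing
 * bytes to strip.
 */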
/* Authenticate and encrypt the TLS way (also perform padding).
 * During decryption it verifies the pad and tag and returns -EBADMSG on error.
 */
static int
tls_auth_n_crypt(struct csession *ses_ptr, struct kernel_crypt_auth_op *kcaop,
		 struct scatterlist *auth_sg, uint32_t auth_len,
		 struct scatterlist *dst_sg, uint32_t len)
{
	int ret, fail = 0;
	struct crypt_auth_op *caop = &kcaop->caop;
	uint8_t vhash[AALG_MAX_RESULT_LEN];
	uint8_t hash_output[AALG_MAX_RESULT_LEN];

	/* TLS authenticates the plaintext except for the padding. */
	if (caop->op == COP_ENCRYPT) {
		if (ses_ptr->hdata.init != 0) {
			if (auth_len > 0) {
				ret = cryptodev_hash_update(&ses_ptr->hdata,
							    auth_sg, auth_len);
				if (unlikely(ret)) {
					dprintk(0, KERN_ERR, "cryptodev_hash_update: %d\n", ret);
					return ret;
				}
			}

			if (len > 0) {
				ret = cryptodev_hash_update(&ses_ptr->hdata,
							    dst_sg, len);
				if (unlikely(ret)) {
					dprintk(0, KERN_ERR, "cryptodev_hash_update: %d\n", ret);
					return ret;
				}
			}

			ret = cryptodev_hash_final(&ses_ptr->hdata, hash_output);
			if (unlikely(ret)) {
				dprintk(0, KERN_ERR, "cryptodev_hash_final: %d\n", ret);
				return ret;
			}

			copy_tls_hash(dst_sg, len, hash_output, caop->tag_len);
			len += caop->tag_len;
		}

		if (ses_ptr->cdata.init != 0) {
			if (ses_ptr->cdata.blocksize > 1) {
				ret = pad_record(dst_sg, len, ses_ptr->cdata.blocksize);
				len += ret;
			}

			ret = cryptodev_cipher_encrypt(&ses_ptr->cdata,
						       dst_sg, dst_sg, len);
			if (unlikely(ret)) {
				dprintk(0, KERN_ERR, "cryptodev_cipher_encrypt: %d\n", ret);
				return ret;
			}
		}
	} else {
		if (ses_ptr->cdata.init != 0) {
			ret = cryptodev_cipher_decrypt(&ses_ptr->cdata,
						       dst_sg, dst_sg, len);
			if (unlikely(ret)) {
				dprintk(0, KERN_ERR, "cryptodev_cipher_decrypt: %d\n", ret);
				return ret;
			}

			if (ses_ptr->cdata.blocksize > 1) {
				ret = verify_tls_record_pad(dst_sg, len, ses_ptr->cdata.blocksize);
				if (unlikely(ret < 0)) {
					dprintk(2, KERN_ERR, "verify_record_pad: %d\n", ret);
					fail = 1;
				} else {
					len -= ret;
				}
			}
		}

		if (ses_ptr->hdata.init != 0) {
			if (unlikely(caop->tag_len > sizeof(vhash) || caop->tag_len > len)) {
				dprintk(1, KERN_ERR, "Illegal tag len size\n");
				return -EINVAL;
			}

			read_tls_hash(dst_sg, len, vhash, caop->tag_len);
			len -= caop->tag_len;

			if (auth_len > 0) {
				ret = cryptodev_hash_update(&ses_ptr->hdata,
							    auth_sg, auth_len);
				if (unlikely(ret)) {
					dprintk(0, KERN_ERR, "cryptodev_hash_update: %d\n", ret);
					return ret;
				}
			}

			if (len > 0) {
				ret = cryptodev_hash_update(&ses_ptr->hdata,
							    dst_sg, len);
				if (unlikely(ret)) {
					dprintk(0, KERN_ERR, "cryptodev_hash_update: %d\n", ret);
					return ret;
				}
			}

			ret = cryptodev_hash_final(&ses_ptr->hdata, hash_output);
			if (unlikely(ret)) {
				dprintk(0, KERN_ERR, "cryptodev_hash_final: %d\n", ret);
				return ret;
			}

			if (memcmp(vhash, hash_output, caop->tag_len) != 0 || fail != 0) {
				dprintk(2, KERN_ERR, "MAC verification failed (tag_len: %d)\n", caop->tag_len);
				return -EBADMSG;
			}
		}
	}

	kcaop->dst_len = len;
	return 0;
}
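/*
 * Record layout produced by tls_auth_n_crypt() on encryption (the TLS 1.x
 * MAC-then-pad-then-encrypt CBC construction):
 *
 *	ENC( plaintext | MAC(auth data | plaintext) | padding )
 *
 * On decryption the pad-check result is folded into the `fail` flag and the
 * verdict is only delivered after the MAC comparison, rather than returning
 * at the first mismatch.
 */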
/* Authenticate and encrypt the SRTP way. During decryption
 * it verifies the tag and returns -EBADMSG on error.
 */
static int
srtp_auth_n_crypt(struct csession *ses_ptr, struct kernel_crypt_auth_op *kcaop,
		  struct scatterlist *auth_sg, uint32_t auth_len,
		  struct scatterlist *dst_sg, uint32_t len)
{
	int ret, fail = 0;
	struct crypt_auth_op *caop = &kcaop->caop;
	uint8_t vhash[AALG_MAX_RESULT_LEN];
	uint8_t hash_output[AALG_MAX_RESULT_LEN];

	/* SRTP authenticates the encrypted data. */
	if (caop->op == COP_ENCRYPT) {
		if (ses_ptr->cdata.init != 0) {
			ret = cryptodev_cipher_encrypt(&ses_ptr->cdata,
						       dst_sg, dst_sg, len);
			if (unlikely(ret)) {
				dprintk(0, KERN_ERR, "cryptodev_cipher_encrypt: %d\n", ret);
				return ret;
			}
		}

		if (ses_ptr->hdata.init != 0) {
			ret = cryptodev_hash_update(&ses_ptr->hdata,
						    auth_sg, auth_len);
			if (unlikely(ret)) {
				dprintk(0, KERN_ERR, "cryptodev_hash_update: %d\n", ret);
				return ret;
			}

			ret = cryptodev_hash_final(&ses_ptr->hdata, hash_output);
			if (unlikely(ret)) {
				dprintk(0, KERN_ERR, "cryptodev_hash_final: %d\n", ret);
				return ret;
			}

			if (unlikely(copy_to_user(caop->tag, hash_output, caop->tag_len)))
				return -EFAULT;
		}
	} else {
		if (ses_ptr->hdata.init != 0) {
			if (unlikely(caop->tag_len > sizeof(vhash) || caop->tag_len > len)) {
				dprintk(1, KERN_ERR, "Illegal tag len size\n");
				return -EINVAL;
			}

			if (unlikely(copy_from_user(vhash, caop->tag, caop->tag_len)))
				return -EFAULT;

			ret = cryptodev_hash_update(&ses_ptr->hdata,
						    auth_sg, auth_len);
			if (unlikely(ret)) {
				dprintk(0, KERN_ERR, "cryptodev_hash_update: %d\n", ret);
				return ret;
			}

			ret = cryptodev_hash_final(&ses_ptr->hdata, hash_output);
			if (unlikely(ret)) {
				dprintk(0, KERN_ERR, "cryptodev_hash_final: %d\n", ret);
				return ret;
			}

			if (memcmp(vhash, hash_output, caop->tag_len) != 0 || fail != 0) {
				dprintk(2, KERN_ERR, "MAC verification failed\n");
				return -EBADMSG;
			}
		}

		if (ses_ptr->cdata.init != 0) {
			ret = cryptodev_cipher_decrypt(&ses_ptr->cdata,
						       dst_sg, dst_sg, len);
			if (unlikely(ret)) {
				dprintk(0, KERN_ERR, "cryptodev_cipher_decrypt: %d\n", ret);
				return ret;
			}
		}
	}

	kcaop->dst_len = len;
	return 0;
}
/* Typical AEAD (i.e. GCM) encryption/decryption.
 * During decryption the tag is verified.
 */
static int
auth_n_crypt(struct csession *ses_ptr, struct kernel_crypt_auth_op *kcaop,
	     struct scatterlist *auth_sg, uint32_t auth_len,
	     struct scatterlist *src_sg,
	     struct scatterlist *dst_sg, uint32_t len)
{
	int ret;
	struct crypt_auth_op *caop = &kcaop->caop;
	int max_tag_len;

	max_tag_len = cryptodev_cipher_get_tag_size(&ses_ptr->cdata);
	if (unlikely(caop->tag_len > max_tag_len)) {
		dprintk(0, KERN_ERR, "Illegal tag length: %d\n", caop->tag_len);
		return -EINVAL;
	}

	if (caop->tag_len)
		cryptodev_cipher_set_tag_size(&ses_ptr->cdata, caop->tag_len);
	else
		caop->tag_len = max_tag_len;

	cryptodev_cipher_auth(&ses_ptr->cdata, auth_sg, auth_len);

	if (caop->op == COP_ENCRYPT) {
		ret = cryptodev_cipher_encrypt(&ses_ptr->cdata,
					       src_sg, dst_sg, len);
		if (unlikely(ret)) {
			dprintk(0, KERN_ERR, "cryptodev_cipher_encrypt: %d\n", ret);
			return ret;
		}

		kcaop->dst_len = len + caop->tag_len;
		caop->tag = caop->dst + len;
	} else {
		ret = cryptodev_cipher_decrypt(&ses_ptr->cdata,
					       src_sg, dst_sg, len);
		if (unlikely(ret)) {
			dprintk(0, KERN_ERR, "cryptodev_cipher_decrypt: %d\n", ret);
			return ret;
		}

		kcaop->dst_len = len - caop->tag_len;
		caop->tag = caop->dst + len - caop->tag_len;
	}

	return 0;
}
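/*
 * Tag placement used by auth_n_crypt(): the tag sits immediately after the
 * payload in the user's dst buffer, so caop->tag is simply pointed into it:
 *
 *	COP_ENCRYPT:  dst = [ ciphertext (len)           | tag (tag_len) ]
 *	COP_DECRYPT:  src = [ ciphertext (len - tag_len) | tag (tag_len) ]
 */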
/* This is the main crypto function - zero-copy edition */
static int
__crypto_auth_run_zc(struct csession *ses_ptr, struct kernel_crypt_auth_op *kcaop)
{
	struct scatterlist *dst_sg, *auth_sg, *src_sg;
	struct crypt_auth_op *caop = &kcaop->caop;
	int ret = 0;

	if (caop->flags & COP_FLAG_AEAD_SRTP_TYPE) {
		if (unlikely(ses_ptr->cdata.init != 0 &&
			     (ses_ptr->cdata.stream == 0 || ses_ptr->cdata.aead != 0))) {
			dprintk(0, KERN_ERR, "Only stream modes are allowed in SRTP mode (but not AEAD)\n");
			return -EINVAL;
		}

		ret = get_userbuf_srtp(ses_ptr, kcaop, &auth_sg, &dst_sg);
		if (unlikely(ret)) {
			dprintk(1, KERN_ERR, "get_userbuf_srtp(): Error getting user pages.\n");
			return ret;
		}

		ret = srtp_auth_n_crypt(ses_ptr, kcaop, auth_sg, caop->auth_len,
					dst_sg, caop->len);

		release_user_pages(ses_ptr);
	} else { /* TLS and normal cases. Here auth data are usually small
		  * so we just copy them to a free page, instead of trying
		  * to map them.
		  */
		unsigned char *auth_buf = NULL;
		struct scatterlist tmp;

		if (unlikely(caop->auth_len > PAGE_SIZE)) {
			dprintk(1, KERN_ERR, "auth data len is excessive.\n");
			return -EINVAL;
		}

		auth_buf = (char *)__get_free_page(GFP_KERNEL);
		if (unlikely(!auth_buf)) {
			dprintk(1, KERN_ERR, "unable to get a free page.\n");
			return -ENOMEM;
		}

		if (caop->auth_src && caop->auth_len > 0) {
			if (unlikely(copy_from_user(auth_buf, caop->auth_src, caop->auth_len))) {
				dprintk(1, KERN_ERR, "unable to copy auth data from userspace.\n");
				ret = -EFAULT;
				goto free_auth_buf;
			}

			sg_init_one(&tmp, auth_buf, caop->auth_len);
			auth_sg = &tmp;
		} else {
			auth_sg = NULL;
		}

		if (caop->flags & COP_FLAG_AEAD_TLS_TYPE && ses_ptr->cdata.aead == 0) {
			ret = get_userbuf_tls(ses_ptr, kcaop, &dst_sg);
			if (unlikely(ret)) {
				dprintk(1, KERN_ERR, "get_userbuf_tls(): Error getting user pages.\n");
				goto free_auth_buf;
			}

			ret = tls_auth_n_crypt(ses_ptr, kcaop, auth_sg, caop->auth_len,
					       dst_sg, caop->len);
		} else {
			int dst_len;

			if (unlikely(ses_ptr->cdata.init == 0 ||
				     ses_ptr->cdata.stream == 0 ||
				     ses_ptr->cdata.aead == 0)) {
				dprintk(0, KERN_ERR, "Only stream and AEAD ciphers are allowed for authenc\n");
				ret = -EINVAL;
				goto free_auth_buf;
			}

			if (caop->op == COP_ENCRYPT)
				dst_len = caop->len + cryptodev_cipher_get_tag_size(&ses_ptr->cdata);
			else
				dst_len = caop->len;

			ret = get_userbuf(ses_ptr, caop->src, caop->len, caop->dst, dst_len,
					  kcaop->task, kcaop->mm, &src_sg, &dst_sg);
			if (unlikely(ret)) {
				dprintk(1, KERN_ERR, "get_userbuf(): Error getting user pages.\n");
				goto free_auth_buf;
			}

			ret = auth_n_crypt(ses_ptr, kcaop, auth_sg, caop->auth_len,
					   src_sg, dst_sg, caop->len);
		}

		release_user_pages(ses_ptr);

free_auth_buf:
		free_page((unsigned long)auth_buf);
	}

	return ret;
}
int crypto_auth_run(struct fcrypt *fcr, struct kernel_crypt_auth_op *kcaop)
{
	struct csession *ses_ptr;
	struct crypt_auth_op *caop = &kcaop->caop;
	int ret;

	if (unlikely(caop->op != COP_ENCRYPT && caop->op != COP_DECRYPT)) {
		dprintk(1, KERN_DEBUG, "invalid operation op=%u\n", caop->op);
		return -EINVAL;
	}

	/* this also enters ses_ptr->sem */
	ses_ptr = crypto_get_session_by_sid(fcr, caop->ses);
	if (unlikely(!ses_ptr)) {
		dprintk(1, KERN_ERR, "invalid session ID=0x%08X\n", caop->ses);
		return -EINVAL;
	}

	if (unlikely(ses_ptr->cdata.init == 0)) {
		dprintk(1, KERN_ERR, "cipher context not initialized\n");
		ret = -EINVAL;
		goto out_unlock;
	}

	/* If we have a hash/mac handle reset its state */
	if (ses_ptr->hdata.init != 0) {
		ret = cryptodev_hash_reset(&ses_ptr->hdata);
		if (unlikely(ret)) {
			dprintk(1, KERN_ERR,
				"error in cryptodev_hash_reset()\n");
			goto out_unlock;
		}
	}

	cryptodev_cipher_set_iv(&ses_ptr->cdata, kcaop->iv,
				min(ses_ptr->cdata.ivsize, kcaop->ivlen));

	ret = __crypto_auth_run_zc(ses_ptr, kcaop);
	if (unlikely(ret)) {
		dprintk(1, KERN_ERR,
			"error in __crypto_auth_run_zc()\n");
		goto out_unlock;
	}

	ret = 0;

	cryptodev_cipher_get_iv(&ses_ptr->cdata, kcaop->iv,
				min(ses_ptr->cdata.ivsize, kcaop->ivlen));

out_unlock:
	crypto_put_session(ses_ptr);
	return ret;
}