/*
 * Driver for /dev/crypto device (aka CryptoDev)
 *
 * Copyright (c) 2011, 2012 OpenSSL Software Foundation, Inc.
 *
 * Author: Nikos Mavrogiannopoulos
 *
 * This file is part of linux cryptodev.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

/*
 * This file handles the AEAD part of /dev/crypto.
 */
#include <crypto/hash.h>
#include <linux/crypto.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/ioctl.h>
#include <linux/random.h>
#include <linux/syscalls.h>
#include <linux/pagemap.h>
#include <linux/poll.h>
#include <linux/uaccess.h>
#include <crypto/cryptodev.h>
#include <crypto/scatterwalk.h>
#include <linux/scatterlist.h>
#include "cryptodev_int.h"
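/* Note: dprintk(), PAGECOUNT(), struct csession and the zero-copy helpers
 * used below (__get_userbuf, get_userbuf, adjust_sg_array, sg_copy,
 * sg_advance, release_user_pages) are not defined in this file; they are
 * presumably provided by "cryptodev_int.h" and the project's zero-copy
 * support code. */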
/* make caop->dst available in scatterlist.
 * (caop->src is assumed to be equal to caop->dst)
 */
static int get_userbuf_tls(struct csession *ses, struct kernel_crypt_auth_op *kcaop,
                           struct scatterlist **dst_sg, int *tot_pages)
{
        int pagecount = 0;
        struct crypt_auth_op *caop = &kcaop->caop;
        int rc;

        if (caop->dst == NULL)
                return -EINVAL;

        if (ses->alignmask) {
                if (!IS_ALIGNED((unsigned long)caop->dst, ses->alignmask))
                        dprintk(2, KERN_WARNING, "%s: careful - destination address %lx is not %d byte aligned\n",
                                __func__, (unsigned long)caop->dst, ses->alignmask + 1);
        }

        if (kcaop->dst_len == 0) {
                dprintk(1, KERN_WARNING, "Destination length cannot be zero\n");
                return -EINVAL;
        }

        pagecount = PAGECOUNT(caop->dst, kcaop->dst_len);

        (*tot_pages) = pagecount;

        rc = adjust_sg_array(ses, pagecount);
        if (rc)
                return rc;

        rc = __get_userbuf(caop->dst, kcaop->dst_len, 1, pagecount,
                           ses->pages, ses->sg, kcaop->task, kcaop->mm);
        if (unlikely(rc)) {
                dprintk(1, KERN_ERR,
                        "failed to get user pages for data input\n");
                return -EINVAL;
        }

        (*dst_sg) = ses->sg;

        return 0;
}
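/* Illustration (not from the original source) of what get_userbuf_tls()
 * sets up: the user buffer is pinned page by page and described by ses->sg,
 * so the cipher can operate on it in place:
 *
 *   caop->dst ............................. caop->dst + kcaop->dst_len
 *   |  page 0  |  page 1  |    ...    |  page pagecount-1  |
 *    \______ ses->pages / ses->sg (one entry per page) ____/
 */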
#define MAX_SRTP_AUTH_DATA_DIFF 256

/* Makes caop->auth_src available as a scatterlist.
 * It also provides a pointer to caop->dst, which, however,
 * is assumed to be within the caop->auth_src buffer. If not
 * (if their difference exceeds MAX_SRTP_AUTH_DATA_DIFF) it
 * returns an error.
 */
static int get_userbuf_srtp(struct csession *ses, struct kernel_crypt_auth_op *kcaop,
                            struct scatterlist **auth_sg, struct scatterlist **dst_sg,
                            int *tot_pages)
{
        int pagecount, diff;
        int auth_pagecount = 0;
        struct crypt_auth_op *caop = &kcaop->caop;
        int rc;

        if (caop->dst == NULL && caop->auth_src == NULL) {
                dprintk(1, KERN_ERR, "dst and auth_src cannot both be null\n");
                return -EINVAL;
        }

        if (ses->alignmask) {
                if (!IS_ALIGNED((unsigned long)caop->dst, ses->alignmask))
                        dprintk(2, KERN_WARNING, "%s: careful - destination address %lx is not %d byte aligned\n",
                                __func__, (unsigned long)caop->dst, ses->alignmask + 1);
                if (!IS_ALIGNED((unsigned long)caop->auth_src, ses->alignmask))
                        dprintk(2, KERN_WARNING, "%s: careful - auth_src address %lx is not %d byte aligned\n",
                                __func__, (unsigned long)caop->auth_src, ses->alignmask + 1);
        }

        if (unlikely(kcaop->dst_len == 0 || caop->auth_len == 0)) {
                dprintk(1, KERN_WARNING, "Destination and auth data lengths cannot be zero\n");
                return -EINVAL;
        }

        /* Note that in SRTP the auth data overlap with the data to be
         * encrypted (dst).
         */
        auth_pagecount = PAGECOUNT(caop->auth_src, caop->auth_len);
        diff = (int)(caop->src - caop->auth_src);
        if (diff > MAX_SRTP_AUTH_DATA_DIFF || diff < 0) {
                dprintk(1, KERN_WARNING, "auth_src must overlap with src (diff: %d).\n", diff);
                return -EINVAL;
        }

        (*tot_pages) = pagecount = auth_pagecount;

        rc = adjust_sg_array(ses, pagecount * 2); /* double pages to have pages for dst (= auth_src) */
        if (rc) {
                dprintk(1, KERN_ERR, "cannot adjust sg array\n");
                return rc;
        }

        rc = __get_userbuf(caop->auth_src, caop->auth_len, 1, auth_pagecount,
                           ses->pages, ses->sg, kcaop->task, kcaop->mm);
        if (unlikely(rc)) {
                dprintk(1, KERN_ERR,
                        "failed to get user pages for data input\n");
                return -EINVAL;
        }

        (*auth_sg) = ses->sg;

        (*dst_sg) = ses->sg + auth_pagecount;
        sg_init_table(*dst_sg, auth_pagecount);
        sg_copy(ses->sg, (*dst_sg), caop->auth_len);
        (*dst_sg) = sg_advance(*dst_sg, diff);
        if (*dst_sg == NULL) {
                release_user_pages(ses->pages, pagecount);
                dprintk(1, KERN_ERR,
                        "failed to get enough pages for auth data\n");
                return -EINVAL;
        }

        return 0;
}
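/* Illustration (not from the original source): in the SRTP case the
 * authenticated region contains the region to be encrypted, e.g.:
 *
 *   caop->auth_src                       caop->auth_src + auth_len
 *   |<------------------- auth_len ------------------->|
 *   |<-- diff -->|<--------------- len -------------...
 *                caop->src == caop->dst
 *
 * This is why the same pinned pages are duplicated into a second
 * scatterlist (sg_copy) and advanced by diff bytes (sg_advance) to
 * obtain *dst_sg.
 */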
static int fill_kcaop_from_caop(struct kernel_crypt_auth_op *kcaop, struct fcrypt *fcr)
{
        struct crypt_auth_op *caop = &kcaop->caop;
        struct csession *ses_ptr;
        int ret;

        /* this also enters ses_ptr->sem */
        ses_ptr = crypto_get_session_by_sid(fcr, caop->ses);
        if (unlikely(!ses_ptr)) {
                dprintk(1, KERN_ERR, "invalid session ID=0x%08X\n", caop->ses);
                return -EINVAL;
        }

        if (caop->flags & COP_FLAG_AEAD_TLS_TYPE || caop->flags & COP_FLAG_AEAD_SRTP_TYPE) {
                if (caop->src != caop->dst) {
                        dprintk(1, KERN_ERR,
                                "Non-inplace encryption and decryption is inefficient and not implemented\n");
                        ret = -EINVAL;
                        goto out_unlock;
                }
        }

        if (caop->tag_len == 0)
                caop->tag_len = ses_ptr->hdata.digestsize;

        kcaop->ivlen = caop->iv ? ses_ptr->cdata.ivsize : 0;

        if (caop->flags & COP_FLAG_AEAD_TLS_TYPE)
                kcaop->dst_len = caop->len + ses_ptr->cdata.blocksize /* pad */ + caop->tag_len;
        else
                kcaop->dst_len = caop->len;

        kcaop->task = current;
        kcaop->mm = current->mm;

        if (caop->iv) {
                ret = copy_from_user(kcaop->iv, caop->iv, kcaop->ivlen);
                if (unlikely(ret)) {
                        dprintk(1, KERN_ERR,
                                "error copying IV (%d bytes), copy_from_user returned %d for address %lx\n",
                                kcaop->ivlen, ret, (unsigned long)caop->iv);
                        ret = -EFAULT;
                        goto out_unlock;
                }
        }

        ret = 0;

out_unlock:
        crypto_put_session(ses_ptr);
        return ret;
}
static int fill_caop_from_kcaop(struct kernel_crypt_auth_op *kcaop, struct fcrypt *fcr)
{
        int ret;

        kcaop->caop.len = kcaop->dst_len;

        if (kcaop->ivlen && (kcaop->caop.flags & COP_FLAG_WRITE_IV)) {
                ret = copy_to_user(kcaop->caop.iv,
                                   kcaop->iv, kcaop->ivlen);
                if (unlikely(ret)) {
                        dprintk(1, KERN_ERR, "Error in copying to userspace\n");
                        return -EFAULT;
                }
        }
        return 0;
}
int kcaop_from_user(struct kernel_crypt_auth_op *kcaop,
                    struct fcrypt *fcr, void __user *arg)
{
        if (unlikely(copy_from_user(&kcaop->caop, arg, sizeof(kcaop->caop)))) {
                dprintk(1, KERN_ERR, "Error in copying from userspace\n");
                return -EFAULT;
        }

        return fill_kcaop_from_caop(kcaop, fcr);
}
int kcaop_to_user(struct kernel_crypt_auth_op *kcaop,
                  struct fcrypt *fcr, void __user *arg)
{
        int ret;

        ret = fill_caop_from_kcaop(kcaop, fcr);
        if (unlikely(ret)) {
                dprintk(1, KERN_ERR, "fill_caop_from_kcaop() failed\n");
                return ret;
        }

        if (unlikely(copy_to_user(arg, &kcaop->caop, sizeof(kcaop->caop)))) {
                dprintk(1, KERN_ERR, "Error in copying to userspace\n");
                return -EFAULT;
        }
        return 0;
}
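/* kcaop_from_user()/kcaop_to_user() bracket each request: the first copies
 * struct crypt_auth_op in from userspace and derives the kernel-side
 * parameters (IV, expected dst_len, task/mm); the second writes the updated
 * length, tag and (with COP_FLAG_WRITE_IV) the IV back out. */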
static void copy_tls_hash(struct scatterlist *dst_sg, int len, void *hash, int hash_len)
{
        scatterwalk_map_and_copy(hash, dst_sg, len, hash_len, 1);
}
static void read_tls_hash(struct scatterlist *dst_sg, int len, void *hash, int hash_len)
{
        scatterwalk_map_and_copy(hash, dst_sg, len - hash_len, hash_len, 0);
}
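/* The two helpers above assume the usual TLS CBC record layout, where the
 * MAC is appended to the plaintext before padding:
 *
 *   | plaintext | MAC (tag_len bytes) | padding | pad-length byte |
 *
 * copy_tls_hash() writes the MAC at offset len; read_tls_hash() reads it
 * back from the tail of the already-unpadded record. */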
static int pad_record(struct scatterlist *dst_sg, int len, int block_size)
{
        uint8_t pad[block_size];
        int pad_size = block_size - (len % block_size);

        /* TLS CBC padding: pad_size bytes, each holding the value pad_size - 1 */
        memset(pad, pad_size - 1, pad_size);

        scatterwalk_map_and_copy(pad, dst_sg, len, pad_size, 1);

        return pad_size;
}
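/* Worked example (illustrative): block_size = 16 and len = 29 give
 * pad_size = 16 - (29 % 16) = 3, so three bytes of value 0x02 are appended
 * and the record grows to 32 bytes. Each pad byte holds pad_size - 1,
 * matching the TLS convention that the final byte states how many padding
 * bytes precede it. */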
static int verify_tls_record_pad(struct scatterlist *dst_sg, int len, int block_size)
{
        uint8_t pad[256]; /* the maximum allowed */
        uint8_t pad_size;
        int i;

        scatterwalk_map_and_copy(&pad_size, dst_sg, len - 1, 1, 0);

        if (pad_size + 1 > len) {
                dprintk(1, KERN_ERR, "Pad size: %d\n", pad_size);
                return -ECANCELED;
        }

        scatterwalk_map_and_copy(pad, dst_sg, len - pad_size - 1, pad_size + 1, 0);

        /* every padding byte must equal the pad-length byte */
        for (i = 0; i < pad_size; i++)
                if (pad[i] != pad_size) {
                        dprintk(1, KERN_ERR, "Pad size: %d, pad: %d\n", pad_size, (int)pad[i]);
                        return -ECANCELED;
                }

        return pad_size + 1;
}
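/* Continuing the example above: for a record ending in ... 02 02 02,
 * pad_size is read as 2, the two preceding bytes are checked to equal 2,
 * and pad_size + 1 = 3 is returned so the caller can strip the whole pad.
 * Any mismatch yields -ECANCELED. */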
/* Authenticate and encrypt the TLS way (also perform padding).
 * During decryption it verifies the pad and tag and returns -ECANCELED on error.
 */
static int
tls_auth_n_crypt(struct csession *ses_ptr, struct kernel_crypt_auth_op *kcaop,
                 struct scatterlist *auth_sg, uint32_t auth_len,
                 struct scatterlist *dst_sg, uint32_t len)
{
        int ret, fail = 0;
        struct crypt_auth_op *caop = &kcaop->caop;
        uint8_t vhash[AALG_MAX_RESULT_LEN];
        uint8_t hash_output[AALG_MAX_RESULT_LEN];

        /* TLS authenticates the plaintext except for the padding.
         */
        if (caop->op == COP_ENCRYPT) {
                if (ses_ptr->hdata.init != 0) {
                        if (auth_len > 0) {
                                ret = cryptodev_hash_update(&ses_ptr->hdata,
                                                            auth_sg, auth_len);
                                if (unlikely(ret)) {
                                        dprintk(0, KERN_ERR, "cryptodev_hash_update: %d\n", ret);
                                        return ret;
                                }
                        }

                        if (len > 0) {
                                ret = cryptodev_hash_update(&ses_ptr->hdata,
                                                            dst_sg, len);
                                if (unlikely(ret)) {
                                        dprintk(0, KERN_ERR, "cryptodev_hash_update: %d\n", ret);
                                        return ret;
                                }
                        }

                        ret = cryptodev_hash_final(&ses_ptr->hdata, hash_output);
                        if (unlikely(ret)) {
                                dprintk(0, KERN_ERR, "cryptodev_hash_final: %d\n", ret);
                                return ret;
                        }

                        copy_tls_hash(dst_sg, len, hash_output, caop->tag_len);
                        len += caop->tag_len;
                }

                if (ses_ptr->cdata.init != 0) {
                        if (ses_ptr->cdata.blocksize > 1) {
                                ret = pad_record(dst_sg, len, ses_ptr->cdata.blocksize);
                                len += ret;
                        }

                        ret = cryptodev_cipher_encrypt(&ses_ptr->cdata,
                                                       dst_sg, dst_sg, len);
                        if (unlikely(ret)) {
                                dprintk(0, KERN_ERR, "cryptodev_cipher_encrypt: %d\n", ret);
                                return ret;
                        }
                }
        } else {
                if (ses_ptr->cdata.init != 0) {
                        ret = cryptodev_cipher_decrypt(&ses_ptr->cdata,
                                                       dst_sg, dst_sg, len);
                        if (unlikely(ret)) {
                                dprintk(0, KERN_ERR, "cryptodev_cipher_decrypt: %d\n", ret);
                                return ret;
                        }

                        if (ses_ptr->cdata.blocksize > 1) {
                                ret = verify_tls_record_pad(dst_sg, len, ses_ptr->cdata.blocksize);
                                if (unlikely(ret < 0)) {
                                        dprintk(2, KERN_ERR, "verify_record_pad: %d\n", ret);
                                        fail = 1;
                                } else {
                                        len -= ret;
                                }
                        }
                }

                if (ses_ptr->hdata.init != 0) {
                        if (unlikely(caop->tag_len > sizeof(vhash) || caop->tag_len > len)) {
                                dprintk(1, KERN_ERR, "Illegal tag len size\n");
                                return -EINVAL;
                        }

                        read_tls_hash(dst_sg, len, vhash, caop->tag_len);
                        len -= caop->tag_len;

                        if (auth_len > 0) {
                                ret = cryptodev_hash_update(&ses_ptr->hdata,
                                                            auth_sg, auth_len);
                                if (unlikely(ret)) {
                                        dprintk(0, KERN_ERR, "cryptodev_hash_update: %d\n", ret);
                                        return ret;
                                }
                        }

                        if (len > 0) {
                                ret = cryptodev_hash_update(&ses_ptr->hdata,
                                                            dst_sg, len);
                                if (unlikely(ret)) {
                                        dprintk(0, KERN_ERR, "cryptodev_hash_update: %d\n", ret);
                                        return ret;
                                }
                        }

                        ret = cryptodev_hash_final(&ses_ptr->hdata, hash_output);
                        if (unlikely(ret)) {
                                dprintk(0, KERN_ERR, "cryptodev_hash_final: %d\n", ret);
                                return ret;
                        }

                        if (memcmp(vhash, hash_output, caop->tag_len) != 0 || fail != 0) {
                                dprintk(2, KERN_ERR, "MAC verification failed (tag_len: %d)\n", caop->tag_len);
                                return -ECANCELED;
                        }
                }
        }
        kcaop->dst_len = len;
        return 0;
}
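/* Note on the fail flag above: a bad pad does not cause an early return.
 * The MAC is still computed, and a pad failure is reported identically to
 * a MAC failure (-ECANCELED), so a padding error is not distinguishable
 * from a MAC error by the caller. */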
/* Authenticate and encrypt the SRTP way. During decryption
 * it verifies the tag and returns -ECANCELED on error.
 */
static int
srtp_auth_n_crypt(struct csession *ses_ptr, struct kernel_crypt_auth_op *kcaop,
                  struct scatterlist *auth_sg, uint32_t auth_len,
                  struct scatterlist *dst_sg, uint32_t len)
{
        int ret, fail = 0;
        struct crypt_auth_op *caop = &kcaop->caop;
        uint8_t vhash[AALG_MAX_RESULT_LEN];
        uint8_t hash_output[AALG_MAX_RESULT_LEN];

        /* SRTP authenticates the encrypted data.
         */
        if (caop->op == COP_ENCRYPT) {
                if (ses_ptr->cdata.init != 0) {
                        ret = cryptodev_cipher_encrypt(&ses_ptr->cdata,
                                                       dst_sg, dst_sg, len);
                        if (unlikely(ret)) {
                                dprintk(0, KERN_ERR, "cryptodev_cipher_encrypt: %d\n", ret);
                                return ret;
                        }
                }

                if (ses_ptr->hdata.init != 0) {
                        ret = cryptodev_hash_update(&ses_ptr->hdata,
                                                    auth_sg, auth_len);
                        if (unlikely(ret)) {
                                dprintk(0, KERN_ERR, "cryptodev_hash_update: %d\n", ret);
                                return ret;
                        }

                        ret = cryptodev_hash_final(&ses_ptr->hdata, hash_output);
                        if (unlikely(ret)) {
                                dprintk(0, KERN_ERR, "cryptodev_hash_final: %d\n", ret);
                                return ret;
                        }

                        if (unlikely(copy_to_user(caop->tag, hash_output, caop->tag_len)))
                                return -EFAULT;
                }
        } else {
                if (ses_ptr->hdata.init != 0) {
                        if (unlikely(caop->tag_len > sizeof(vhash) || caop->tag_len > len)) {
                                dprintk(1, KERN_ERR, "Illegal tag len size\n");
                                return -EINVAL;
                        }

                        if (unlikely(copy_from_user(vhash, caop->tag, caop->tag_len)))
                                return -EFAULT;

                        ret = cryptodev_hash_update(&ses_ptr->hdata,
                                                    auth_sg, auth_len);
                        if (unlikely(ret)) {
                                dprintk(0, KERN_ERR, "cryptodev_hash_update: %d\n", ret);
                                return ret;
                        }

                        ret = cryptodev_hash_final(&ses_ptr->hdata, hash_output);
                        if (unlikely(ret)) {
                                dprintk(0, KERN_ERR, "cryptodev_hash_final: %d\n", ret);
                                return ret;
                        }

                        if (memcmp(vhash, hash_output, caop->tag_len) != 0 || fail != 0) {
                                dprintk(2, KERN_ERR, "MAC verification failed\n");
                                return -ECANCELED;
                        }
                }

                if (ses_ptr->cdata.init != 0) {
                        ret = cryptodev_cipher_decrypt(&ses_ptr->cdata,
                                                       dst_sg, dst_sg, len);
                        if (unlikely(ret)) {
                                dprintk(0, KERN_ERR, "cryptodev_cipher_decrypt: %d\n", ret);
                                return ret;
                        }
                }
        }
        kcaop->dst_len = len;
        return 0;
}
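/* Contrast with tls_auth_n_crypt(): SRTP is encrypt-then-MAC (the tag is
 * computed over the ciphertext and exchanged via the separate caop->tag
 * pointer), while the TLS path is MAC-then-encrypt (the tag is embedded in
 * the record before padding and encryption). */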
/* Typical AEAD (e.g. GCM) encryption/decryption.
 * During decryption the tag is verified.
 */
static int
auth_n_crypt(struct csession *ses_ptr, struct kernel_crypt_auth_op *kcaop,
             struct scatterlist *auth_sg, uint32_t auth_len,
             struct scatterlist *src_sg,
             struct scatterlist *dst_sg, uint32_t len)
{
        int ret;
        struct crypt_auth_op *caop = &kcaop->caop;
        int max_tag_len;

        max_tag_len = cryptodev_cipher_get_tag_size(&ses_ptr->cdata);
        if (unlikely(caop->tag_len > max_tag_len)) {
                dprintk(0, KERN_ERR, "Illegal tag length: %d\n", caop->tag_len);
                return -EINVAL;
        }

        if (caop->tag_len)
                cryptodev_cipher_set_tag_size(&ses_ptr->cdata, caop->tag_len);
        else
                caop->tag_len = max_tag_len;

        if (caop->op == COP_ENCRYPT) {
                if (auth_len > 0)
                        cryptodev_cipher_auth(&ses_ptr->cdata, auth_sg, auth_len);
                else /* for some reason we _have_ to call that */
                        cryptodev_cipher_auth(&ses_ptr->cdata, NULL, 0);

                ret = cryptodev_cipher_encrypt(&ses_ptr->cdata,
                                               src_sg, dst_sg, len);
                if (unlikely(ret)) {
                        dprintk(0, KERN_ERR, "cryptodev_cipher_encrypt: %d\n", ret);
                        return ret;
                }
                kcaop->dst_len = len + caop->tag_len;
                caop->tag = caop->dst + len;
        } else {
                if (auth_len > 0)
                        cryptodev_cipher_auth(&ses_ptr->cdata, auth_sg, auth_len);

                ret = cryptodev_cipher_decrypt(&ses_ptr->cdata,
                                               src_sg, dst_sg, len);
                if (unlikely(ret)) {
                        dprintk(0, KERN_ERR, "cryptodev_cipher_decrypt: %d\n", ret);
                        return ret;
                }
                kcaop->dst_len = len - caop->tag_len;
                caop->tag = caop->dst + len - caop->tag_len;
        }

        return 0;
}
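/* For AEAD the tag travels in-line with the ciphertext: on encryption
 * dst_len grows by tag_len and caop->tag points just past the payload in
 * the user's dst buffer; on decryption the trailing tag_len bytes are
 * consumed by verification and dst_len shrinks accordingly. */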
/* This is the main crypto function - zero-copy edition */
static int
__crypto_auth_run_zc(struct csession *ses_ptr, struct kernel_crypt_auth_op *kcaop)
{
        struct scatterlist *dst_sg, *auth_sg, *src_sg;
        struct crypt_auth_op *caop = &kcaop->caop;
        int ret = 0, pagecount = 0;

        if (caop->flags & COP_FLAG_AEAD_SRTP_TYPE) {
                if (unlikely(ses_ptr->cdata.init != 0 &&
                    (ses_ptr->cdata.stream == 0 || ses_ptr->cdata.aead != 0))) {
                        dprintk(0, KERN_ERR, "Only stream modes are allowed in SRTP mode (but not AEAD)\n");
                        return -EINVAL;
                }

                ret = get_userbuf_srtp(ses_ptr, kcaop, &auth_sg, &dst_sg, &pagecount);
                if (unlikely(ret)) {
                        dprintk(1, KERN_ERR, "get_userbuf_srtp(): Error getting user pages.\n");
                        return ret;
                }

                ret = srtp_auth_n_crypt(ses_ptr, kcaop, auth_sg, caop->auth_len,
                                        dst_sg, caop->len);
        } else { /* TLS and normal cases. Here auth data are usually small,
                  * so we just copy them to a free page instead of trying
                  * to map them. */
                unsigned char *auth_buf = NULL;
                struct scatterlist tmp;

                if (unlikely(caop->auth_len > PAGE_SIZE))
                        return -EINVAL;

                auth_buf = (unsigned char *)__get_free_page(GFP_KERNEL);
                if (unlikely(!auth_buf))
                        return -ENOMEM;

                if (caop->auth_len > 0) {
                        if (unlikely(copy_from_user(auth_buf, caop->auth_src, caop->auth_len))) {
                                ret = -EFAULT;
                                goto fail;
                        }

                        sg_init_one(&tmp, auth_buf, caop->auth_len);
                        auth_sg = &tmp;
                } else {
                        auth_sg = NULL;
                }

                if (caop->flags & COP_FLAG_AEAD_TLS_TYPE && ses_ptr->cdata.aead == 0) {
                        ret = get_userbuf_tls(ses_ptr, kcaop, &dst_sg, &pagecount);
                        if (unlikely(ret)) {
                                dprintk(1, KERN_ERR, "get_userbuf_tls(): Error getting user pages.\n");
                                goto fail;
                        }

                        ret = tls_auth_n_crypt(ses_ptr, kcaop, auth_sg, caop->auth_len,
                                               dst_sg, caop->len);
                } else {
                        int dst_len;

                        if (unlikely(ses_ptr->cdata.init == 0 ||
                                     ses_ptr->cdata.stream == 0 ||
                                     ses_ptr->cdata.aead == 0)) {
                                dprintk(0, KERN_ERR, "Only stream and AEAD ciphers are allowed for authenc\n");
                                ret = -EINVAL;
                                goto fail;
                        }

                        if (caop->op == COP_ENCRYPT)
                                dst_len = caop->len + cryptodev_cipher_get_tag_size(&ses_ptr->cdata);
                        else
                                dst_len = caop->len - cryptodev_cipher_get_tag_size(&ses_ptr->cdata);

                        ret = get_userbuf(ses_ptr, caop->src, caop->len, caop->dst, dst_len,
                                          kcaop->task, kcaop->mm, &src_sg, &dst_sg, &pagecount);
                        if (unlikely(ret)) {
                                dprintk(1, KERN_ERR, "get_userbuf(): Error getting user pages.\n");
                                goto fail;
                        }

                        ret = auth_n_crypt(ses_ptr, kcaop, auth_sg, caop->auth_len,
                                           src_sg, dst_sg, caop->len);
                }

fail:
                free_page((unsigned long)auth_buf);
        }

        release_user_pages(ses_ptr->pages, pagecount);
        return ret;
}
int crypto_auth_run(struct fcrypt *fcr, struct kernel_crypt_auth_op *kcaop)
{
        struct csession *ses_ptr;
        struct crypt_auth_op *caop = &kcaop->caop;
        int ret;

        if (unlikely(caop->op != COP_ENCRYPT && caop->op != COP_DECRYPT)) {
                dprintk(1, KERN_DEBUG, "invalid operation op=%u\n", caop->op);
                return -EINVAL;
        }

        /* this also enters ses_ptr->sem */
        ses_ptr = crypto_get_session_by_sid(fcr, caop->ses);
        if (unlikely(!ses_ptr)) {
                dprintk(1, KERN_ERR, "invalid session ID=0x%08X\n", caop->ses);
                return -EINVAL;
        }

        if (unlikely(ses_ptr->cdata.init == 0)) {
                dprintk(1, KERN_ERR, "cipher context not initialized\n");
                ret = -EINVAL;
                goto out_unlock;
        }

        /* If we have a hash/mac handle reset its state */
        if (ses_ptr->hdata.init != 0) {
                ret = cryptodev_hash_reset(&ses_ptr->hdata);
                if (unlikely(ret)) {
                        dprintk(1, KERN_ERR,
                                "error in cryptodev_hash_reset()\n");
                        goto out_unlock;
                }
        }

        cryptodev_cipher_set_iv(&ses_ptr->cdata, kcaop->iv,
                                min(ses_ptr->cdata.ivsize, kcaop->ivlen));

        if (likely(caop->len || caop->auth_len)) {
                ret = __crypto_auth_run_zc(ses_ptr, kcaop);
                if (unlikely(ret))
                        goto out_unlock;
        } else {
                ret = -EINVAL;
                goto out_unlock;
        }

        cryptodev_cipher_get_iv(&ses_ptr->cdata, kcaop->iv,
                                min(ses_ptr->cdata.ivsize, kcaop->ivlen));

out_unlock:
        crypto_put_session(ses_ptr);
        return ret;
}
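/*
 * Userspace usage sketch (illustrative only, not part of this file): the
 * entry points above back the CIOCAUTHCRYPT ioctl. A caller holding a
 * session id from CIOCGSESSION would do roughly:
 *
 *      struct crypt_auth_op cao = {
 *              .ses      = sid,
 *              .op       = COP_ENCRYPT,
 *              .flags    = COP_FLAG_AEAD_TLS_TYPE,
 *              .len      = data_len,
 *              .src      = data,
 *              .dst      = data,       (in place, as required above)
 *              .auth_len = aad_len,
 *              .auth_src = aad,
 *              .iv       = iv,
 *      };
 *      if (ioctl(cfd, CIOCAUTHCRYPT, &cao))
 *              perror("CIOCAUTHCRYPT");
 *
 * On success cao.len holds the new record length (payload, tag and any
 * padding included).
 */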