/* Algorithms supported by virtio crypto device
 *
 * Authors: Gonglei <arei.gonglei@huawei.com>
 *
 * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/scatterlist.h>
#include <crypto/algapi.h>
#include <linux/err.h>
#include <crypto/scatterwalk.h>
#include <linux/atomic.h>

#include <uapi/linux/virtio_crypto.h>
#include "virtio_crypto_common.h"

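/*
 * Per-transform context: a reference to the backend virtio device plus one
 * device session per direction. The encryption and decryption sessions are
 * created together at setkey time and torn down together on exit or rekey.
 */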
struct virtio_crypto_ablkcipher_ctx {
	struct virtio_crypto *vcrypto;
	struct crypto_tfm *tfm;

	struct virtio_crypto_sym_session_info enc_sess_info;
	struct virtio_crypto_sym_session_info dec_sess_info;
};

struct virtio_crypto_sym_request {
	struct virtio_crypto_request base;

	/* Cipher or aead */
	uint32_t type;
	struct virtio_crypto_ablkcipher_ctx *ablkcipher_ctx;
	struct ablkcipher_request *ablkcipher_req;

	uint8_t *iv;
	/* Encryption? */
	bool encrypt;
};

/*
 * The algs_lock protects the below global virtio_crypto_active_devs
 * and crypto algorithms registration.
 */
static DEFINE_MUTEX(algs_lock);
static unsigned int virtio_crypto_active_devs;

static void virtio_crypto_ablkcipher_finalize_req(
	struct virtio_crypto_sym_request *vc_sym_req,
	struct ablkcipher_request *req,
	int err);

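/*
 * Completion callback for symmetric-cipher requests on a data virtqueue:
 * translates the status byte written by the device into an errno value and
 * finalizes the ablkcipher request with it.
 */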
static void virtio_crypto_dataq_sym_callback
		(struct virtio_crypto_request *vc_req, int len)
{
	struct virtio_crypto_sym_request *vc_sym_req =
		container_of(vc_req, struct virtio_crypto_sym_request, base);
	struct ablkcipher_request *ablk_req;
	int error;

	/* Finish the encrypt or decrypt process */
	if (vc_sym_req->type == VIRTIO_CRYPTO_SYM_OP_CIPHER) {
		switch (vc_req->status) {
		case VIRTIO_CRYPTO_OK:
			error = 0;
			break;
		case VIRTIO_CRYPTO_INVSESS:
		case VIRTIO_CRYPTO_ERR:
			error = -EINVAL;
			break;
		case VIRTIO_CRYPTO_BADMSG:
			error = -EBADMSG;
			break;
		default:
			error = -EIO;
			break;
		}
		ablk_req = vc_sym_req->ablkcipher_req;
		virtio_crypto_ablkcipher_finalize_req(vc_sym_req,
						      ablk_req, error);
	}
}

static u64 virtio_crypto_alg_sg_nents_length(struct scatterlist *sg)
{
	u64 total = 0;

	for (total = 0; sg; sg = sg_next(sg))
		total += sg->length;

	return total;
}

static int
virtio_crypto_alg_validate_key(int key_len, uint32_t *alg)
{
	switch (key_len) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_192:
	case AES_KEYSIZE_256:
		*alg = VIRTIO_CRYPTO_CIPHER_AES_CBC;
		break;
	default:
		pr_err("virtio_crypto: Unsupported key length: %d\n",
		       key_len);
		return -EINVAL;
	}
	return 0;
}

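/*
 * Create one device session over the control virtqueue. The request is
 * three scatterlist entries: the control header (out), the raw key (out),
 * and the status/session-id buffer (in). Completion is polled, since the
 * hypervisor is expected to handle control requests synchronously.
 */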
static int virtio_crypto_alg_ablkcipher_init_session(
		struct virtio_crypto_ablkcipher_ctx *ctx,
		uint32_t alg, const uint8_t *key,
		unsigned int keylen,
		int encrypt)
{
	struct scatterlist outhdr, key_sg, inhdr, *sgs[3];
	unsigned int tmp;
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	int op = encrypt ? VIRTIO_CRYPTO_OP_ENCRYPT : VIRTIO_CRYPTO_OP_DECRYPT;
	int err;
	unsigned int num_out = 0, num_in = 0;

	/*
	 * Avoid DMA from the stack: use a dynamically-allocated buffer
	 * for the key.
	 */
	uint8_t *cipher_key = kmalloc(keylen, GFP_ATOMIC);

	if (!cipher_key)
		return -ENOMEM;

	memcpy(cipher_key, key, keylen);

	spin_lock(&vcrypto->ctrl_lock);
	/* Pad ctrl header */
	vcrypto->ctrl.header.opcode =
		cpu_to_le32(VIRTIO_CRYPTO_CIPHER_CREATE_SESSION);
	vcrypto->ctrl.header.algo = cpu_to_le32(alg);
	/* Set the default dataqueue id to 0 */
	vcrypto->ctrl.header.queue_id = 0;

	vcrypto->input.status = cpu_to_le32(VIRTIO_CRYPTO_ERR);
	/* Pad cipher's parameters */
	vcrypto->ctrl.u.sym_create_session.op_type =
		cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
	vcrypto->ctrl.u.sym_create_session.u.cipher.para.algo =
		vcrypto->ctrl.header.algo;
	vcrypto->ctrl.u.sym_create_session.u.cipher.para.keylen =
		cpu_to_le32(keylen);
	vcrypto->ctrl.u.sym_create_session.u.cipher.para.op =
		cpu_to_le32(op);

	sg_init_one(&outhdr, &vcrypto->ctrl, sizeof(vcrypto->ctrl));
	sgs[num_out++] = &outhdr;

	/* Set key */
	sg_init_one(&key_sg, cipher_key, keylen);
	sgs[num_out++] = &key_sg;

	/* Return status and session id back */
	sg_init_one(&inhdr, &vcrypto->input, sizeof(vcrypto->input));
	sgs[num_out + num_in++] = &inhdr;

	err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out,
				num_in, vcrypto, GFP_ATOMIC);
	if (err < 0) {
		spin_unlock(&vcrypto->ctrl_lock);
		kzfree(cipher_key);
		return err;
	}
	virtqueue_kick(vcrypto->ctrl_vq);

	/*
	 * Trapping into the hypervisor, so the request should be
	 * handled immediately.
	 */
	while (!virtqueue_get_buf(vcrypto->ctrl_vq, &tmp) &&
	       !virtqueue_is_broken(vcrypto->ctrl_vq))
		cpu_relax();

	if (le32_to_cpu(vcrypto->input.status) != VIRTIO_CRYPTO_OK) {
		spin_unlock(&vcrypto->ctrl_lock);
		pr_err("virtio_crypto: Create session failed status: %u\n",
		       le32_to_cpu(vcrypto->input.status));
		kzfree(cipher_key);
		return -EINVAL;
	}

	if (encrypt)
		ctx->enc_sess_info.session_id =
			le64_to_cpu(vcrypto->input.session_id);
	else
		ctx->dec_sess_info.session_id =
			le64_to_cpu(vcrypto->input.session_id);

	spin_unlock(&vcrypto->ctrl_lock);

	kzfree(cipher_key);
	return 0;
}

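/*
 * Destroy one device session over the control virtqueue: a destroy-session
 * header (out) plus a one-byte status buffer (in), polled to completion in
 * the same way as session creation.
 */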
static int virtio_crypto_alg_ablkcipher_close_session(
		struct virtio_crypto_ablkcipher_ctx *ctx,
		int encrypt)
{
	struct scatterlist outhdr, status_sg, *sgs[2];
	unsigned int tmp;
	struct virtio_crypto_destroy_session_req *destroy_session;
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	int err;
	unsigned int num_out = 0, num_in = 0;

	spin_lock(&vcrypto->ctrl_lock);
	vcrypto->ctrl_status.status = VIRTIO_CRYPTO_ERR;
	/* Pad ctrl header */
	vcrypto->ctrl.header.opcode =
		cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION);
	/* Set the default virtqueue id to 0 */
	vcrypto->ctrl.header.queue_id = 0;

	destroy_session = &vcrypto->ctrl.u.destroy_session;

	if (encrypt)
		destroy_session->session_id =
			cpu_to_le64(ctx->enc_sess_info.session_id);
	else
		destroy_session->session_id =
			cpu_to_le64(ctx->dec_sess_info.session_id);

	sg_init_one(&outhdr, &vcrypto->ctrl, sizeof(vcrypto->ctrl));
	sgs[num_out++] = &outhdr;

	/* Return status and session id back */
	sg_init_one(&status_sg, &vcrypto->ctrl_status.status,
		    sizeof(vcrypto->ctrl_status.status));
	sgs[num_out + num_in++] = &status_sg;

	err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out,
				num_in, vcrypto, GFP_ATOMIC);
	if (err < 0) {
		spin_unlock(&vcrypto->ctrl_lock);
		return err;
	}
	virtqueue_kick(vcrypto->ctrl_vq);

	while (!virtqueue_get_buf(vcrypto->ctrl_vq, &tmp) &&
	       !virtqueue_is_broken(vcrypto->ctrl_vq))
		cpu_relax();

	if (vcrypto->ctrl_status.status != VIRTIO_CRYPTO_OK) {
		spin_unlock(&vcrypto->ctrl_lock);
		pr_err("virtio_crypto: Close session failed status: %u, session_id: 0x%llx\n",
		       vcrypto->ctrl_status.status,
		       destroy_session->session_id);

		return -EINVAL;
	}

	spin_unlock(&vcrypto->ctrl_lock);

	return 0;
}

static int virtio_crypto_alg_ablkcipher_init_sessions(
		struct virtio_crypto_ablkcipher_ctx *ctx,
		const uint8_t *key, unsigned int keylen)
{
	uint32_t alg;
	int ret;
	struct virtio_crypto *vcrypto = ctx->vcrypto;

	if (keylen > vcrypto->max_cipher_key_len) {
		pr_err("virtio_crypto: the key is too long\n");
		goto bad_key;
	}

	if (virtio_crypto_alg_validate_key(keylen, &alg))
		goto bad_key;

	/* Create encryption session */
	ret = virtio_crypto_alg_ablkcipher_init_session(ctx,
			alg, key, keylen, 1);
	if (ret)
		return ret;
	/* Create decryption session */
	ret = virtio_crypto_alg_ablkcipher_init_session(ctx,
			alg, key, keylen, 0);
	if (ret) {
		virtio_crypto_alg_ablkcipher_close_session(ctx, 1);
		return ret;
	}
	return 0;

bad_key:
	crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

/* Note: kernel crypto API realization */
static int virtio_crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
					   const uint8_t *key,
					   unsigned int keylen)
{
	struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	int ret;

	if (!ctx->vcrypto) {
		/* New key */
		int node = virtio_crypto_get_current_node();
		struct virtio_crypto *vcrypto =
				virtcrypto_get_dev_node(node);
		if (!vcrypto) {
			pr_err("virtio_crypto: Could not find a virtio device in the system\n");
			return -ENODEV;
		}

		ctx->vcrypto = vcrypto;
	} else {
		/* Rekeying: close the previously created sessions first */
		virtio_crypto_alg_ablkcipher_close_session(ctx, 1);
		virtio_crypto_alg_ablkcipher_close_session(ctx, 0);
	}

	ret = virtio_crypto_alg_ablkcipher_init_sessions(ctx, key, keylen);
	if (ret) {
		virtcrypto_dev_put(ctx->vcrypto);
		ctx->vcrypto = NULL;

		return ret;
	}

	return 0;
}

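/*
 * Build and submit one data-virtqueue request. The descriptor layout is:
 * operation header (out), IV (out), source buffers (out), destination
 * buffers (in), one status byte (in) - hence the "+ 3" when sizing the
 * scatterlist pointer array below.
 */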
static int
__virtio_crypto_ablkcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
		struct ablkcipher_request *req,
		struct data_queue *data_vq)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct virtio_crypto_ablkcipher_ctx *ctx = vc_sym_req->ablkcipher_ctx;
	struct virtio_crypto_request *vc_req = &vc_sym_req->base;
	unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	struct virtio_crypto_op_data_req *req_data;
	int src_nents, dst_nents;
	int err;
	unsigned long flags;
	struct scatterlist outhdr, iv_sg, status_sg, **sgs;
	int i;
	u64 dst_len;
	unsigned int num_out = 0, num_in = 0;
	int sg_total;
	uint8_t *iv;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	dst_nents = sg_nents(req->dst);

	pr_debug("virtio_crypto: Number of sgs (src_nents: %d, dst_nents: %d)\n",
		 src_nents, dst_nents);

	/* Why 3? outhdr + iv + inhdr */
	sg_total = src_nents + dst_nents + 3;
	sgs = kzalloc_node(sg_total * sizeof(*sgs), GFP_ATOMIC,
			   dev_to_node(&vcrypto->vdev->dev));
	if (!sgs)
		return -ENOMEM;

	req_data = kzalloc_node(sizeof(*req_data), GFP_ATOMIC,
				dev_to_node(&vcrypto->vdev->dev));
	if (!req_data) {
		kfree(sgs);
		return -ENOMEM;
	}

	vc_req->req_data = req_data;
	vc_sym_req->type = VIRTIO_CRYPTO_SYM_OP_CIPHER;
	/* Head of operation */
	if (vc_sym_req->encrypt) {
		req_data->header.session_id =
			cpu_to_le64(ctx->enc_sess_info.session_id);
		req_data->header.opcode =
			cpu_to_le32(VIRTIO_CRYPTO_CIPHER_ENCRYPT);
	} else {
		req_data->header.session_id =
			cpu_to_le64(ctx->dec_sess_info.session_id);
		req_data->header.opcode =
			cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DECRYPT);
	}
	req_data->u.sym_req.op_type = cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
	req_data->u.sym_req.u.cipher.para.iv_len = cpu_to_le32(ivsize);
	req_data->u.sym_req.u.cipher.para.src_data_len =
			cpu_to_le32(req->nbytes);

	dst_len = virtio_crypto_alg_sg_nents_length(req->dst);
	if (unlikely(dst_len > U32_MAX)) {
		pr_err("virtio_crypto: The dst_len is beyond U32_MAX\n");
		err = -EINVAL;
		goto free;
	}

	pr_debug("virtio_crypto: src_len: %u, dst_len: %llu\n",
		 req->nbytes, dst_len);

	if (unlikely(req->nbytes + dst_len + ivsize +
	    sizeof(vc_req->status) > vcrypto->max_size)) {
		pr_err("virtio_crypto: The length is too big\n");
		err = -EINVAL;
		goto free;
	}

	req_data->u.sym_req.u.cipher.para.dst_data_len =
			cpu_to_le32((uint32_t)dst_len);

	/* Outhdr */
	sg_init_one(&outhdr, req_data, sizeof(*req_data));
	sgs[num_out++] = &outhdr;

	/*
	 * Avoid DMA from the stack: use a dynamically-allocated buffer
	 * for the IV.
	 */
	iv = kzalloc_node(ivsize, GFP_ATOMIC,
			  dev_to_node(&vcrypto->vdev->dev));
	if (!iv) {
		err = -ENOMEM;
		goto free;
	}
	memcpy(iv, req->info, ivsize);
	sg_init_one(&iv_sg, iv, ivsize);
	sgs[num_out++] = &iv_sg;
	vc_sym_req->iv = iv;

	/* Source data */
	for (i = 0; i < src_nents; i++)
		sgs[num_out++] = &req->src[i];

	/* Destination data */
	for (i = 0; i < dst_nents; i++)
		sgs[num_out + num_in++] = &req->dst[i];

	/* Status */
	sg_init_one(&status_sg, &vc_req->status, sizeof(vc_req->status));
	sgs[num_out + num_in++] = &status_sg;

	vc_req->sgs = sgs;

	spin_lock_irqsave(&data_vq->lock, flags);
	err = virtqueue_add_sgs(data_vq->vq, sgs, num_out,
				num_in, vc_req, GFP_ATOMIC);
	virtqueue_kick(data_vq->vq);
	spin_unlock_irqrestore(&data_vq->lock, flags);
	if (unlikely(err < 0))
		goto free_iv;

	return 0;

free_iv:
	kzfree(iv);
free:
	kzfree(req_data);
	kfree(sgs);
	return err;
}

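/*
 * The encrypt/decrypt entry points do not touch the virtqueue themselves:
 * they fill in the per-request context and hand the request to the crypto
 * engine bound to the chosen data virtqueue, which later invokes
 * virtio_crypto_ablkcipher_crypt_req().
 */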
static int virtio_crypto_ablkcipher_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
	struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(atfm);
	struct virtio_crypto_sym_request *vc_sym_req =
				ablkcipher_request_ctx(req);
	struct virtio_crypto_request *vc_req = &vc_sym_req->base;
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	/* Use the first data virtqueue as default */
	struct data_queue *data_vq = &vcrypto->data_vq[0];

	vc_req->dataq = data_vq;
	vc_req->alg_cb = virtio_crypto_dataq_sym_callback;
	vc_sym_req->ablkcipher_ctx = ctx;
	vc_sym_req->ablkcipher_req = req;
	vc_sym_req->encrypt = true;

	return crypto_transfer_cipher_request_to_engine(data_vq->engine, req);
}

static int virtio_crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
	struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(atfm);
	struct virtio_crypto_sym_request *vc_sym_req =
				ablkcipher_request_ctx(req);
	struct virtio_crypto_request *vc_req = &vc_sym_req->base;
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	/* Use the first data virtqueue as default */
	struct data_queue *data_vq = &vcrypto->data_vq[0];

	vc_req->dataq = data_vq;
	vc_req->alg_cb = virtio_crypto_dataq_sym_callback;
	vc_sym_req->ablkcipher_ctx = ctx;
	vc_sym_req->ablkcipher_req = req;
	vc_sym_req->encrypt = false;

	return crypto_transfer_cipher_request_to_engine(data_vq->engine, req);
}

static int virtio_crypto_ablkcipher_init(struct crypto_tfm *tfm)
{
	struct virtio_crypto_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

	tfm->crt_ablkcipher.reqsize = sizeof(struct virtio_crypto_sym_request);
	ctx->tfm = tfm;

	return 0;
}

static void virtio_crypto_ablkcipher_exit(struct crypto_tfm *tfm)
{
	struct virtio_crypto_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

	if (!ctx->vcrypto)
		return;

	virtio_crypto_alg_ablkcipher_close_session(ctx, 1);
	virtio_crypto_alg_ablkcipher_close_session(ctx, 0);
	virtcrypto_dev_put(ctx->vcrypto);
	ctx->vcrypto = NULL;
}

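/*
 * Called back by the crypto engine to actually submit a queued request to
 * the data virtqueue.
 */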
int virtio_crypto_ablkcipher_crypt_req(
	struct crypto_engine *engine,
	struct ablkcipher_request *req)
{
	struct virtio_crypto_sym_request *vc_sym_req =
				ablkcipher_request_ctx(req);
	struct virtio_crypto_request *vc_req = &vc_sym_req->base;
	struct data_queue *data_vq = vc_req->dataq;
	int ret;

	ret = __virtio_crypto_ablkcipher_do_req(vc_sym_req, req, data_vq);
	if (ret < 0)
		return ret;

	virtqueue_kick(data_vq->vq);

	return 0;
}

static void virtio_crypto_ablkcipher_finalize_req(
	struct virtio_crypto_sym_request *vc_sym_req,
	struct ablkcipher_request *req,
	int err)
{
	crypto_finalize_cipher_request(vc_sym_req->base.dataq->engine,
				       req, err);
	kzfree(vc_sym_req->iv);
	virtcrypto_clear_request(&vc_sym_req->base);
}

static struct crypto_alg virtio_crypto_algs[] = { {
	.cra_name = "cbc(aes)",
	.cra_driver_name = "virtio_crypto_aes_cbc",
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct virtio_crypto_ablkcipher_ctx),
	.cra_module = THIS_MODULE,
	.cra_type = &crypto_ablkcipher_type,
	.cra_init = virtio_crypto_ablkcipher_init,
	.cra_exit = virtio_crypto_ablkcipher_exit,
	.cra_u = {
		.ablkcipher = {
			.setkey = virtio_crypto_ablkcipher_setkey,
			.decrypt = virtio_crypto_ablkcipher_decrypt,
			.encrypt = virtio_crypto_ablkcipher_encrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
	},
} };

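/*
 * Registration is reference-counted under algs_lock: the first virtio
 * crypto device to come up registers the algorithms, and the last one
 * going away unregisters them.
 */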
int virtio_crypto_algs_register(void)
{
	int ret = 0;

	mutex_lock(&algs_lock);
	if (++virtio_crypto_active_devs != 1)
		goto unlock;

	ret = crypto_register_algs(virtio_crypto_algs,
			ARRAY_SIZE(virtio_crypto_algs));
	if (ret)
		virtio_crypto_active_devs--;

unlock:
	mutex_unlock(&algs_lock);
	return ret;
}

void virtio_crypto_algs_unregister(void)
{
	mutex_lock(&algs_lock);
	if (--virtio_crypto_active_devs != 0)
		goto unlock;

	crypto_unregister_algs(virtio_crypto_algs,
			ARRAY_SIZE(virtio_crypto_algs));

unlock:
	mutex_unlock(&algs_lock);
}

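/*
 * Illustrative only (not part of this driver): a kernel client would reach
 * this implementation through the generic crypto API, e.g. (error handling
 * omitted, buffer names hypothetical):
 *
 *	struct crypto_ablkcipher *tfm =
 *		crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *	struct ablkcipher_request *req;
 *
 *	crypto_ablkcipher_setkey(tfm, key, keylen);
 *	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
 *	ablkcipher_request_set_crypt(req, src_sg, dst_sg, nbytes, iv);
 *	crypto_ablkcipher_encrypt(req);
 *
 * Whether this driver is picked over other "cbc(aes)" providers depends on
 * the relative cra_priority values of the registered implementations.
 */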