// SPDX-License-Identifier: GPL-2.0-or-later
/* Algorithms supported by virtio crypto device
 *
 * Authors: Gonglei <arei.gonglei@huawei.com>
 *
 * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
 */

#include <crypto/engine.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <uapi/linux/virtio_crypto.h>
#include "virtio_crypto_common.h"

struct virtio_crypto_skcipher_ctx {
	struct virtio_crypto *vcrypto;
	struct crypto_skcipher *tfm;

	struct virtio_crypto_sym_session_info enc_sess_info;
	struct virtio_crypto_sym_session_info dec_sess_info;
};

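/*
 * Per-request context: this structure lives in the skcipher request's
 * context area (sized via crypto_skcipher_set_reqsize() below) and carries
 * the state needed by the data-queue completion callback: the owning tfm
 * context, the original skcipher_request, the bounce buffer for the IV and
 * the direction of the operation.
 */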
struct virtio_crypto_sym_request {
	struct virtio_crypto_request base;

	/* Cipher or aead */
	uint32_t type;
	struct virtio_crypto_skcipher_ctx *skcipher_ctx;
	struct skcipher_request *skcipher_req;
	uint8_t *iv;
	/* Encryption? */
	bool encrypt;
};

struct virtio_crypto_algo {
	uint32_t algonum;
	uint32_t service;
	unsigned int active_devs;
	struct skcipher_engine_alg algo;
};

/*
 * The algs_lock protects the global virtio_crypto_algs[] table below
 * (its active_devs counters) and crypto algorithm registration.
 */
static DEFINE_MUTEX(algs_lock);

static void virtio_crypto_skcipher_finalize_req(
	struct virtio_crypto_sym_request *vc_sym_req,
	struct skcipher_request *req,
	int err);

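/*
 * Completion callback invoked for requests on the data virtqueue. It maps
 * the device's virtio-crypto status codes to Linux error codes and then
 * finalizes the skcipher request on the crypto engine.
 */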
static void virtio_crypto_dataq_sym_callback(struct virtio_crypto_request *vc_req,
					     int len)
{
	struct virtio_crypto_sym_request *vc_sym_req =
		container_of(vc_req, struct virtio_crypto_sym_request, base);
	struct skcipher_request *ablk_req;
	int error;

	/* Finish the encrypt or decrypt process */
	if (vc_sym_req->type == VIRTIO_CRYPTO_SYM_OP_CIPHER) {
		switch (vc_req->status) {
		case VIRTIO_CRYPTO_OK:
			error = 0;
			break;
		case VIRTIO_CRYPTO_INVSESS:
		case VIRTIO_CRYPTO_ERR:
			error = -EINVAL;
			break;
		case VIRTIO_CRYPTO_BADMSG:
			error = -EBADMSG;
			break;
		default:
			error = -EIO;
			break;
		}
		ablk_req = vc_sym_req->skcipher_req;
		virtio_crypto_skcipher_finalize_req(vc_sym_req,
						    ablk_req, error);
	}
}

static u64 virtio_crypto_alg_sg_nents_length(struct scatterlist *sg)
{
	u64 total;

	for (total = 0; sg; sg = sg_next(sg))
		total += sg->length;

	return total;
}

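/*
 * Only AES-CBC is offered by this file, so the key length alone
 * (128/192/256 bits) determines the virtio algorithm identifier.
 */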
static int
virtio_crypto_alg_validate_key(int key_len, uint32_t *alg)
{
	switch (key_len) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_192:
	case AES_KEYSIZE_256:
		*alg = VIRTIO_CRYPTO_CIPHER_AES_CBC;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

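/*
 * Create a device-side session over the control virtqueue. The request is
 * laid out as three scatterlist entries: the control header (out), the raw
 * key (out) and the session_input (in), through which the device returns
 * its status and the new session id.
 */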
static int virtio_crypto_alg_skcipher_init_session(
		struct virtio_crypto_skcipher_ctx *ctx,
		uint32_t alg, const uint8_t *key,
		unsigned int keylen,
		int encrypt)
{
	struct scatterlist outhdr, key_sg, inhdr, *sgs[3];
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	int op = encrypt ? VIRTIO_CRYPTO_OP_ENCRYPT : VIRTIO_CRYPTO_OP_DECRYPT;
	int err;
	unsigned int num_out = 0, num_in = 0;
	struct virtio_crypto_op_ctrl_req *ctrl;
	struct virtio_crypto_session_input *input;
	struct virtio_crypto_sym_create_session_req *sym_create_session;
	struct virtio_crypto_ctrl_request *vc_ctrl_req;

	/*
	 * Avoid doing DMA from the stack: use a dynamically-allocated
	 * buffer for the key.
	 */
	uint8_t *cipher_key = kmemdup(key, keylen, GFP_ATOMIC);

	if (!cipher_key)
		return -ENOMEM;

	vc_ctrl_req = kzalloc(sizeof(*vc_ctrl_req), GFP_KERNEL);
	if (!vc_ctrl_req) {
		err = -ENOMEM;
		goto out;
	}

	/* Pad ctrl header */
	ctrl = &vc_ctrl_req->ctrl;
	ctrl->header.opcode = cpu_to_le32(VIRTIO_CRYPTO_CIPHER_CREATE_SESSION);
	ctrl->header.algo = cpu_to_le32(alg);
	/* Set the default dataqueue id to 0 */
	ctrl->header.queue_id = 0;

	input = &vc_ctrl_req->input;
	input->status = cpu_to_le32(VIRTIO_CRYPTO_ERR);
	/* Pad cipher's parameters */
	sym_create_session = &ctrl->u.sym_create_session;
	sym_create_session->op_type = cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
	sym_create_session->u.cipher.para.algo = ctrl->header.algo;
	sym_create_session->u.cipher.para.keylen = cpu_to_le32(keylen);
	sym_create_session->u.cipher.para.op = cpu_to_le32(op);

	sg_init_one(&outhdr, ctrl, sizeof(*ctrl));
	sgs[num_out++] = &outhdr;

	/* Set key */
	sg_init_one(&key_sg, cipher_key, keylen);
	sgs[num_out++] = &key_sg;

	/* Return status and session id back */
	sg_init_one(&inhdr, input, sizeof(*input));
	sgs[num_out + num_in++] = &inhdr;

	err = virtio_crypto_ctrl_vq_request(vcrypto, sgs, num_out, num_in, vc_ctrl_req);
	if (err < 0)
		goto out;

	if (le32_to_cpu(input->status) != VIRTIO_CRYPTO_OK) {
		pr_err("virtio_crypto: Create session failed status: %u\n",
		       le32_to_cpu(input->status));
		err = -EINVAL;
		goto out;
	}

	if (encrypt)
		ctx->enc_sess_info.session_id = le64_to_cpu(input->session_id);
	else
		ctx->dec_sess_info.session_id = le64_to_cpu(input->session_id);

	err = 0;
out:
	kfree(vc_ctrl_req);
	kfree_sensitive(cipher_key);
	return err;
}

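/*
 * Tear down one device-side session (encrypt or decrypt) over the control
 * virtqueue: one out entry carrying the DESTROY_SESSION control header and
 * one in entry receiving the status byte.
 */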
static int virtio_crypto_alg_skcipher_close_session(
		struct virtio_crypto_skcipher_ctx *ctx,
		int encrypt)
{
	struct scatterlist outhdr, status_sg, *sgs[2];
	struct virtio_crypto_destroy_session_req *destroy_session;
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	int err;
	unsigned int num_out = 0, num_in = 0;
	struct virtio_crypto_op_ctrl_req *ctrl;
	struct virtio_crypto_inhdr *ctrl_status;
	struct virtio_crypto_ctrl_request *vc_ctrl_req;

	vc_ctrl_req = kzalloc(sizeof(*vc_ctrl_req), GFP_KERNEL);
	if (!vc_ctrl_req)
		return -ENOMEM;

	ctrl_status = &vc_ctrl_req->ctrl_status;
	ctrl_status->status = VIRTIO_CRYPTO_ERR;
	/* Pad ctrl header */
	ctrl = &vc_ctrl_req->ctrl;
	ctrl->header.opcode = cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION);
	/* Set the default virtqueue id to 0 */
	ctrl->header.queue_id = 0;

	destroy_session = &ctrl->u.destroy_session;

	if (encrypt)
		destroy_session->session_id = cpu_to_le64(ctx->enc_sess_info.session_id);
	else
		destroy_session->session_id = cpu_to_le64(ctx->dec_sess_info.session_id);

	sg_init_one(&outhdr, ctrl, sizeof(*ctrl));
	sgs[num_out++] = &outhdr;

	/* Return status and session id back */
	sg_init_one(&status_sg, &ctrl_status->status, sizeof(ctrl_status->status));
	sgs[num_out + num_in++] = &status_sg;

	err = virtio_crypto_ctrl_vq_request(vcrypto, sgs, num_out, num_in, vc_ctrl_req);
	if (err < 0)
		goto out;

	if (ctrl_status->status != VIRTIO_CRYPTO_OK) {
		pr_err("virtio_crypto: Close session failed status: %u, session_id: 0x%llx\n",
		       ctrl_status->status, destroy_session->session_id);
		err = -EINVAL;
		goto out;
	}

	err = 0;
out:
	kfree(vc_ctrl_req);
	return err;
}

static int virtio_crypto_alg_skcipher_init_sessions(
		struct virtio_crypto_skcipher_ctx *ctx,
		const uint8_t *key, unsigned int keylen)
{
	uint32_t alg;
	int ret;
	struct virtio_crypto *vcrypto = ctx->vcrypto;

	if (keylen > vcrypto->max_cipher_key_len) {
		pr_err("virtio_crypto: the key is too long\n");
		return -EINVAL;
	}

	if (virtio_crypto_alg_validate_key(keylen, &alg))
		return -EINVAL;

	/* Create encryption session */
	ret = virtio_crypto_alg_skcipher_init_session(ctx,
			alg, key, keylen, 1);
	if (ret)
		return ret;

	/* Create decryption session */
	ret = virtio_crypto_alg_skcipher_init_session(ctx,
			alg, key, keylen, 0);
	if (ret) {
		virtio_crypto_alg_skcipher_close_session(ctx, 1);
		return ret;
	}

	return 0;
}

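/*
 * setkey() binds the tfm to a virtio crypto device on first use and
 * creates one encryption and one decryption session for the key. On
 * rekeying, the previously created sessions are closed first.
 */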
/* Note: kernel crypto API realization */
static int virtio_crypto_skcipher_setkey(struct crypto_skcipher *tfm,
					 const uint8_t *key,
					 unsigned int keylen)
{
	struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	uint32_t alg;
	int ret;

	ret = virtio_crypto_alg_validate_key(keylen, &alg);
	if (ret)
		return ret;

	if (!ctx->vcrypto) {
		/* New key */
		int node = virtio_crypto_get_current_node();
		struct virtio_crypto *vcrypto =
			virtcrypto_get_dev_node(node,
						VIRTIO_CRYPTO_SERVICE_CIPHER, alg);
		if (!vcrypto) {
			pr_err("virtio_crypto: Could not find a virtio device in the system or unsupported algo\n");
			return -ENODEV;
		}

		ctx->vcrypto = vcrypto;
	} else {
		/* Rekeying: close the previously created sessions */
		virtio_crypto_alg_skcipher_close_session(ctx, 1);
		virtio_crypto_alg_skcipher_close_session(ctx, 0);
	}

	ret = virtio_crypto_alg_skcipher_init_sessions(ctx, key, keylen);
	if (ret) {
		virtcrypto_dev_put(ctx->vcrypto);
		ctx->vcrypto = NULL;
		return ret;
	}

	return 0;
}

static int
__virtio_crypto_skcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
		struct skcipher_request *req,
		struct data_queue *data_vq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct virtio_crypto_skcipher_ctx *ctx = vc_sym_req->skcipher_ctx;
	struct virtio_crypto_request *vc_req = &vc_sym_req->base;
	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	struct virtio_crypto_op_data_req *req_data;
	int src_nents, dst_nents;
	int err;
	unsigned long flags;
	struct scatterlist outhdr, iv_sg, status_sg, **sgs;
	u64 dst_len;
	unsigned int num_out = 0, num_in = 0;
	int sg_total;
	uint8_t *iv;
	struct scatterlist *sg;

	src_nents = sg_nents_for_len(req->src, req->cryptlen);
	if (src_nents < 0) {
		pr_err("Invalid number of src SG.\n");
		return src_nents;
	}

	dst_nents = sg_nents(req->dst);

	pr_debug("virtio_crypto: Number of sgs (src_nents: %d, dst_nents: %d)\n",
		 src_nents, dst_nents);

	/* Why 3? outhdr + iv + inhdr */
	sg_total = src_nents + dst_nents + 3;
	sgs = kcalloc_node(sg_total, sizeof(*sgs), GFP_KERNEL,
			   dev_to_node(&vcrypto->vdev->dev));
	if (!sgs)
		return -ENOMEM;

	req_data = kzalloc_node(sizeof(*req_data), GFP_KERNEL,
				dev_to_node(&vcrypto->vdev->dev));
	if (!req_data) {
		kfree(sgs);
		return -ENOMEM;
	}

	vc_req->req_data = req_data;
	vc_sym_req->type = VIRTIO_CRYPTO_SYM_OP_CIPHER;
	/* Head of operation */
	if (vc_sym_req->encrypt) {
		req_data->header.session_id =
			cpu_to_le64(ctx->enc_sess_info.session_id);
		req_data->header.opcode =
			cpu_to_le32(VIRTIO_CRYPTO_CIPHER_ENCRYPT);
	} else {
		req_data->header.session_id =
			cpu_to_le64(ctx->dec_sess_info.session_id);
		req_data->header.opcode =
			cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DECRYPT);
	}
	req_data->u.sym_req.op_type = cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
	req_data->u.sym_req.u.cipher.para.iv_len = cpu_to_le32(ivsize);
	req_data->u.sym_req.u.cipher.para.src_data_len =
			cpu_to_le32(req->cryptlen);

	dst_len = virtio_crypto_alg_sg_nents_length(req->dst);
	if (unlikely(dst_len > U32_MAX)) {
		pr_err("virtio_crypto: The dst_len is beyond U32_MAX\n");
		err = -EINVAL;
		goto free;
	}

	dst_len = min_t(unsigned int, req->cryptlen, dst_len);
	pr_debug("virtio_crypto: src_len: %u, dst_len: %llu\n",
		 req->cryptlen, dst_len);

	if (unlikely(req->cryptlen + dst_len + ivsize +
		     sizeof(vc_req->status) > vcrypto->max_size)) {
		pr_err("virtio_crypto: The length is too big\n");
		err = -EINVAL;
		goto free;
	}

	req_data->u.sym_req.u.cipher.para.dst_data_len =
			cpu_to_le32((uint32_t)dst_len);

	/* Outhdr */
	sg_init_one(&outhdr, req_data, sizeof(*req_data));
	sgs[num_out++] = &outhdr;

	/*
	 * Avoid doing DMA from the stack: use a dynamically-allocated
	 * buffer for the IV.
	 */
	iv = kzalloc_node(ivsize, GFP_ATOMIC,
			  dev_to_node(&vcrypto->vdev->dev));
	if (!iv) {
		err = -ENOMEM;
		goto free;
	}
	memcpy(iv, req->iv, ivsize);
	if (!vc_sym_req->encrypt)
		scatterwalk_map_and_copy(req->iv, req->src,
					 req->cryptlen - AES_BLOCK_SIZE,
					 AES_BLOCK_SIZE, 0);

	sg_init_one(&iv_sg, iv, ivsize);
	sgs[num_out++] = &iv_sg;
	vc_sym_req->iv = iv;

	/* Source data */
	for (sg = req->src; src_nents; sg = sg_next(sg), src_nents--)
		sgs[num_out++] = sg;

	/* Destination data */
	for (sg = req->dst; sg; sg = sg_next(sg))
		sgs[num_out + num_in++] = sg;

	/* Status */
	sg_init_one(&status_sg, &vc_req->status, sizeof(vc_req->status));
	sgs[num_out + num_in++] = &status_sg;

	vc_req->sgs = sgs;

	spin_lock_irqsave(&data_vq->lock, flags);
	err = virtqueue_add_sgs(data_vq->vq, sgs, num_out,
				num_in, vc_req, GFP_ATOMIC);
	virtqueue_kick(data_vq->vq);
	spin_unlock_irqrestore(&data_vq->lock, flags);
	if (unlikely(err < 0))
		goto free_iv;

	return 0;

free_iv:
	kfree_sensitive(iv);
free:
	kfree_sensitive(req_data);
	kfree(sgs);
	return err;
}

static int virtio_crypto_skcipher_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(req);
	struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(atfm);
	struct virtio_crypto_sym_request *vc_sym_req =
				skcipher_request_ctx(req);
	struct virtio_crypto_request *vc_req = &vc_sym_req->base;
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	/* Use the first data virtqueue as default */
	struct data_queue *data_vq = &vcrypto->data_vq[0];

	if (!req->cryptlen)
		return 0;
	if (req->cryptlen % AES_BLOCK_SIZE)
		return -EINVAL;

	vc_req->dataq = data_vq;
	vc_req->alg_cb = virtio_crypto_dataq_sym_callback;
	vc_sym_req->skcipher_ctx = ctx;
	vc_sym_req->skcipher_req = req;
	vc_sym_req->encrypt = true;

	return crypto_transfer_skcipher_request_to_engine(data_vq->engine, req);
}

static int virtio_crypto_skcipher_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(req);
	struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(atfm);
	struct virtio_crypto_sym_request *vc_sym_req =
				skcipher_request_ctx(req);
	struct virtio_crypto_request *vc_req = &vc_sym_req->base;
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	/* Use the first data virtqueue as default */
	struct data_queue *data_vq = &vcrypto->data_vq[0];

	if (!req->cryptlen)
		return 0;
	if (req->cryptlen % AES_BLOCK_SIZE)
		return -EINVAL;

	vc_req->dataq = data_vq;
	vc_req->alg_cb = virtio_crypto_dataq_sym_callback;
	vc_sym_req->skcipher_ctx = ctx;
	vc_sym_req->skcipher_req = req;
	vc_sym_req->encrypt = false;

	return crypto_transfer_skcipher_request_to_engine(data_vq->engine, req);
}

static int virtio_crypto_skcipher_init(struct crypto_skcipher *tfm)
{
	struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_skcipher_set_reqsize(tfm, sizeof(struct virtio_crypto_sym_request));
	ctx->tfm = tfm;

	return 0;
}

static void virtio_crypto_skcipher_exit(struct crypto_skcipher *tfm)
{
	struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	if (!ctx->vcrypto)
		return;

	virtio_crypto_alg_skcipher_close_session(ctx, 1);
	virtio_crypto_alg_skcipher_close_session(ctx, 0);
	virtcrypto_dev_put(ctx->vcrypto);
	ctx->vcrypto = NULL;
}

int virtio_crypto_skcipher_crypt_req(
	struct crypto_engine *engine, void *vreq)
{
	struct skcipher_request *req = container_of(vreq, struct skcipher_request, base);
	struct virtio_crypto_sym_request *vc_sym_req =
				skcipher_request_ctx(req);
	struct virtio_crypto_request *vc_req = &vc_sym_req->base;
	struct data_queue *data_vq = vc_req->dataq;
	int ret;

	ret = __virtio_crypto_skcipher_do_req(vc_sym_req, req, data_vq);
	if (ret < 0)
		return ret;

	virtqueue_kick(data_vq->vq);

	return 0;
}

static void virtio_crypto_skcipher_finalize_req(
	struct virtio_crypto_sym_request *vc_sym_req,
	struct skcipher_request *req,
	int err)
{
	if (vc_sym_req->encrypt)
		scatterwalk_map_and_copy(req->iv, req->dst,
					 req->cryptlen - AES_BLOCK_SIZE,
					 AES_BLOCK_SIZE, 0);
	kfree_sensitive(vc_sym_req->iv);
	virtcrypto_clear_request(&vc_sym_req->base);

	crypto_finalize_skcipher_request(vc_sym_req->base.dataq->engine,
					 req, err);
}

static struct virtio_crypto_algo virtio_crypto_algs[] = { {
	.algonum = VIRTIO_CRYPTO_CIPHER_AES_CBC,
	.service = VIRTIO_CRYPTO_SERVICE_CIPHER,
	.algo.base = {
		.base.cra_name		= "cbc(aes)",
		.base.cra_driver_name	= "virtio_crypto_aes_cbc",
		.base.cra_priority	= 150,
		.base.cra_flags		= CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_ALLOCATES_MEMORY,
		.base.cra_blocksize	= AES_BLOCK_SIZE,
		.base.cra_ctxsize	= sizeof(struct virtio_crypto_skcipher_ctx),
		.base.cra_module	= THIS_MODULE,
		.init			= virtio_crypto_skcipher_init,
		.exit			= virtio_crypto_skcipher_exit,
		.setkey			= virtio_crypto_skcipher_setkey,
		.decrypt		= virtio_crypto_skcipher_decrypt,
		.encrypt		= virtio_crypto_skcipher_encrypt,
		.min_keysize		= AES_MIN_KEY_SIZE,
		.max_keysize		= AES_MAX_KEY_SIZE,
		.ivsize			= AES_BLOCK_SIZE,
	},
	.algo.op = {
		.do_one_request = virtio_crypto_skcipher_crypt_req,
	},
} };

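/*
 * Illustrative sketch only (not part of this driver): a kernel consumer
 * reaches this implementation through the generic crypto API, e.g.:
 *
 *	struct crypto_skcipher *tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	struct skcipher_request *r;
 *
 *	crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_256);
 *	r = skcipher_request_alloc(tfm, GFP_KERNEL);
 *	skcipher_request_set_callback(r, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				      done_cb, done_ctx);
 *	skcipher_request_set_crypt(r, src_sg, dst_sg, len, iv);
 *	crypto_skcipher_encrypt(r);
 *
 * Error handling is omitted and done_cb/done_ctx are placeholder names for
 * the caller's completion callback; since this algorithm is CRYPTO_ALG_ASYNC,
 * encrypt/decrypt may return -EINPROGRESS and complete via that callback.
 * Whether this driver is actually selected for "cbc(aes)" depends on
 * cra_priority relative to other registered implementations.
 */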
int virtio_crypto_skcipher_algs_register(struct virtio_crypto *vcrypto)
{
	int ret = 0;
	int i;

	mutex_lock(&algs_lock);

	for (i = 0; i < ARRAY_SIZE(virtio_crypto_algs); i++) {

		uint32_t service = virtio_crypto_algs[i].service;
		uint32_t algonum = virtio_crypto_algs[i].algonum;

		if (!virtcrypto_algo_is_supported(vcrypto, service, algonum))
			continue;

		if (virtio_crypto_algs[i].active_devs == 0) {
			ret = crypto_engine_register_skcipher(&virtio_crypto_algs[i].algo);
			if (ret)
				goto unlock;
		}

		virtio_crypto_algs[i].active_devs++;
		dev_info(&vcrypto->vdev->dev, "Registered algo %s\n",
			 virtio_crypto_algs[i].algo.base.base.cra_name);
	}

unlock:
	mutex_unlock(&algs_lock);
	return ret;
}

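/*
 * active_devs counts how many virtio crypto devices currently back each
 * algorithm: the skcipher is registered with the crypto API when the first
 * capable device arrives and unregistered again when the last one goes away.
 */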
void virtio_crypto_skcipher_algs_unregister(struct virtio_crypto *vcrypto)
{
	int i;

	mutex_lock(&algs_lock);

	for (i = 0; i < ARRAY_SIZE(virtio_crypto_algs); i++) {

		uint32_t service = virtio_crypto_algs[i].service;
		uint32_t algonum = virtio_crypto_algs[i].algonum;

		if (virtio_crypto_algs[i].active_devs == 0 ||
		    !virtcrypto_algo_is_supported(vcrypto, service, algonum))
			continue;

		if (virtio_crypto_algs[i].active_devs == 1)
			crypto_engine_unregister_skcipher(&virtio_crypto_algs[i].algo);

		virtio_crypto_algs[i].active_devs--;
	}

	mutex_unlock(&algs_lock);
}