// SPDX-License-Identifier: GPL-2.0-or-later
/* Algorithms supported by virtio crypto device
 *
 * Authors: Gonglei <arei.gonglei@huawei.com>
 *
 * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
 */

#include <linux/scatterlist.h>
#include <crypto/algapi.h>
#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <crypto/scatterwalk.h>
#include <linux/atomic.h>

#include <uapi/linux/virtio_crypto.h>
#include "virtio_crypto_common.h"

struct virtio_crypto_skcipher_ctx {
	struct crypto_engine_ctx enginectx;
	struct virtio_crypto *vcrypto;
	struct crypto_skcipher *tfm;

	struct virtio_crypto_sym_session_info enc_sess_info;
	struct virtio_crypto_sym_session_info dec_sess_info;
};
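
/*
 * Per-request context, embedded in the skcipher_request via
 * crypto_skcipher_set_reqsize() in virtio_crypto_skcipher_init() below.
 */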
struct virtio_crypto_sym_request {
	struct virtio_crypto_request base;

	/* Cipher or aead */
	uint32_t type;
	struct virtio_crypto_skcipher_ctx *skcipher_ctx;
	struct skcipher_request *skcipher_req;
	uint8_t *iv;
	/* Encryption? */
	bool encrypt;
};

struct virtio_crypto_algo {
	uint32_t algonum;
	uint32_t service;
	unsigned int active_devs;
	struct skcipher_alg algo;
};

/*
 * The algs_lock protects the below global virtio_crypto_active_devs
 * and crypto algorithms registration.
 */
static DEFINE_MUTEX(algs_lock);
static void virtio_crypto_skcipher_finalize_req(
	struct virtio_crypto_sym_request *vc_sym_req,
	struct skcipher_request *req,
	int err);
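
/*
 * Completion callback run from the data virtqueue completion path: map the
 * virtio status byte written by the device to an errno and finalize the
 * skcipher request on the crypto engine.
 */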
static void virtio_crypto_dataq_sym_callback
		(struct virtio_crypto_request *vc_req, int len)
{
	struct virtio_crypto_sym_request *vc_sym_req =
		container_of(vc_req, struct virtio_crypto_sym_request, base);
	struct skcipher_request *ablk_req;
	int error;

	/* Finish the encrypt or decrypt process */
	if (vc_sym_req->type == VIRTIO_CRYPTO_SYM_OP_CIPHER) {
		switch (vc_req->status) {
		case VIRTIO_CRYPTO_OK:
			error = 0;
			break;
		case VIRTIO_CRYPTO_INVSESS:
		case VIRTIO_CRYPTO_ERR:
			error = -EINVAL;
			break;
		case VIRTIO_CRYPTO_BADMSG:
			error = -EBADMSG;
			break;
		default:
			error = -EIO;
			break;
		}
		ablk_req = vc_sym_req->skcipher_req;
		virtio_crypto_skcipher_finalize_req(vc_sym_req,
						    ablk_req, error);
	}
}

static u64 virtio_crypto_alg_sg_nents_length(struct scatterlist *sg)
{
	u64 total = 0;

	for (total = 0; sg; sg = sg_next(sg))
		total += sg->length;

	return total;
}
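
/*
 * Only AES key sizes are accepted; on success *alg is set to the one
 * cipher this file currently exposes, VIRTIO_CRYPTO_CIPHER_AES_CBC.
 */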
static int
virtio_crypto_alg_validate_key(int key_len, uint32_t *alg)
{
	switch (key_len) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_192:
	case AES_KEYSIZE_256:
		*alg = VIRTIO_CRYPTO_CIPHER_AES_CBC;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
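
/*
 * Create one symmetric cipher session on the control virtqueue.  The
 * request is a three-entry scatterlist: the control header (out), the raw
 * key (out) and the device-written status/session_id pair (in).  The
 * caller spins on the control virtqueue because session setup traps into
 * the host and is expected to complete immediately.
 */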
static int virtio_crypto_alg_skcipher_init_session(
		struct virtio_crypto_skcipher_ctx *ctx,
		uint32_t alg, const uint8_t *key,
		unsigned int keylen,
		int encrypt)
{
	struct scatterlist outhdr, key_sg, inhdr, *sgs[3];
	unsigned int tmp;
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	int op = encrypt ? VIRTIO_CRYPTO_OP_ENCRYPT : VIRTIO_CRYPTO_OP_DECRYPT;
	int err;
	unsigned int num_out = 0, num_in = 0;

	/*
	 * Avoid doing DMA from the stack: use a dynamically-allocated
	 * buffer for the key.
	 */
	uint8_t *cipher_key = kmemdup(key, keylen, GFP_ATOMIC);

	if (!cipher_key)
		return -ENOMEM;

	spin_lock(&vcrypto->ctrl_lock);
	/* Pad ctrl header */
	vcrypto->ctrl.header.opcode =
		cpu_to_le32(VIRTIO_CRYPTO_CIPHER_CREATE_SESSION);
	vcrypto->ctrl.header.algo = cpu_to_le32(alg);
	/* Set the default dataqueue id to 0 */
	vcrypto->ctrl.header.queue_id = 0;

	vcrypto->input.status = cpu_to_le32(VIRTIO_CRYPTO_ERR);
	/* Pad cipher's parameters */
	vcrypto->ctrl.u.sym_create_session.op_type =
		cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
	vcrypto->ctrl.u.sym_create_session.u.cipher.para.algo =
		vcrypto->ctrl.header.algo;
	vcrypto->ctrl.u.sym_create_session.u.cipher.para.keylen =
		cpu_to_le32(keylen);
	vcrypto->ctrl.u.sym_create_session.u.cipher.para.op =
		cpu_to_le32(op);

	sg_init_one(&outhdr, &vcrypto->ctrl, sizeof(vcrypto->ctrl));
	sgs[num_out++] = &outhdr;

	/* Set key */
	sg_init_one(&key_sg, cipher_key, keylen);
	sgs[num_out++] = &key_sg;

	/* Return status and session id back */
	sg_init_one(&inhdr, &vcrypto->input, sizeof(vcrypto->input));
	sgs[num_out + num_in++] = &inhdr;

	err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out,
				num_in, vcrypto, GFP_ATOMIC);
	if (err < 0) {
		spin_unlock(&vcrypto->ctrl_lock);
		kzfree(cipher_key);
		return err;
	}
	virtqueue_kick(vcrypto->ctrl_vq);

	/*
	 * Trapping into the hypervisor, so the request should be
	 * handled immediately.
	 */
	while (!virtqueue_get_buf(vcrypto->ctrl_vq, &tmp) &&
	       !virtqueue_is_broken(vcrypto->ctrl_vq))
		cpu_relax();

	if (le32_to_cpu(vcrypto->input.status) != VIRTIO_CRYPTO_OK) {
		spin_unlock(&vcrypto->ctrl_lock);
		pr_err("virtio_crypto: Create session failed status: %u\n",
		       le32_to_cpu(vcrypto->input.status));
		kzfree(cipher_key);
		return -EINVAL;
	}

	if (encrypt)
		ctx->enc_sess_info.session_id =
			le64_to_cpu(vcrypto->input.session_id);
	else
		ctx->dec_sess_info.session_id =
			le64_to_cpu(vcrypto->input.session_id);

	spin_unlock(&vcrypto->ctrl_lock);

	kzfree(cipher_key);
	return 0;
}
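
/*
 * Tear down one session on the control virtqueue, mirroring the create
 * path above: control header out, one-byte status in, then spin until the
 * device reports completion.
 */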
static int virtio_crypto_alg_skcipher_close_session(
		struct virtio_crypto_skcipher_ctx *ctx,
		int encrypt)
{
	struct scatterlist outhdr, status_sg, *sgs[2];
	unsigned int tmp;
	struct virtio_crypto_destroy_session_req *destroy_session;
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	int err;
	unsigned int num_out = 0, num_in = 0;

	spin_lock(&vcrypto->ctrl_lock);
	vcrypto->ctrl_status.status = VIRTIO_CRYPTO_ERR;
	/* Pad ctrl header */
	vcrypto->ctrl.header.opcode =
		cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION);
	/* Set the default virtqueue id to 0 */
	vcrypto->ctrl.header.queue_id = 0;

	destroy_session = &vcrypto->ctrl.u.destroy_session;

	if (encrypt)
		destroy_session->session_id =
			cpu_to_le64(ctx->enc_sess_info.session_id);
	else
		destroy_session->session_id =
			cpu_to_le64(ctx->dec_sess_info.session_id);

	sg_init_one(&outhdr, &vcrypto->ctrl, sizeof(vcrypto->ctrl));
	sgs[num_out++] = &outhdr;

	/* Return status and session id back */
	sg_init_one(&status_sg, &vcrypto->ctrl_status.status,
		    sizeof(vcrypto->ctrl_status.status));
	sgs[num_out + num_in++] = &status_sg;

	err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out,
				num_in, vcrypto, GFP_ATOMIC);
	if (err < 0) {
		spin_unlock(&vcrypto->ctrl_lock);
		return err;
	}
	virtqueue_kick(vcrypto->ctrl_vq);

	while (!virtqueue_get_buf(vcrypto->ctrl_vq, &tmp) &&
	       !virtqueue_is_broken(vcrypto->ctrl_vq))
		cpu_relax();

	if (vcrypto->ctrl_status.status != VIRTIO_CRYPTO_OK) {
		spin_unlock(&vcrypto->ctrl_lock);
		pr_err("virtio_crypto: Close session failed status: %u, session_id: 0x%llx\n",
		       vcrypto->ctrl_status.status,
		       destroy_session->session_id);

		return -EINVAL;
	}
	spin_unlock(&vcrypto->ctrl_lock);

	return 0;
}
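
/*
 * The device uses distinct sessions for the encrypt and decrypt
 * directions, so both are created here; if the decrypt session fails, the
 * already-created encrypt session is closed again.
 */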
static int virtio_crypto_alg_skcipher_init_sessions(
		struct virtio_crypto_skcipher_ctx *ctx,
		const uint8_t *key, unsigned int keylen)
{
	uint32_t alg;
	int ret;
	struct virtio_crypto *vcrypto = ctx->vcrypto;

	if (keylen > vcrypto->max_cipher_key_len) {
		pr_err("virtio_crypto: the key is too long\n");
		return -EINVAL;
	}

	if (virtio_crypto_alg_validate_key(keylen, &alg))
		return -EINVAL;

	/* Create encryption session */
	ret = virtio_crypto_alg_skcipher_init_session(ctx,
			alg, key, keylen, 1);
	if (ret)
		return ret;
	/* Create decryption session */
	ret = virtio_crypto_alg_skcipher_init_session(ctx,
			alg, key, keylen, 0);
	if (ret) {
		virtio_crypto_alg_skcipher_close_session(ctx, 1);
		return ret;
	}

	return 0;
}

/* Note: kernel crypto API realization */
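/*
 * setkey() binds the transform to a virtio crypto device the first time a
 * key is set (looking for a device on the current NUMA node) and simply
 * re-creates both sessions when an existing transform is rekeyed.
 */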
static int virtio_crypto_skcipher_setkey(struct crypto_skcipher *tfm,
					 const uint8_t *key,
					 unsigned int keylen)
{
	struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	uint32_t alg;
	int ret;

	ret = virtio_crypto_alg_validate_key(keylen, &alg);
	if (ret)
		return ret;

	if (!ctx->vcrypto) {
		/* New key */
		int node = virtio_crypto_get_current_node();
		struct virtio_crypto *vcrypto =
				virtcrypto_get_dev_node(node,
					VIRTIO_CRYPTO_SERVICE_CIPHER, alg);
		if (!vcrypto) {
			pr_err("virtio_crypto: Could not find a virtio device in the system or unsupported algo\n");
			return -ENODEV;
		}

		ctx->vcrypto = vcrypto;
	} else {
		/* Rekeying: close the previously created sessions first */
		virtio_crypto_alg_skcipher_close_session(ctx, 1);
		virtio_crypto_alg_skcipher_close_session(ctx, 0);
	}

	ret = virtio_crypto_alg_skcipher_init_sessions(ctx, key, keylen);
	if (ret) {
		virtcrypto_dev_put(ctx->vcrypto);
		ctx->vcrypto = NULL;

		return ret;
	}

	return 0;
}
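
/*
 * Build one VIRTIO_CRYPTO_CIPHER_{EN,DE}CRYPT data request and post it on
 * the chosen data virtqueue.  The scatterlist table handed to
 * virtqueue_add_sgs() is laid out as:
 *
 *   out: request header, IV, source data
 *   in:  destination data, one-byte status written by the device
 *
 * which is where the "+ 3" (outhdr + iv + inhdr) below comes from.
 */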
static int
__virtio_crypto_skcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
		struct skcipher_request *req,
		struct data_queue *data_vq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct virtio_crypto_skcipher_ctx *ctx = vc_sym_req->skcipher_ctx;
	struct virtio_crypto_request *vc_req = &vc_sym_req->base;
	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	struct virtio_crypto_op_data_req *req_data;
	int src_nents, dst_nents;
	int err;
	unsigned long flags;
	struct scatterlist outhdr, iv_sg, status_sg, **sgs;
	u64 dst_len;
	unsigned int num_out = 0, num_in = 0;
	int sg_total;
	uint8_t *iv;
	int i;

	src_nents = sg_nents_for_len(req->src, req->cryptlen);
	dst_nents = sg_nents(req->dst);

	pr_debug("virtio_crypto: Number of sgs (src_nents: %d, dst_nents: %d)\n",
		 src_nents, dst_nents);

	/* Why 3?  outhdr + iv + inhdr */
	sg_total = src_nents + dst_nents + 3;
	sgs = kcalloc_node(sg_total, sizeof(*sgs), GFP_KERNEL,
			   dev_to_node(&vcrypto->vdev->dev));
	if (!sgs)
		return -ENOMEM;

	req_data = kzalloc_node(sizeof(*req_data), GFP_KERNEL,
				dev_to_node(&vcrypto->vdev->dev));
	if (!req_data) {
		kfree(sgs);
		return -ENOMEM;
	}

	vc_req->req_data = req_data;
	vc_sym_req->type = VIRTIO_CRYPTO_SYM_OP_CIPHER;
	/* Head of operation */
	if (vc_sym_req->encrypt) {
		req_data->header.session_id =
			cpu_to_le64(ctx->enc_sess_info.session_id);
		req_data->header.opcode =
			cpu_to_le32(VIRTIO_CRYPTO_CIPHER_ENCRYPT);
	} else {
		req_data->header.session_id =
			cpu_to_le64(ctx->dec_sess_info.session_id);
		req_data->header.opcode =
			cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DECRYPT);
	}
	req_data->u.sym_req.op_type = cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
	req_data->u.sym_req.u.cipher.para.iv_len = cpu_to_le32(ivsize);
	req_data->u.sym_req.u.cipher.para.src_data_len =
		cpu_to_le32(req->cryptlen);

	dst_len = virtio_crypto_alg_sg_nents_length(req->dst);
	if (unlikely(dst_len > U32_MAX)) {
		pr_err("virtio_crypto: The dst_len is beyond U32_MAX\n");
		err = -EINVAL;
		goto free;
	}

	pr_debug("virtio_crypto: src_len: %u, dst_len: %llu\n",
		 req->cryptlen, dst_len);

	if (unlikely(req->cryptlen + dst_len + ivsize +
	    sizeof(vc_req->status) > vcrypto->max_size)) {
		pr_err("virtio_crypto: The length is too big\n");
		err = -EINVAL;
		goto free;
	}

	req_data->u.sym_req.u.cipher.para.dst_data_len =
		cpu_to_le32((uint32_t)dst_len);

	/* Outhdr */
	sg_init_one(&outhdr, req_data, sizeof(*req_data));
	sgs[num_out++] = &outhdr;

	/*
	 * Avoid doing DMA from the stack: use a dynamically-allocated
	 * buffer for the IV.
	 */
	iv = kzalloc_node(ivsize, GFP_ATOMIC,
			  dev_to_node(&vcrypto->vdev->dev));
	if (!iv) {
		err = -ENOMEM;
		goto free;
	}
	memcpy(iv, req->iv, ivsize);
	if (!vc_sym_req->encrypt)
		scatterwalk_map_and_copy(req->iv, req->src,
					 req->cryptlen - AES_BLOCK_SIZE,
					 AES_BLOCK_SIZE, 0);

	sg_init_one(&iv_sg, iv, ivsize);
	sgs[num_out++] = &iv_sg;
	vc_sym_req->iv = iv;

	/* Source data */
	for (i = 0; i < src_nents; i++)
		sgs[num_out++] = &req->src[i];

	/* Destination data */
	for (i = 0; i < dst_nents; i++)
		sgs[num_out + num_in++] = &req->dst[i];

	/* Status */
	sg_init_one(&status_sg, &vc_req->status, sizeof(vc_req->status));
	sgs[num_out + num_in++] = &status_sg;

	vc_req->sgs = sgs;

	spin_lock_irqsave(&data_vq->lock, flags);
	err = virtqueue_add_sgs(data_vq->vq, sgs, num_out,
				num_in, vc_req, GFP_ATOMIC);
	virtqueue_kick(data_vq->vq);
	spin_unlock_irqrestore(&data_vq->lock, flags);
	if (unlikely(err < 0))
		goto free_iv;

	return 0;

free_iv:
	kzfree(iv);
free:
	kzfree(req_data);
	kfree(sgs);
	return err;
}
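
/*
 * The .encrypt()/.decrypt() entry points only validate the request and
 * hand it to the crypto engine attached to the data virtqueue; the actual
 * descriptor is built later in virtio_crypto_skcipher_crypt_req().
 */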
static int virtio_crypto_skcipher_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(req);
	struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(atfm);
	struct virtio_crypto_sym_request *vc_sym_req =
				skcipher_request_ctx(req);
	struct virtio_crypto_request *vc_req = &vc_sym_req->base;
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	/* Use the first data virtqueue as default */
	struct data_queue *data_vq = &vcrypto->data_vq[0];

	if (req->cryptlen % AES_BLOCK_SIZE)
		return -EINVAL;

	vc_req->dataq = data_vq;
	vc_req->alg_cb = virtio_crypto_dataq_sym_callback;
	vc_sym_req->skcipher_ctx = ctx;
	vc_sym_req->skcipher_req = req;
	vc_sym_req->encrypt = true;

	return crypto_transfer_skcipher_request_to_engine(data_vq->engine, req);
}

static int virtio_crypto_skcipher_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(req);
	struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(atfm);
	struct virtio_crypto_sym_request *vc_sym_req =
				skcipher_request_ctx(req);
	struct virtio_crypto_request *vc_req = &vc_sym_req->base;
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	/* Use the first data virtqueue as default */
	struct data_queue *data_vq = &vcrypto->data_vq[0];

	if (req->cryptlen % AES_BLOCK_SIZE)
		return -EINVAL;

	vc_req->dataq = data_vq;
	vc_req->alg_cb = virtio_crypto_dataq_sym_callback;
	vc_sym_req->skcipher_ctx = ctx;
	vc_sym_req->skcipher_req = req;
	vc_sym_req->encrypt = false;

	return crypto_transfer_skcipher_request_to_engine(data_vq->engine, req);
}
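
/*
 * tfm init/exit: reserve room for the per-request context and set up this
 * transform's crypto engine callbacks; exit closes any sessions still
 * open and drops the device reference.
 */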
static int virtio_crypto_skcipher_init(struct crypto_skcipher *tfm)
{
	struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_skcipher_set_reqsize(tfm, sizeof(struct virtio_crypto_sym_request));
	ctx->tfm = tfm;

	ctx->enginectx.op.do_one_request = virtio_crypto_skcipher_crypt_req;
	ctx->enginectx.op.prepare_request = NULL;
	ctx->enginectx.op.unprepare_request = NULL;
	return 0;
}

static void virtio_crypto_skcipher_exit(struct crypto_skcipher *tfm)
{
	struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	if (!ctx->vcrypto)
		return;

	virtio_crypto_alg_skcipher_close_session(ctx, 1);
	virtio_crypto_alg_skcipher_close_session(ctx, 0);
	virtcrypto_dev_put(ctx->vcrypto);
	ctx->vcrypto = NULL;
}

int virtio_crypto_skcipher_crypt_req(
	struct crypto_engine *engine, void *vreq)
{
	struct skcipher_request *req = container_of(vreq, struct skcipher_request, base);
	struct virtio_crypto_sym_request *vc_sym_req =
				skcipher_request_ctx(req);
	struct virtio_crypto_request *vc_req = &vc_sym_req->base;
	struct data_queue *data_vq = vc_req->dataq;
	int ret;

	ret = __virtio_crypto_skcipher_do_req(vc_sym_req, req, data_vq);
	if (ret < 0)
		return ret;

	virtqueue_kick(data_vq->vq);

	return 0;
}

static void virtio_crypto_skcipher_finalize_req(
	struct virtio_crypto_sym_request *vc_sym_req,
	struct skcipher_request *req,
	int err)
{
	if (vc_sym_req->encrypt)
		scatterwalk_map_and_copy(req->iv, req->dst,
					 req->cryptlen - AES_BLOCK_SIZE,
					 AES_BLOCK_SIZE, 0);
	crypto_finalize_skcipher_request(vc_sym_req->base.dataq->engine,
					 req, err);
	kzfree(vc_sym_req->iv);
	virtcrypto_clear_request(&vc_sym_req->base);
}
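
/*
 * Algorithm table: currently a single AES-CBC skcipher entry.
 * Registration below is gated on what each device actually advertises via
 * virtcrypto_algo_is_supported().
 */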
static struct virtio_crypto_algo virtio_crypto_algs[] = { {
	.algonum = VIRTIO_CRYPTO_CIPHER_AES_CBC,
	.service = VIRTIO_CRYPTO_SERVICE_CIPHER,
	.algo = {
		.base.cra_name		= "cbc(aes)",
		.base.cra_driver_name	= "virtio_crypto_aes_cbc",
		.base.cra_priority	= 150,
		.base.cra_flags		= CRYPTO_ALG_ASYNC,
		.base.cra_blocksize	= AES_BLOCK_SIZE,
		.base.cra_ctxsize	= sizeof(struct virtio_crypto_skcipher_ctx),
		.base.cra_module	= THIS_MODULE,
		.init			= virtio_crypto_skcipher_init,
		.exit			= virtio_crypto_skcipher_exit,
		.setkey			= virtio_crypto_skcipher_setkey,
		.decrypt		= virtio_crypto_skcipher_decrypt,
		.encrypt		= virtio_crypto_skcipher_encrypt,
		.min_keysize		= AES_MIN_KEY_SIZE,
		.max_keysize		= AES_MAX_KEY_SIZE,
		.ivsize			= AES_BLOCK_SIZE,
	},
} };
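
/*
 * Registration entry points, called once per virtio crypto device.
 * A minimal usage sketch (the caller and call sites are illustrative,
 * not part of this file):
 *
 *	err = virtio_crypto_algs_register(vcrypto);	// device bring-up
 *	...
 *	virtio_crypto_algs_unregister(vcrypto);	// device removal
 */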
int virtio_crypto_algs_register(struct virtio_crypto *vcrypto)
{
	int ret = 0;
	int i = 0;

	mutex_lock(&algs_lock);

	for (i = 0; i < ARRAY_SIZE(virtio_crypto_algs); i++) {

		uint32_t service = virtio_crypto_algs[i].service;
		uint32_t algonum = virtio_crypto_algs[i].algonum;

		if (!virtcrypto_algo_is_supported(vcrypto, service, algonum))
			continue;

		if (virtio_crypto_algs[i].active_devs == 0) {
			ret = crypto_register_skcipher(&virtio_crypto_algs[i].algo);
			if (ret)
				goto unlock;
		}

		virtio_crypto_algs[i].active_devs++;
		dev_info(&vcrypto->vdev->dev, "Registered algo %s\n",
			 virtio_crypto_algs[i].algo.base.cra_name);
	}

unlock:
	mutex_unlock(&algs_lock);
	return ret;
}

void virtio_crypto_algs_unregister(struct virtio_crypto *vcrypto)
{
	int i = 0;

	mutex_lock(&algs_lock);

	for (i = 0; i < ARRAY_SIZE(virtio_crypto_algs); i++) {

		uint32_t service = virtio_crypto_algs[i].service;
		uint32_t algonum = virtio_crypto_algs[i].algonum;

		if (virtio_crypto_algs[i].active_devs == 0 ||
		    !virtcrypto_algo_is_supported(vcrypto, service, algonum))
			continue;

		if (virtio_crypto_algs[i].active_devs == 1)
			crypto_unregister_skcipher(&virtio_crypto_algs[i].algo);

		virtio_crypto_algs[i].active_devs--;
	}

	mutex_unlock(&algs_lock);
}