/* Algorithms supported by virtio crypto device
 *
 * Authors: Gonglei <arei.gonglei@huawei.com>
 *
 * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/scatterlist.h>
#include <crypto/algapi.h>
#include <linux/err.h>
#include <crypto/scatterwalk.h>
#include <linux/atomic.h>

#include <uapi/linux/virtio_crypto.h>
#include "virtio_crypto_common.h"
struct virtio_crypto_ablkcipher_ctx {
	struct crypto_engine_ctx enginectx;
	struct virtio_crypto *vcrypto;
	struct crypto_tfm *tfm;

	struct virtio_crypto_sym_session_info enc_sess_info;
	struct virtio_crypto_sym_session_info dec_sess_info;
};
struct virtio_crypto_sym_request {
	struct virtio_crypto_request base;

	/* Cipher or aead */
	uint32_t type;
	struct virtio_crypto_ablkcipher_ctx *ablkcipher_ctx;
	struct ablkcipher_request *ablkcipher_req;
	uint8_t *iv;

	/* Encryption? */
	bool encrypt;
};
struct virtio_crypto_algo {
	uint32_t algonum;
	uint32_t service;
	unsigned int active_devs;
	struct crypto_alg algo;
};
/*
 * The algs_lock protects the per-algorithm active_devs counts below
 * and crypto algorithm registration/unregistration.
 */
static DEFINE_MUTEX(algs_lock);
static void virtio_crypto_ablkcipher_finalize_req(
	struct virtio_crypto_sym_request *vc_sym_req,
	struct ablkcipher_request *req,
	int err);
static void virtio_crypto_dataq_sym_callback
		(struct virtio_crypto_request *vc_req, int len)
{
	struct virtio_crypto_sym_request *vc_sym_req =
		container_of(vc_req, struct virtio_crypto_sym_request, base);
	struct ablkcipher_request *ablk_req;
	int error;

	/* Finish the encrypt or decrypt process */
	if (vc_sym_req->type == VIRTIO_CRYPTO_SYM_OP_CIPHER) {
		/* Map the device status to a kernel error code */
		switch (vc_req->status) {
		case VIRTIO_CRYPTO_OK:
			error = 0;
			break;
		case VIRTIO_CRYPTO_INVSESS:
		case VIRTIO_CRYPTO_ERR:
			error = -EINVAL;
			break;
		case VIRTIO_CRYPTO_BADMSG:
			error = -EBADMSG;
			break;
		default:
			error = -EIO;
			break;
		}
		ablk_req = vc_sym_req->ablkcipher_req;
		virtio_crypto_ablkcipher_finalize_req(vc_sym_req,
						      ablk_req, error);
	}
}
static u64 virtio_crypto_alg_sg_nents_length(struct scatterlist *sg)
{
	u64 total = 0;

	for (total = 0; sg; sg = sg_next(sg))
		total += sg->length;

	return total;
}
static int
virtio_crypto_alg_validate_key(int key_len, uint32_t *alg)
{
	switch (key_len) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_192:
	case AES_KEYSIZE_256:
		*alg = VIRTIO_CRYPTO_CIPHER_AES_CBC;
		break;
	default:
		pr_err("virtio_crypto: Unsupported key length: %d\n",
		       key_len);
		return -EINVAL;
	}
	return 0;
}
static int virtio_crypto_alg_ablkcipher_init_session(
		struct virtio_crypto_ablkcipher_ctx *ctx,
		uint32_t alg, const uint8_t *key,
		unsigned int keylen,
		int encrypt)
{
	struct scatterlist outhdr, key_sg, inhdr, *sgs[3];
	unsigned int tmp;
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	int op = encrypt ? VIRTIO_CRYPTO_OP_ENCRYPT : VIRTIO_CRYPTO_OP_DECRYPT;
	int err;
	unsigned int num_out = 0, num_in = 0;

	/*
	 * To avoid doing DMA from the stack, use a dynamically
	 * allocated buffer for the key.
	 */
	uint8_t *cipher_key = kmalloc(keylen, GFP_ATOMIC);

	if (!cipher_key)
		return -ENOMEM;

	memcpy(cipher_key, key, keylen);

	spin_lock(&vcrypto->ctrl_lock);
	/* Fill in the ctrl header */
	vcrypto->ctrl.header.opcode =
		cpu_to_le32(VIRTIO_CRYPTO_CIPHER_CREATE_SESSION);
	vcrypto->ctrl.header.algo = cpu_to_le32(alg);
	/* Set the default dataqueue id to 0 */
	vcrypto->ctrl.header.queue_id = 0;

	vcrypto->input.status = cpu_to_le32(VIRTIO_CRYPTO_ERR);
	/* Fill in the cipher's parameters */
	vcrypto->ctrl.u.sym_create_session.op_type =
		cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
	vcrypto->ctrl.u.sym_create_session.u.cipher.para.algo =
		vcrypto->ctrl.header.algo;
	vcrypto->ctrl.u.sym_create_session.u.cipher.para.keylen =
		cpu_to_le32(keylen);
	vcrypto->ctrl.u.sym_create_session.u.cipher.para.op =
		cpu_to_le32(op);

	sg_init_one(&outhdr, &vcrypto->ctrl, sizeof(vcrypto->ctrl));
	sgs[num_out++] = &outhdr;

	/* Set key */
	sg_init_one(&key_sg, cipher_key, keylen);
	sgs[num_out++] = &key_sg;

	/* Return status and session id back */
	sg_init_one(&inhdr, &vcrypto->input, sizeof(vcrypto->input));
	sgs[num_out + num_in++] = &inhdr;

	err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out,
				num_in, vcrypto, GFP_ATOMIC);
	if (err < 0) {
		spin_unlock(&vcrypto->ctrl_lock);
		kzfree(cipher_key);
		return err;
	}
	virtqueue_kick(vcrypto->ctrl_vq);

	/*
	 * The request traps into the hypervisor, so it should be
	 * handled immediately; busy-wait for completion.
	 */
	while (!virtqueue_get_buf(vcrypto->ctrl_vq, &tmp) &&
	       !virtqueue_is_broken(vcrypto->ctrl_vq))
		cpu_relax();

	if (le32_to_cpu(vcrypto->input.status) != VIRTIO_CRYPTO_OK) {
		spin_unlock(&vcrypto->ctrl_lock);
		pr_err("virtio_crypto: Create session failed status: %u\n",
		       le32_to_cpu(vcrypto->input.status));
		kzfree(cipher_key);
		return -EINVAL;
	}

	if (encrypt)
		ctx->enc_sess_info.session_id =
			le64_to_cpu(vcrypto->input.session_id);
	else
		ctx->dec_sess_info.session_id =
			le64_to_cpu(vcrypto->input.session_id);

	spin_unlock(&vcrypto->ctrl_lock);

	kzfree(cipher_key);
	return 0;
}
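
/*
 * Session teardown mirrors creation: a DESTROY_SESSION control
 * request carries the session_id out and a one-byte status back in,
 * again completed by busy-waiting on the control virtqueue.
 */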
static int virtio_crypto_alg_ablkcipher_close_session(
		struct virtio_crypto_ablkcipher_ctx *ctx,
		int encrypt)
{
	struct scatterlist outhdr, status_sg, *sgs[2];
	unsigned int tmp;
	struct virtio_crypto_destroy_session_req *destroy_session;
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	int err;
	unsigned int num_out = 0, num_in = 0;

	spin_lock(&vcrypto->ctrl_lock);
	vcrypto->ctrl_status.status = VIRTIO_CRYPTO_ERR;
	/* Fill in the ctrl header */
	vcrypto->ctrl.header.opcode =
		cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION);
	/* Set the default virtqueue id to 0 */
	vcrypto->ctrl.header.queue_id = 0;

	destroy_session = &vcrypto->ctrl.u.destroy_session;

	if (encrypt)
		destroy_session->session_id =
			cpu_to_le64(ctx->enc_sess_info.session_id);
	else
		destroy_session->session_id =
			cpu_to_le64(ctx->dec_sess_info.session_id);

	sg_init_one(&outhdr, &vcrypto->ctrl, sizeof(vcrypto->ctrl));
	sgs[num_out++] = &outhdr;

	/* Return status and session id back */
	sg_init_one(&status_sg, &vcrypto->ctrl_status.status,
		    sizeof(vcrypto->ctrl_status.status));
	sgs[num_out + num_in++] = &status_sg;

	err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out,
				num_in, vcrypto, GFP_ATOMIC);
	if (err < 0) {
		spin_unlock(&vcrypto->ctrl_lock);
		return err;
	}
	virtqueue_kick(vcrypto->ctrl_vq);

	while (!virtqueue_get_buf(vcrypto->ctrl_vq, &tmp) &&
	       !virtqueue_is_broken(vcrypto->ctrl_vq))
		cpu_relax();

	if (vcrypto->ctrl_status.status != VIRTIO_CRYPTO_OK) {
		spin_unlock(&vcrypto->ctrl_lock);
		pr_err("virtio_crypto: Close session failed status: %u, session_id: 0x%llx\n",
		       vcrypto->ctrl_status.status,
		       destroy_session->session_id);

		return -EINVAL;
	}
	spin_unlock(&vcrypto->ctrl_lock);

	return 0;
}
static int virtio_crypto_alg_ablkcipher_init_sessions(
		struct virtio_crypto_ablkcipher_ctx *ctx,
		const uint8_t *key, unsigned int keylen)
{
	uint32_t alg;
	int ret;
	struct virtio_crypto *vcrypto = ctx->vcrypto;

	if (keylen > vcrypto->max_cipher_key_len) {
		pr_err("virtio_crypto: the key is too long\n");
		goto bad_key;
	}

	if (virtio_crypto_alg_validate_key(keylen, &alg))
		goto bad_key;

	/* Create encryption session */
	ret = virtio_crypto_alg_ablkcipher_init_session(ctx,
			alg, key, keylen, 1);
	if (ret)
		return ret;
	/* Create decryption session */
	ret = virtio_crypto_alg_ablkcipher_init_session(ctx,
			alg, key, keylen, 0);
	if (ret) {
		virtio_crypto_alg_ablkcipher_close_session(ctx, 1);
		return ret;
	}
	return 0;

bad_key:
	crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
/* Note: kernel crypto API implementation */
static int virtio_crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
					   const uint8_t *key,
					   unsigned int keylen)
{
	struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	uint32_t alg;
	int ret;

	ret = virtio_crypto_alg_validate_key(keylen, &alg);
	if (ret)
		return ret;

	if (!ctx->vcrypto) {
		/* New key */
		int node = virtio_crypto_get_current_node();
		struct virtio_crypto *vcrypto =
				virtcrypto_get_dev_node(node,
				VIRTIO_CRYPTO_SERVICE_CIPHER, alg);
		if (!vcrypto) {
			pr_err("virtio_crypto: Could not find a virtio device in the system or unsupported algo\n");
			return -ENODEV;
		}

		ctx->vcrypto = vcrypto;
	} else {
		/* Rekeying: close the previously created sessions first */
		virtio_crypto_alg_ablkcipher_close_session(ctx, 1);
		virtio_crypto_alg_ablkcipher_close_session(ctx, 0);
	}

	ret = virtio_crypto_alg_ablkcipher_init_sessions(ctx, key, keylen);
	if (ret) {
		virtcrypto_dev_put(ctx->vcrypto);
		ctx->vcrypto = NULL;

		return ret;
	}

	return 0;
}
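
/*
 * Builds and submits one data-virtqueue request.  The descriptor
 * layout is: op header (out), IV (out), source sgs (out), then
 * destination sgs (in) and the one-byte status (in); hence
 * sg_total = src_nents + dst_nents + 3 below.
 */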
static int
__virtio_crypto_ablkcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
		struct ablkcipher_request *req,
		struct data_queue *data_vq)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct virtio_crypto_ablkcipher_ctx *ctx = vc_sym_req->ablkcipher_ctx;
	struct virtio_crypto_request *vc_req = &vc_sym_req->base;
	unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	struct virtio_crypto_op_data_req *req_data;
	int src_nents, dst_nents;
	int err;
	unsigned long flags;
	struct scatterlist outhdr, iv_sg, status_sg, **sgs;
	int i;
	u64 dst_len;
	unsigned int num_out = 0, num_in = 0;
	int sg_total;
	uint8_t *iv;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	dst_nents = sg_nents(req->dst);

	pr_debug("virtio_crypto: Number of sgs (src_nents: %d, dst_nents: %d)\n",
		 src_nents, dst_nents);

	/* Why 3?  outhdr + iv + inhdr */
	sg_total = src_nents + dst_nents + 3;
	sgs = kcalloc_node(sg_total, sizeof(*sgs), GFP_KERNEL,
			   dev_to_node(&vcrypto->vdev->dev));
	if (!sgs)
		return -ENOMEM;

	req_data = kzalloc_node(sizeof(*req_data), GFP_KERNEL,
				dev_to_node(&vcrypto->vdev->dev));
	if (!req_data) {
		kfree(sgs);
		return -ENOMEM;
	}

	vc_req->req_data = req_data;
	vc_sym_req->type = VIRTIO_CRYPTO_SYM_OP_CIPHER;
	/* Head of operation */
	if (vc_sym_req->encrypt) {
		req_data->header.session_id =
			cpu_to_le64(ctx->enc_sess_info.session_id);
		req_data->header.opcode =
			cpu_to_le32(VIRTIO_CRYPTO_CIPHER_ENCRYPT);
	} else {
		req_data->header.session_id =
			cpu_to_le64(ctx->dec_sess_info.session_id);
		req_data->header.opcode =
			cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DECRYPT);
	}
	req_data->u.sym_req.op_type = cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
	req_data->u.sym_req.u.cipher.para.iv_len = cpu_to_le32(ivsize);
	req_data->u.sym_req.u.cipher.para.src_data_len =
		cpu_to_le32(req->nbytes);

	dst_len = virtio_crypto_alg_sg_nents_length(req->dst);
	if (unlikely(dst_len > U32_MAX)) {
		pr_err("virtio_crypto: The dst_len is beyond U32_MAX\n");
		err = -EINVAL;
		goto free;
	}

	pr_debug("virtio_crypto: src_len: %u, dst_len: %llu\n",
		 req->nbytes, dst_len);

	if (unlikely(req->nbytes + dst_len + ivsize +
		     sizeof(vc_req->status) > vcrypto->max_size)) {
		pr_err("virtio_crypto: The length is too big\n");
		err = -EINVAL;
		goto free;
	}

	req_data->u.sym_req.u.cipher.para.dst_data_len =
		cpu_to_le32((uint32_t)dst_len);

	/* Outhdr */
	sg_init_one(&outhdr, req_data, sizeof(*req_data));
	sgs[num_out++] = &outhdr;

	/*
	 * To avoid doing DMA from the stack, use a dynamically
	 * allocated buffer for the IV.
	 */
	iv = kzalloc_node(ivsize, GFP_ATOMIC,
			  dev_to_node(&vcrypto->vdev->dev));
	if (!iv) {
		err = -ENOMEM;
		goto free;
	}
	memcpy(iv, req->info, ivsize);
	sg_init_one(&iv_sg, iv, ivsize);
	sgs[num_out++] = &iv_sg;
	vc_sym_req->iv = iv;

	/* Source data */
	for (i = 0; i < src_nents; i++)
		sgs[num_out++] = &req->src[i];

	/* Destination data */
	for (i = 0; i < dst_nents; i++)
		sgs[num_out + num_in++] = &req->dst[i];

	/* Status */
	sg_init_one(&status_sg, &vc_req->status, sizeof(vc_req->status));
	sgs[num_out + num_in++] = &status_sg;

	vc_req->sgs = sgs;

	spin_lock_irqsave(&data_vq->lock, flags);
	err = virtqueue_add_sgs(data_vq->vq, sgs, num_out,
				num_in, vc_req, GFP_ATOMIC);
	virtqueue_kick(data_vq->vq);
	spin_unlock_irqrestore(&data_vq->lock, flags);
	if (unlikely(err < 0))
		goto free_iv;

	return 0;

free_iv:
	kzfree(iv);
free:
	kzfree(req_data);
	kfree(sgs);
	return err;
}
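
/*
 * The encrypt/decrypt entry points below do not touch the virtqueue
 * directly; they only fill in the per-request context and hand the
 * request to the crypto engine, which later invokes
 * virtio_crypto_ablkcipher_crypt_req() to do the actual submission.
 */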
static int virtio_crypto_ablkcipher_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
	struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(atfm);
	struct virtio_crypto_sym_request *vc_sym_req =
				ablkcipher_request_ctx(req);
	struct virtio_crypto_request *vc_req = &vc_sym_req->base;
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	/* Use the first data virtqueue as default */
	struct data_queue *data_vq = &vcrypto->data_vq[0];

	vc_req->dataq = data_vq;
	vc_req->alg_cb = virtio_crypto_dataq_sym_callback;
	vc_sym_req->ablkcipher_ctx = ctx;
	vc_sym_req->ablkcipher_req = req;
	vc_sym_req->encrypt = true;

	return crypto_transfer_ablkcipher_request_to_engine(data_vq->engine, req);
}
static int virtio_crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
	struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(atfm);
	struct virtio_crypto_sym_request *vc_sym_req =
				ablkcipher_request_ctx(req);
	struct virtio_crypto_request *vc_req = &vc_sym_req->base;
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	/* Use the first data virtqueue as default */
	struct data_queue *data_vq = &vcrypto->data_vq[0];

	vc_req->dataq = data_vq;
	vc_req->alg_cb = virtio_crypto_dataq_sym_callback;
	vc_sym_req->ablkcipher_ctx = ctx;
	vc_sym_req->ablkcipher_req = req;
	vc_sym_req->encrypt = false;

	return crypto_transfer_ablkcipher_request_to_engine(data_vq->engine, req);
}
static int virtio_crypto_ablkcipher_init(struct crypto_tfm *tfm)
{
	struct virtio_crypto_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

	tfm->crt_ablkcipher.reqsize = sizeof(struct virtio_crypto_sym_request);
	ctx->tfm = tfm;

	ctx->enginectx.op.do_one_request = virtio_crypto_ablkcipher_crypt_req;
	ctx->enginectx.op.prepare_request = NULL;
	ctx->enginectx.op.unprepare_request = NULL;
	return 0;
}
static void virtio_crypto_ablkcipher_exit(struct crypto_tfm *tfm)
{
	struct virtio_crypto_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

	if (!ctx->vcrypto)
		return;

	virtio_crypto_alg_ablkcipher_close_session(ctx, 1);
	virtio_crypto_alg_ablkcipher_close_session(ctx, 0);
	virtcrypto_dev_put(ctx->vcrypto);
	ctx->vcrypto = NULL;
}
int virtio_crypto_ablkcipher_crypt_req(
	struct crypto_engine *engine, void *vreq)
{
	struct ablkcipher_request *req = container_of(vreq,
				struct ablkcipher_request, base);
	struct virtio_crypto_sym_request *vc_sym_req =
				ablkcipher_request_ctx(req);
	struct virtio_crypto_request *vc_req = &vc_sym_req->base;
	struct data_queue *data_vq = vc_req->dataq;
	int ret;

	ret = __virtio_crypto_ablkcipher_do_req(vc_sym_req, req, data_vq);
	if (ret < 0)
		return ret;

	virtqueue_kick(data_vq->vq);

	return 0;
}
static void virtio_crypto_ablkcipher_finalize_req(
	struct virtio_crypto_sym_request *vc_sym_req,
	struct ablkcipher_request *req,
	int err)
{
	crypto_finalize_ablkcipher_request(vc_sym_req->base.dataq->engine,
					   req, err);
	kzfree(vc_sym_req->iv);
	virtcrypto_clear_request(&vc_sym_req->base);
}
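
/*
 * Table of algorithms exposed by this driver.  Each entry pairs the
 * virtio service/algorithm numbers (used to check device support)
 * with the kernel crypto_alg definition that gets registered.
 */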
static struct virtio_crypto_algo virtio_crypto_algs[] = { {
	.algonum = VIRTIO_CRYPTO_CIPHER_AES_CBC,
	.service = VIRTIO_CRYPTO_SERVICE_CIPHER,
	.algo = {
		.cra_name = "cbc(aes)",
		.cra_driver_name = "virtio_crypto_aes_cbc",
		.cra_priority = 150,
		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
			     CRYPTO_ALG_ASYNC,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct virtio_crypto_ablkcipher_ctx),
		.cra_module = THIS_MODULE,
		.cra_type = &crypto_ablkcipher_type,
		.cra_init = virtio_crypto_ablkcipher_init,
		.cra_exit = virtio_crypto_ablkcipher_exit,
		.cra_u = {
			.ablkcipher = {
				.setkey = virtio_crypto_ablkcipher_setkey,
				.decrypt = virtio_crypto_ablkcipher_decrypt,
				.encrypt = virtio_crypto_ablkcipher_encrypt,
				.min_keysize = AES_MIN_KEY_SIZE,
				.max_keysize = AES_MAX_KEY_SIZE,
				.ivsize = AES_BLOCK_SIZE,
			},
		},
	},
} };
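
/*
 * Illustrative sketch only (not part of this driver): once the algo
 * above is registered, a kernel user reaches it through the generic
 * ablkcipher API, e.g.:
 *
 *	struct crypto_ablkcipher *tfm =
 *		crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *
 *	if (!IS_ERR(tfm)) {
 *		crypto_ablkcipher_setkey(tfm, key, keylen);
 *		...
 *		crypto_free_ablkcipher(tfm);
 *	}
 */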
int virtio_crypto_algs_register(struct virtio_crypto *vcrypto)
{
	int ret = 0;
	int i;

	mutex_lock(&algs_lock);

	for (i = 0; i < ARRAY_SIZE(virtio_crypto_algs); i++) {

		uint32_t service = virtio_crypto_algs[i].service;
		uint32_t algonum = virtio_crypto_algs[i].algonum;

		if (!virtcrypto_algo_is_supported(vcrypto, service, algonum))
			continue;

		if (virtio_crypto_algs[i].active_devs == 0) {
			ret = crypto_register_alg(&virtio_crypto_algs[i].algo);
			if (ret)
				goto unlock;
		}

		virtio_crypto_algs[i].active_devs++;
		dev_info(&vcrypto->vdev->dev, "Registered algo %s\n",
			 virtio_crypto_algs[i].algo.cra_name);
	}

unlock:
	mutex_unlock(&algs_lock);
	return ret;
}
void virtio_crypto_algs_unregister(struct virtio_crypto *vcrypto)
{
	int i;

	mutex_lock(&algs_lock);

	for (i = 0; i < ARRAY_SIZE(virtio_crypto_algs); i++) {

		uint32_t service = virtio_crypto_algs[i].service;
		uint32_t algonum = virtio_crypto_algs[i].algonum;

		if (virtio_crypto_algs[i].active_devs == 0 ||
		    !virtcrypto_algo_is_supported(vcrypto, service, algonum))
			continue;

		if (virtio_crypto_algs[i].active_devs == 1)
			crypto_unregister_alg(&virtio_crypto_algs[i].algo);

		virtio_crypto_algs[i].active_devs--;
	}

	mutex_unlock(&algs_lock);
}