/* Algorithms supported by virtio crypto device
 *
 * Authors: Gonglei <arei.gonglei@huawei.com>
 *
 * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/scatterlist.h>
#include <crypto/algapi.h>
#include <linux/err.h>
#include <crypto/scatterwalk.h>
#include <linux/atomic.h>

#include <uapi/linux/virtio_crypto.h>
#include "virtio_crypto_common.h"
struct virtio_crypto_ablkcipher_ctx {
	struct crypto_engine_ctx enginectx;
	struct virtio_crypto *vcrypto;
	struct crypto_tfm *tfm;

	struct virtio_crypto_sym_session_info enc_sess_info;
	struct virtio_crypto_sym_session_info dec_sess_info;
};
struct virtio_crypto_sym_request {
	struct virtio_crypto_request base;

	/* Cipher or aead */
	uint32_t type;
	struct virtio_crypto_ablkcipher_ctx *ablkcipher_ctx;
	struct ablkcipher_request *ablkcipher_req;
	uint8_t *iv;
	/* Encryption? */
	bool encrypt;
};
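/*
 * The per-request context above is carved out of the ablkcipher request
 * itself: virtio_crypto_ablkcipher_init() sets crt_ablkcipher.reqsize to
 * sizeof(struct virtio_crypto_sym_request), so ablkcipher_request_ctx()
 * yields this structure without a separate allocation per request.
 */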
/*
 * The algs_lock protects the below global virtio_crypto_active_devs
 * and crypto algorithms registration.
 */
static DEFINE_MUTEX(algs_lock);
static unsigned int virtio_crypto_active_devs;
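/*
 * virtio_crypto_active_devs counts probed devices: the algorithms are
 * registered with the crypto API when the count goes 0 -> 1 in
 * virtio_crypto_algs_register() and unregistered only when it drops back
 * to 0 in virtio_crypto_algs_unregister().
 */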
static void virtio_crypto_ablkcipher_finalize_req(
	struct virtio_crypto_sym_request *vc_sym_req,
	struct ablkcipher_request *req,
	int err);
static void virtio_crypto_dataq_sym_callback
		(struct virtio_crypto_request *vc_req, int len)
{
	struct virtio_crypto_sym_request *vc_sym_req =
		container_of(vc_req, struct virtio_crypto_sym_request, base);
	struct ablkcipher_request *ablk_req;
	int error;

	/* Finish the encrypt or decrypt process */
	if (vc_sym_req->type == VIRTIO_CRYPTO_SYM_OP_CIPHER) {
		/* Map the device status to a Linux errno */
		switch (vc_req->status) {
		case VIRTIO_CRYPTO_OK:
			error = 0;
			break;
		case VIRTIO_CRYPTO_INVSESS:
		case VIRTIO_CRYPTO_ERR:
			error = -EINVAL;
			break;
		case VIRTIO_CRYPTO_BADMSG:
			error = -EBADMSG;
			break;
		default:
			error = -EIO;
			break;
		}
		ablk_req = vc_sym_req->ablkcipher_req;
		virtio_crypto_ablkcipher_finalize_req(vc_sym_req,
						      ablk_req, error);
	}
}
static u64 virtio_crypto_alg_sg_nents_length(struct scatterlist *sg)
{
	u64 total = 0;

	for (total = 0; sg; sg = sg_next(sg))
		total += sg->length;

	return total;
}
static int
virtio_crypto_alg_validate_key(int key_len, uint32_t *alg)
{
	switch (key_len) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_192:
	case AES_KEYSIZE_256:
		*alg = VIRTIO_CRYPTO_CIPHER_AES_CBC;
		break;
	default:
		pr_err("virtio_crypto: Unsupported key length: %d\n",
		       key_len);
		return -EINVAL;
	}
	return 0;
}
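/*
 * Create one session on the device over the control virtqueue.  The
 * request is three descriptors: the control header (out), the raw key
 * (out) and the device-written status/session id (in).  Because session
 * creation traps into the hypervisor, the reply is busy-waited for under
 * ctrl_lock instead of being completed asynchronously.
 */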
static int virtio_crypto_alg_ablkcipher_init_session(
		struct virtio_crypto_ablkcipher_ctx *ctx,
		uint32_t alg, const uint8_t *key,
		unsigned int keylen,
		int encrypt)
{
	struct scatterlist outhdr, key_sg, inhdr, *sgs[3];
	unsigned int tmp;
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	int op = encrypt ? VIRTIO_CRYPTO_OP_ENCRYPT : VIRTIO_CRYPTO_OP_DECRYPT;
	int err;
	unsigned int num_out = 0, num_in = 0;

	/*
	 * Avoid to do DMA from the stack, switch to using
	 * dynamically-allocated memory for the key
	 */
	uint8_t *cipher_key = kmalloc(keylen, GFP_ATOMIC);

	if (!cipher_key)
		return -ENOMEM;

	memcpy(cipher_key, key, keylen);

	spin_lock(&vcrypto->ctrl_lock);
	/* Pad ctrl header */
	vcrypto->ctrl.header.opcode =
		cpu_to_le32(VIRTIO_CRYPTO_CIPHER_CREATE_SESSION);
	vcrypto->ctrl.header.algo = cpu_to_le32(alg);
	/* Set the default dataqueue id to 0 */
	vcrypto->ctrl.header.queue_id = 0;

	vcrypto->input.status = cpu_to_le32(VIRTIO_CRYPTO_ERR);
	/* Pad cipher's parameters */
	vcrypto->ctrl.u.sym_create_session.op_type =
		cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
	vcrypto->ctrl.u.sym_create_session.u.cipher.para.algo =
		vcrypto->ctrl.header.algo;
	vcrypto->ctrl.u.sym_create_session.u.cipher.para.keylen =
		cpu_to_le32(keylen);
	vcrypto->ctrl.u.sym_create_session.u.cipher.para.op =
		cpu_to_le32(op);

	sg_init_one(&outhdr, &vcrypto->ctrl, sizeof(vcrypto->ctrl));
	sgs[num_out++] = &outhdr;

	/* Set key */
	sg_init_one(&key_sg, cipher_key, keylen);
	sgs[num_out++] = &key_sg;

	/* Return status and session id back */
	sg_init_one(&inhdr, &vcrypto->input, sizeof(vcrypto->input));
	sgs[num_out + num_in++] = &inhdr;

	err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out,
				num_in, vcrypto, GFP_ATOMIC);
	if (err < 0) {
		spin_unlock(&vcrypto->ctrl_lock);
		kzfree(cipher_key);
		return err;
	}
	virtqueue_kick(vcrypto->ctrl_vq);

	/*
	 * Trapping into the hypervisor, so the request should be
	 * handled immediately.
	 */
	while (!virtqueue_get_buf(vcrypto->ctrl_vq, &tmp) &&
	       !virtqueue_is_broken(vcrypto->ctrl_vq))
		cpu_relax();

	if (le32_to_cpu(vcrypto->input.status) != VIRTIO_CRYPTO_OK) {
		spin_unlock(&vcrypto->ctrl_lock);
		pr_err("virtio_crypto: Create session failed status: %u\n",
		       le32_to_cpu(vcrypto->input.status));
		kzfree(cipher_key);
		return -EINVAL;
	}

	if (encrypt)
		ctx->enc_sess_info.session_id =
			le64_to_cpu(vcrypto->input.session_id);
	else
		ctx->dec_sess_info.session_id =
			le64_to_cpu(vcrypto->input.session_id);

	spin_unlock(&vcrypto->ctrl_lock);

	kzfree(cipher_key);
	return 0;
}
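/*
 * Tear down one previously created session, mirroring session creation:
 * a destroy request goes out over the control virtqueue and the one-byte
 * device status is polled for under ctrl_lock.
 */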
static int virtio_crypto_alg_ablkcipher_close_session(
		struct virtio_crypto_ablkcipher_ctx *ctx,
		int encrypt)
{
	struct scatterlist outhdr, status_sg, *sgs[2];
	unsigned int tmp;
	struct virtio_crypto_destroy_session_req *destroy_session;
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	int err;
	unsigned int num_out = 0, num_in = 0;

	spin_lock(&vcrypto->ctrl_lock);
	vcrypto->ctrl_status.status = VIRTIO_CRYPTO_ERR;
	/* Pad ctrl header */
	vcrypto->ctrl.header.opcode =
		cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION);
	/* Set the default virtqueue id to 0 */
	vcrypto->ctrl.header.queue_id = 0;

	destroy_session = &vcrypto->ctrl.u.destroy_session;

	if (encrypt)
		destroy_session->session_id =
			cpu_to_le64(ctx->enc_sess_info.session_id);
	else
		destroy_session->session_id =
			cpu_to_le64(ctx->dec_sess_info.session_id);

	sg_init_one(&outhdr, &vcrypto->ctrl, sizeof(vcrypto->ctrl));
	sgs[num_out++] = &outhdr;

	/* Return status and session id back */
	sg_init_one(&status_sg, &vcrypto->ctrl_status.status,
		    sizeof(vcrypto->ctrl_status.status));
	sgs[num_out + num_in++] = &status_sg;

	err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out,
				num_in, vcrypto, GFP_ATOMIC);
	if (err < 0) {
		spin_unlock(&vcrypto->ctrl_lock);
		return err;
	}
	virtqueue_kick(vcrypto->ctrl_vq);

	while (!virtqueue_get_buf(vcrypto->ctrl_vq, &tmp) &&
	       !virtqueue_is_broken(vcrypto->ctrl_vq))
		cpu_relax();

	if (vcrypto->ctrl_status.status != VIRTIO_CRYPTO_OK) {
		spin_unlock(&vcrypto->ctrl_lock);
		pr_err("virtio_crypto: Close session failed status: %u, session_id: 0x%llx\n",
		       vcrypto->ctrl_status.status,
		       destroy_session->session_id);

		return -EINVAL;
	}

	spin_unlock(&vcrypto->ctrl_lock);
	return 0;
}
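/*
 * The device uses distinct sessions for the two directions, so a single
 * setkey creates both an encryption and a decryption session; if the
 * second creation fails, the first is rolled back.
 */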
static int virtio_crypto_alg_ablkcipher_init_sessions(
		struct virtio_crypto_ablkcipher_ctx *ctx,
		const uint8_t *key, unsigned int keylen)
{
	uint32_t alg;
	int ret;
	struct virtio_crypto *vcrypto = ctx->vcrypto;

	if (keylen > vcrypto->max_cipher_key_len) {
		pr_err("virtio_crypto: the key is too long\n");
		goto bad_key;
	}

	if (virtio_crypto_alg_validate_key(keylen, &alg))
		goto bad_key;

	/* Create encryption session */
	ret = virtio_crypto_alg_ablkcipher_init_session(ctx,
			alg, key, keylen, 1);
	if (ret)
		return ret;
	/* Create decryption session */
	ret = virtio_crypto_alg_ablkcipher_init_session(ctx,
			alg, key, keylen, 0);
	if (ret) {
		virtio_crypto_alg_ablkcipher_close_session(ctx, 1);
		return ret;
	}
	return 0;

bad_key:
	crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
/* Note: kernel crypto API realization */
static int virtio_crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
					   const uint8_t *key,
					   unsigned int keylen)
{
	struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	int ret;

	if (!ctx->vcrypto) {
		/* New key */
		int node = virtio_crypto_get_current_node();
		struct virtio_crypto *vcrypto =
				      virtcrypto_get_dev_node(node);
		if (!vcrypto) {
			pr_err("virtio_crypto: Could not find a virtio device in the system\n");
			return -ENODEV;
		}

		ctx->vcrypto = vcrypto;
	} else {
		/* Rekeying, we should close the created sessions previously */
		virtio_crypto_alg_ablkcipher_close_session(ctx, 1);
		virtio_crypto_alg_ablkcipher_close_session(ctx, 0);
	}

	ret = virtio_crypto_alg_ablkcipher_init_sessions(ctx, key, keylen);
	if (ret) {
		virtcrypto_dev_put(ctx->vcrypto);
		ctx->vcrypto = NULL;

		return ret;
	}

	return 0;
}
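/*
 * Build and submit one data-virtqueue request.  The descriptor chain is
 * laid out as: out direction - op header, IV, source scatterlist entries;
 * in direction - destination scatterlist entries, then the one-byte
 * status the device writes back (hence the "+ 3" when sizing sgs[]).
 */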
static int
__virtio_crypto_ablkcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
		struct ablkcipher_request *req,
		struct data_queue *data_vq)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct virtio_crypto_ablkcipher_ctx *ctx = vc_sym_req->ablkcipher_ctx;
	struct virtio_crypto_request *vc_req = &vc_sym_req->base;
	unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	struct virtio_crypto_op_data_req *req_data;
	int src_nents, dst_nents;
	int err;
	unsigned long flags;
	struct scatterlist outhdr, iv_sg, status_sg, **sgs;
	int i;
	u64 dst_len;
	unsigned int num_out = 0, num_in = 0;
	int sg_total;
	uint8_t *iv;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	dst_nents = sg_nents(req->dst);

	pr_debug("virtio_crypto: Number of sgs (src_nents: %d, dst_nents: %d)\n",
		 src_nents, dst_nents);

	/* Why 3?  outhdr + iv + inhdr */
	sg_total = src_nents + dst_nents + 3;
	sgs = kcalloc_node(sg_total, sizeof(*sgs), GFP_ATOMIC,
			   dev_to_node(&vcrypto->vdev->dev));
	if (!sgs)
		return -ENOMEM;

	req_data = kzalloc_node(sizeof(*req_data), GFP_ATOMIC,
				dev_to_node(&vcrypto->vdev->dev));
	if (!req_data) {
		kfree(sgs);
		return -ENOMEM;
	}

	vc_req->req_data = req_data;
	vc_sym_req->type = VIRTIO_CRYPTO_SYM_OP_CIPHER;
	/* Head of operation */
	if (vc_sym_req->encrypt) {
		req_data->header.session_id =
			cpu_to_le64(ctx->enc_sess_info.session_id);
		req_data->header.opcode =
			cpu_to_le32(VIRTIO_CRYPTO_CIPHER_ENCRYPT);
	} else {
		req_data->header.session_id =
			cpu_to_le64(ctx->dec_sess_info.session_id);
		req_data->header.opcode =
			cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DECRYPT);
	}
	req_data->u.sym_req.op_type = cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
	req_data->u.sym_req.u.cipher.para.iv_len = cpu_to_le32(ivsize);
	req_data->u.sym_req.u.cipher.para.src_data_len =
			cpu_to_le32(req->nbytes);

	dst_len = virtio_crypto_alg_sg_nents_length(req->dst);
	if (unlikely(dst_len > U32_MAX)) {
		pr_err("virtio_crypto: The dst_len is beyond U32_MAX\n");
		err = -EINVAL;
		goto free;
	}

	pr_debug("virtio_crypto: src_len: %u, dst_len: %llu\n",
		 req->nbytes, dst_len);

	if (unlikely(req->nbytes + dst_len + ivsize +
		sizeof(vc_req->status) > vcrypto->max_size)) {
		pr_err("virtio_crypto: The length is too big\n");
		err = -EINVAL;
		goto free;
	}

	req_data->u.sym_req.u.cipher.para.dst_data_len =
			cpu_to_le32((uint32_t)dst_len);

	/* Outhdr */
	sg_init_one(&outhdr, req_data, sizeof(*req_data));
	sgs[num_out++] = &outhdr;

	/*
	 * Avoid to do DMA from the stack, switch to using
	 * dynamically-allocated memory for the IV
	 */
	iv = kzalloc_node(ivsize, GFP_ATOMIC,
			  dev_to_node(&vcrypto->vdev->dev));
	if (!iv) {
		err = -ENOMEM;
		goto free;
	}
	memcpy(iv, req->info, ivsize);
	sg_init_one(&iv_sg, iv, ivsize);
	sgs[num_out++] = &iv_sg;
	vc_sym_req->iv = iv;

	/* Source data */
	for (i = 0; i < src_nents; i++)
		sgs[num_out++] = &req->src[i];

	/* Destination data */
	for (i = 0; i < dst_nents; i++)
		sgs[num_out + num_in++] = &req->dst[i];

	/* Status */
	sg_init_one(&status_sg, &vc_req->status, sizeof(vc_req->status));
	sgs[num_out + num_in++] = &status_sg;

	vc_req->sgs = sgs;

	spin_lock_irqsave(&data_vq->lock, flags);
	err = virtqueue_add_sgs(data_vq->vq, sgs, num_out,
				num_in, vc_req, GFP_ATOMIC);
	virtqueue_kick(data_vq->vq);
	spin_unlock_irqrestore(&data_vq->lock, flags);
	if (unlikely(err < 0))
		goto free_iv;

	return 0;

free_iv:
	kzfree(iv);
free:
	kzfree(req_data);
	kfree(sgs);
	return err;
}
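/*
 * The encrypt/decrypt entry points below do not touch the virtqueue
 * directly; they fill in the per-request context and hand the request to
 * the crypto engine bound to the chosen data queue, which later invokes
 * virtio_crypto_ablkcipher_crypt_req() from its worker.
 */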
static int virtio_crypto_ablkcipher_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
	struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(atfm);
	struct virtio_crypto_sym_request *vc_sym_req =
				ablkcipher_request_ctx(req);
	struct virtio_crypto_request *vc_req = &vc_sym_req->base;
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	/* Use the first data virtqueue as default */
	struct data_queue *data_vq = &vcrypto->data_vq[0];

	vc_req->dataq = data_vq;
	vc_req->alg_cb = virtio_crypto_dataq_sym_callback;
	vc_sym_req->ablkcipher_ctx = ctx;
	vc_sym_req->ablkcipher_req = req;
	vc_sym_req->encrypt = true;

	return crypto_transfer_ablkcipher_request_to_engine(data_vq->engine, req);
}
static int virtio_crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
	struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(atfm);
	struct virtio_crypto_sym_request *vc_sym_req =
				ablkcipher_request_ctx(req);
	struct virtio_crypto_request *vc_req = &vc_sym_req->base;
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	/* Use the first data virtqueue as default */
	struct data_queue *data_vq = &vcrypto->data_vq[0];

	vc_req->dataq = data_vq;
	vc_req->alg_cb = virtio_crypto_dataq_sym_callback;
	vc_sym_req->ablkcipher_ctx = ctx;
	vc_sym_req->ablkcipher_req = req;
	vc_sym_req->encrypt = false;

	return crypto_transfer_ablkcipher_request_to_engine(data_vq->engine, req);
}
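/*
 * tfm init/exit: sessions are created lazily in setkey(), so init only
 * sizes the request context and wires up the crypto engine callbacks,
 * while exit tears down whatever sessions setkey() left behind.
 */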
static int virtio_crypto_ablkcipher_init(struct crypto_tfm *tfm)
{
	struct virtio_crypto_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

	tfm->crt_ablkcipher.reqsize = sizeof(struct virtio_crypto_sym_request);
	ctx->tfm = tfm;

	ctx->enginectx.op.do_one_request = virtio_crypto_ablkcipher_crypt_req;
	ctx->enginectx.op.prepare_request = NULL;
	ctx->enginectx.op.unprepare_request = NULL;
	return 0;
}
static void virtio_crypto_ablkcipher_exit(struct crypto_tfm *tfm)
{
	struct virtio_crypto_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

	if (!ctx->vcrypto)
		return;

	virtio_crypto_alg_ablkcipher_close_session(ctx, 1);
	virtio_crypto_alg_ablkcipher_close_session(ctx, 0);
	virtcrypto_dev_put(ctx->vcrypto);
	ctx->vcrypto = NULL;
}
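/*
 * do_one_request hook: runs in crypto engine context and performs the
 * actual virtqueue submission prepared by the encrypt/decrypt entry
 * points.
 */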
int virtio_crypto_ablkcipher_crypt_req(
	struct crypto_engine *engine, void *vreq)
{
	struct ablkcipher_request *req = container_of(vreq,
			struct ablkcipher_request, base);
	struct virtio_crypto_sym_request *vc_sym_req =
				ablkcipher_request_ctx(req);
	struct virtio_crypto_request *vc_req = &vc_sym_req->base;
	struct data_queue *data_vq = vc_req->dataq;
	int ret;

	ret = __virtio_crypto_ablkcipher_do_req(vc_sym_req, req, data_vq);
	if (ret < 0)
		return ret;

	virtqueue_kick(data_vq->vq);

	return 0;
}
static void virtio_crypto_ablkcipher_finalize_req(
	struct virtio_crypto_sym_request *vc_sym_req,
	struct ablkcipher_request *req,
	int err)
{
	crypto_finalize_ablkcipher_request(vc_sym_req->base.dataq->engine,
					   req, err);
	kzfree(vc_sym_req->iv);
	virtcrypto_clear_request(&vc_sym_req->base);
}
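/*
 * Illustrative note (not part of this driver): the algorithm is
 * advertised under the generic "cbc(aes)" name, so in-kernel users would
 * typically reach it through the regular crypto API, e.g. the skcipher
 * interface, which can wrap ablkcipher implementations:
 *
 *	struct crypto_skcipher *tfm =
 *		crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *
 * Whether this driver is selected over other "cbc(aes)" providers
 * depends on the relative cra_priority values.
 */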
static struct crypto_alg virtio_crypto_algs[] = { {
	.cra_name = "cbc(aes)",
	.cra_driver_name = "virtio_crypto_aes_cbc",
	.cra_priority = 150,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize  = sizeof(struct virtio_crypto_ablkcipher_ctx),
	.cra_alignmask = 0,
	.cra_module = THIS_MODULE,
	.cra_type = &crypto_ablkcipher_type,
	.cra_init = virtio_crypto_ablkcipher_init,
	.cra_exit = virtio_crypto_ablkcipher_exit,
	.cra_u = {
		.ablkcipher = {
			.setkey = virtio_crypto_ablkcipher_setkey,
			.decrypt = virtio_crypto_ablkcipher_decrypt,
			.encrypt = virtio_crypto_ablkcipher_encrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
	},
} };
int virtio_crypto_algs_register(void)
{
	int ret = 0;

	mutex_lock(&algs_lock);
	if (++virtio_crypto_active_devs != 1)
		goto unlock;

	ret = crypto_register_algs(virtio_crypto_algs,
				   ARRAY_SIZE(virtio_crypto_algs));
	if (ret)
		virtio_crypto_active_devs--;

unlock:
	mutex_unlock(&algs_lock);
	return ret;
}
void virtio_crypto_algs_unregister(void)
{
	mutex_lock(&algs_lock);
	if (--virtio_crypto_active_devs != 0)
		goto unlock;

	crypto_unregister_algs(virtio_crypto_algs,
			       ARRAY_SIZE(virtio_crypto_algs));

unlock:
	mutex_unlock(&algs_lock);
}