1 /* Algorithms supported by virtio crypto device
3 * Authors: Gonglei <arei.gonglei@huawei.com>
5 * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
21 #include <linux/scatterlist.h>
22 #include <crypto/algapi.h>
23 #include <linux/err.h>
24 #include <crypto/scatterwalk.h>
25 #include <linux/atomic.h>
27 #include <uapi/linux/virtio_crypto.h>
28 #include "virtio_crypto_common.h"
/*
 * The algs_lock protects the below global virtio_crypto_active_devs
 * and crypto algorithms registration.
 */
static DEFINE_MUTEX(algs_lock);
static unsigned int virtio_crypto_active_devs;
37 static u64
virtio_crypto_alg_sg_nents_length(struct scatterlist
*sg
)
41 for (total
= 0; sg
; sg
= sg_next(sg
))
48 virtio_crypto_alg_validate_key(int key_len
, uint32_t *alg
)
54 *alg
= VIRTIO_CRYPTO_CIPHER_AES_CBC
;
57 pr_err("virtio_crypto: Unsupported key length: %d\n",
64 static int virtio_crypto_alg_ablkcipher_init_session(
65 struct virtio_crypto_ablkcipher_ctx
*ctx
,
66 uint32_t alg
, const uint8_t *key
,
70 struct scatterlist outhdr
, key_sg
, inhdr
, *sgs
[3];
72 struct virtio_crypto
*vcrypto
= ctx
->vcrypto
;
73 int op
= encrypt
? VIRTIO_CRYPTO_OP_ENCRYPT
: VIRTIO_CRYPTO_OP_DECRYPT
;
75 unsigned int num_out
= 0, num_in
= 0;
78 * Avoid to do DMA from the stack, switch to using
79 * dynamically-allocated for the key
81 uint8_t *cipher_key
= kmalloc(keylen
, GFP_ATOMIC
);
86 memcpy(cipher_key
, key
, keylen
);
88 spin_lock(&vcrypto
->ctrl_lock
);
90 vcrypto
->ctrl
.header
.opcode
=
91 cpu_to_le32(VIRTIO_CRYPTO_CIPHER_CREATE_SESSION
);
92 vcrypto
->ctrl
.header
.algo
= cpu_to_le32(alg
);
93 /* Set the default dataqueue id to 0 */
94 vcrypto
->ctrl
.header
.queue_id
= 0;
96 vcrypto
->input
.status
= cpu_to_le32(VIRTIO_CRYPTO_ERR
);
97 /* Pad cipher's parameters */
98 vcrypto
->ctrl
.u
.sym_create_session
.op_type
=
99 cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER
);
100 vcrypto
->ctrl
.u
.sym_create_session
.u
.cipher
.para
.algo
=
101 vcrypto
->ctrl
.header
.algo
;
102 vcrypto
->ctrl
.u
.sym_create_session
.u
.cipher
.para
.keylen
=
104 vcrypto
->ctrl
.u
.sym_create_session
.u
.cipher
.para
.op
=
107 sg_init_one(&outhdr
, &vcrypto
->ctrl
, sizeof(vcrypto
->ctrl
));
108 sgs
[num_out
++] = &outhdr
;
111 sg_init_one(&key_sg
, cipher_key
, keylen
);
112 sgs
[num_out
++] = &key_sg
;
114 /* Return status and session id back */
115 sg_init_one(&inhdr
, &vcrypto
->input
, sizeof(vcrypto
->input
));
116 sgs
[num_out
+ num_in
++] = &inhdr
;
118 err
= virtqueue_add_sgs(vcrypto
->ctrl_vq
, sgs
, num_out
,
119 num_in
, vcrypto
, GFP_ATOMIC
);
121 spin_unlock(&vcrypto
->ctrl_lock
);
125 virtqueue_kick(vcrypto
->ctrl_vq
);
128 * Trapping into the hypervisor, so the request should be
129 * handled immediately.
131 while (!virtqueue_get_buf(vcrypto
->ctrl_vq
, &tmp
) &&
132 !virtqueue_is_broken(vcrypto
->ctrl_vq
))
135 if (le32_to_cpu(vcrypto
->input
.status
) != VIRTIO_CRYPTO_OK
) {
136 spin_unlock(&vcrypto
->ctrl_lock
);
137 pr_err("virtio_crypto: Create session failed status: %u\n",
138 le32_to_cpu(vcrypto
->input
.status
));
144 ctx
->enc_sess_info
.session_id
=
145 le64_to_cpu(vcrypto
->input
.session_id
);
147 ctx
->dec_sess_info
.session_id
=
148 le64_to_cpu(vcrypto
->input
.session_id
);
150 spin_unlock(&vcrypto
->ctrl_lock
);
156 static int virtio_crypto_alg_ablkcipher_close_session(
157 struct virtio_crypto_ablkcipher_ctx
*ctx
,
160 struct scatterlist outhdr
, status_sg
, *sgs
[2];
162 struct virtio_crypto_destroy_session_req
*destroy_session
;
163 struct virtio_crypto
*vcrypto
= ctx
->vcrypto
;
165 unsigned int num_out
= 0, num_in
= 0;
167 spin_lock(&vcrypto
->ctrl_lock
);
168 vcrypto
->ctrl_status
.status
= VIRTIO_CRYPTO_ERR
;
169 /* Pad ctrl header */
170 vcrypto
->ctrl
.header
.opcode
=
171 cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION
);
172 /* Set the default virtqueue id to 0 */
173 vcrypto
->ctrl
.header
.queue_id
= 0;
175 destroy_session
= &vcrypto
->ctrl
.u
.destroy_session
;
178 destroy_session
->session_id
=
179 cpu_to_le64(ctx
->enc_sess_info
.session_id
);
181 destroy_session
->session_id
=
182 cpu_to_le64(ctx
->dec_sess_info
.session_id
);
184 sg_init_one(&outhdr
, &vcrypto
->ctrl
, sizeof(vcrypto
->ctrl
));
185 sgs
[num_out
++] = &outhdr
;
187 /* Return status and session id back */
188 sg_init_one(&status_sg
, &vcrypto
->ctrl_status
.status
,
189 sizeof(vcrypto
->ctrl_status
.status
));
190 sgs
[num_out
+ num_in
++] = &status_sg
;
192 err
= virtqueue_add_sgs(vcrypto
->ctrl_vq
, sgs
, num_out
,
193 num_in
, vcrypto
, GFP_ATOMIC
);
195 spin_unlock(&vcrypto
->ctrl_lock
);
198 virtqueue_kick(vcrypto
->ctrl_vq
);
200 while (!virtqueue_get_buf(vcrypto
->ctrl_vq
, &tmp
) &&
201 !virtqueue_is_broken(vcrypto
->ctrl_vq
))
204 if (vcrypto
->ctrl_status
.status
!= VIRTIO_CRYPTO_OK
) {
205 spin_unlock(&vcrypto
->ctrl_lock
);
206 pr_err("virtio_crypto: Close session failed status: %u, session_id: 0x%llx\n",
207 vcrypto
->ctrl_status
.status
,
208 destroy_session
->session_id
);
212 spin_unlock(&vcrypto
->ctrl_lock
);
217 static int virtio_crypto_alg_ablkcipher_init_sessions(
218 struct virtio_crypto_ablkcipher_ctx
*ctx
,
219 const uint8_t *key
, unsigned int keylen
)
223 struct virtio_crypto
*vcrypto
= ctx
->vcrypto
;
225 if (keylen
> vcrypto
->max_cipher_key_len
) {
226 pr_err("virtio_crypto: the key is too long\n");
230 if (virtio_crypto_alg_validate_key(keylen
, &alg
))
233 /* Create encryption session */
234 ret
= virtio_crypto_alg_ablkcipher_init_session(ctx
,
235 alg
, key
, keylen
, 1);
238 /* Create decryption session */
239 ret
= virtio_crypto_alg_ablkcipher_init_session(ctx
,
240 alg
, key
, keylen
, 0);
242 virtio_crypto_alg_ablkcipher_close_session(ctx
, 1);
248 crypto_tfm_set_flags(ctx
->tfm
, CRYPTO_TFM_RES_BAD_KEY_LEN
);
252 /* Note: kernel crypto API realization */
253 static int virtio_crypto_ablkcipher_setkey(struct crypto_ablkcipher
*tfm
,
257 struct virtio_crypto_ablkcipher_ctx
*ctx
= crypto_ablkcipher_ctx(tfm
);
262 int node
= virtio_crypto_get_current_node();
263 struct virtio_crypto
*vcrypto
=
264 virtcrypto_get_dev_node(node
);
266 pr_err("virtio_crypto: Could not find a virtio device in the system");
270 ctx
->vcrypto
= vcrypto
;
272 /* Rekeying, we should close the created sessions previously */
273 virtio_crypto_alg_ablkcipher_close_session(ctx
, 1);
274 virtio_crypto_alg_ablkcipher_close_session(ctx
, 0);
277 ret
= virtio_crypto_alg_ablkcipher_init_sessions(ctx
, key
, keylen
);
279 virtcrypto_dev_put(ctx
->vcrypto
);
289 __virtio_crypto_ablkcipher_do_req(struct virtio_crypto_request
*vc_req
,
290 struct ablkcipher_request
*req
,
291 struct data_queue
*data_vq
,
294 struct crypto_ablkcipher
*tfm
= crypto_ablkcipher_reqtfm(req
);
295 unsigned int ivsize
= crypto_ablkcipher_ivsize(tfm
);
296 struct virtio_crypto_ablkcipher_ctx
*ctx
= vc_req
->ablkcipher_ctx
;
297 struct virtio_crypto
*vcrypto
= ctx
->vcrypto
;
298 struct virtio_crypto_op_data_req
*req_data
;
299 int src_nents
, dst_nents
;
302 struct scatterlist outhdr
, iv_sg
, status_sg
, **sgs
;
305 unsigned int num_out
= 0, num_in
= 0;
309 src_nents
= sg_nents_for_len(req
->src
, req
->nbytes
);
310 dst_nents
= sg_nents(req
->dst
);
312 pr_debug("virtio_crypto: Number of sgs (src_nents: %d, dst_nents: %d)\n",
313 src_nents
, dst_nents
);
315 /* Why 3? outhdr + iv + inhdr */
316 sg_total
= src_nents
+ dst_nents
+ 3;
317 sgs
= kzalloc_node(sg_total
* sizeof(*sgs
), GFP_ATOMIC
,
318 dev_to_node(&vcrypto
->vdev
->dev
));
322 req_data
= kzalloc_node(sizeof(*req_data
), GFP_ATOMIC
,
323 dev_to_node(&vcrypto
->vdev
->dev
));
329 vc_req
->req_data
= req_data
;
330 vc_req
->type
= VIRTIO_CRYPTO_SYM_OP_CIPHER
;
331 /* Head of operation */
333 req_data
->header
.session_id
=
334 cpu_to_le64(ctx
->enc_sess_info
.session_id
);
335 req_data
->header
.opcode
=
336 cpu_to_le32(VIRTIO_CRYPTO_CIPHER_ENCRYPT
);
338 req_data
->header
.session_id
=
339 cpu_to_le64(ctx
->dec_sess_info
.session_id
);
340 req_data
->header
.opcode
=
341 cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DECRYPT
);
343 req_data
->u
.sym_req
.op_type
= cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER
);
344 req_data
->u
.sym_req
.u
.cipher
.para
.iv_len
= cpu_to_le32(ivsize
);
345 req_data
->u
.sym_req
.u
.cipher
.para
.src_data_len
=
346 cpu_to_le32(req
->nbytes
);
348 dst_len
= virtio_crypto_alg_sg_nents_length(req
->dst
);
349 if (unlikely(dst_len
> U32_MAX
)) {
350 pr_err("virtio_crypto: The dst_len is beyond U32_MAX\n");
355 pr_debug("virtio_crypto: src_len: %u, dst_len: %llu\n",
356 req
->nbytes
, dst_len
);
358 if (unlikely(req
->nbytes
+ dst_len
+ ivsize
+
359 sizeof(vc_req
->status
) > vcrypto
->max_size
)) {
360 pr_err("virtio_crypto: The length is too big\n");
365 req_data
->u
.sym_req
.u
.cipher
.para
.dst_data_len
=
366 cpu_to_le32((uint32_t)dst_len
);
369 sg_init_one(&outhdr
, req_data
, sizeof(*req_data
));
370 sgs
[num_out
++] = &outhdr
;
375 * Avoid to do DMA from the stack, switch to using
376 * dynamically-allocated for the IV
378 iv
= kzalloc_node(ivsize
, GFP_ATOMIC
,
379 dev_to_node(&vcrypto
->vdev
->dev
));
384 memcpy(iv
, req
->info
, ivsize
);
385 sg_init_one(&iv_sg
, iv
, ivsize
);
386 sgs
[num_out
++] = &iv_sg
;
390 for (i
= 0; i
< src_nents
; i
++)
391 sgs
[num_out
++] = &req
->src
[i
];
393 /* Destination data */
394 for (i
= 0; i
< dst_nents
; i
++)
395 sgs
[num_out
+ num_in
++] = &req
->dst
[i
];
398 sg_init_one(&status_sg
, &vc_req
->status
, sizeof(vc_req
->status
));
399 sgs
[num_out
+ num_in
++] = &status_sg
;
403 spin_lock_irqsave(&data_vq
->lock
, flags
);
404 err
= virtqueue_add_sgs(data_vq
->vq
, sgs
, num_out
,
405 num_in
, vc_req
, GFP_ATOMIC
);
406 virtqueue_kick(data_vq
->vq
);
407 spin_unlock_irqrestore(&data_vq
->lock
, flags
);
408 if (unlikely(err
< 0))
421 static int virtio_crypto_ablkcipher_encrypt(struct ablkcipher_request
*req
)
423 struct crypto_ablkcipher
*atfm
= crypto_ablkcipher_reqtfm(req
);
424 struct virtio_crypto_ablkcipher_ctx
*ctx
= crypto_ablkcipher_ctx(atfm
);
425 struct virtio_crypto_request
*vc_req
= ablkcipher_request_ctx(req
);
426 struct virtio_crypto
*vcrypto
= ctx
->vcrypto
;
428 /* Use the first data virtqueue as default */
429 struct data_queue
*data_vq
= &vcrypto
->data_vq
[0];
431 vc_req
->ablkcipher_ctx
= ctx
;
432 vc_req
->ablkcipher_req
= req
;
433 ret
= __virtio_crypto_ablkcipher_do_req(vc_req
, req
, data_vq
, 1);
435 pr_err("virtio_crypto: Encryption failed!\n");
442 static int virtio_crypto_ablkcipher_decrypt(struct ablkcipher_request
*req
)
444 struct crypto_ablkcipher
*atfm
= crypto_ablkcipher_reqtfm(req
);
445 struct virtio_crypto_ablkcipher_ctx
*ctx
= crypto_ablkcipher_ctx(atfm
);
446 struct virtio_crypto_request
*vc_req
= ablkcipher_request_ctx(req
);
447 struct virtio_crypto
*vcrypto
= ctx
->vcrypto
;
449 /* Use the first data virtqueue as default */
450 struct data_queue
*data_vq
= &vcrypto
->data_vq
[0];
452 vc_req
->ablkcipher_ctx
= ctx
;
453 vc_req
->ablkcipher_req
= req
;
455 ret
= __virtio_crypto_ablkcipher_do_req(vc_req
, req
, data_vq
, 0);
457 pr_err("virtio_crypto: Decryption failed!\n");
464 static int virtio_crypto_ablkcipher_init(struct crypto_tfm
*tfm
)
466 struct virtio_crypto_ablkcipher_ctx
*ctx
= crypto_tfm_ctx(tfm
);
468 tfm
->crt_ablkcipher
.reqsize
= sizeof(struct virtio_crypto_request
);
474 static void virtio_crypto_ablkcipher_exit(struct crypto_tfm
*tfm
)
476 struct virtio_crypto_ablkcipher_ctx
*ctx
= crypto_tfm_ctx(tfm
);
481 virtio_crypto_alg_ablkcipher_close_session(ctx
, 1);
482 virtio_crypto_alg_ablkcipher_close_session(ctx
, 0);
483 virtcrypto_dev_put(ctx
->vcrypto
);
487 static struct crypto_alg virtio_crypto_algs
[] = { {
488 .cra_name
= "cbc(aes)",
489 .cra_driver_name
= "virtio_crypto_aes_cbc",
491 .cra_flags
= CRYPTO_ALG_TYPE_ABLKCIPHER
| CRYPTO_ALG_ASYNC
,
492 .cra_blocksize
= AES_BLOCK_SIZE
,
493 .cra_ctxsize
= sizeof(struct virtio_crypto_ablkcipher_ctx
),
495 .cra_module
= THIS_MODULE
,
496 .cra_type
= &crypto_ablkcipher_type
,
497 .cra_init
= virtio_crypto_ablkcipher_init
,
498 .cra_exit
= virtio_crypto_ablkcipher_exit
,
501 .setkey
= virtio_crypto_ablkcipher_setkey
,
502 .decrypt
= virtio_crypto_ablkcipher_decrypt
,
503 .encrypt
= virtio_crypto_ablkcipher_encrypt
,
504 .min_keysize
= AES_MIN_KEY_SIZE
,
505 .max_keysize
= AES_MAX_KEY_SIZE
,
506 .ivsize
= AES_BLOCK_SIZE
,
511 int virtio_crypto_algs_register(void)
515 mutex_lock(&algs_lock
);
516 if (++virtio_crypto_active_devs
!= 1)
519 ret
= crypto_register_algs(virtio_crypto_algs
,
520 ARRAY_SIZE(virtio_crypto_algs
));
522 virtio_crypto_active_devs
--;
525 mutex_unlock(&algs_lock
);
529 void virtio_crypto_algs_unregister(void)
531 mutex_lock(&algs_lock
);
532 if (--virtio_crypto_active_devs
!= 0)
535 crypto_unregister_algs(virtio_crypto_algs
,
536 ARRAY_SIZE(virtio_crypto_algs
));
539 mutex_unlock(&algs_lock
);