/* Algorithms supported by virtio crypto device
 *
 * Authors: Gonglei <arei.gonglei@huawei.com>
 *
 * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/scatterlist.h>
#include <crypto/algapi.h>
#include <linux/err.h>
#include <crypto/scatterwalk.h>
#include <linux/atomic.h>

#include <uapi/linux/virtio_crypto.h>
#include "virtio_crypto_common.h"
/*
 * The algs_lock protects the below global virtio_crypto_active_devs
 * and crypto algorithms registration.
 */
static DEFINE_MUTEX(algs_lock);
static unsigned int virtio_crypto_active_devs;
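/*
 * Sum the byte lengths of all entries in a (possibly chained)
 * scatterlist. The result feeds the device-visible dst_data_len
 * field, so it is computed as a u64 and range-checked against
 * U32_MAX by the caller.
 */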
static u64 virtio_crypto_alg_sg_nents_length(struct scatterlist *sg)
{
        u64 total;

        for (total = 0; sg; sg = sg_next(sg))
                total += sg->length;

        return total;
}
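/*
 * Example: a 16-, 24- or 32-byte key validates and selects
 * VIRTIO_CRYPTO_CIPHER_AES_CBC; a 20-byte key fails with -EINVAL.
 * Only AES-CBC is exposed by this file, so every valid key length
 * maps to the same algorithm id.
 */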
static int
virtio_crypto_alg_validate_key(int key_len, uint32_t *alg)
{
        switch (key_len) {
        case AES_KEYSIZE_128:
        case AES_KEYSIZE_192:
        case AES_KEYSIZE_256:
                *alg = VIRTIO_CRYPTO_CIPHER_AES_CBC;
                break;
        default:
                pr_err("virtio_crypto: Unsupported key length: %d\n",
                       key_len);
                return -EINVAL;
        }
        return 0;
}
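/*
 * Create one device-side session (encryption or decryption direction)
 * over the control virtqueue. The key is copied into a heap buffer
 * first because the buffer is handed to the virtqueue for DMA, and
 * DMA from the stack is not safe.
 */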
static int virtio_crypto_alg_ablkcipher_init_session(
                struct virtio_crypto_ablkcipher_ctx *ctx,
                uint32_t alg, const uint8_t *key,
                unsigned int keylen,
                int encrypt)
{
        struct scatterlist outhdr, key_sg, inhdr, *sgs[3];
        unsigned int tmp;
        struct virtio_crypto *vcrypto = ctx->vcrypto;
        int op = encrypt ? VIRTIO_CRYPTO_OP_ENCRYPT : VIRTIO_CRYPTO_OP_DECRYPT;
        int err;
        unsigned int num_out = 0, num_in = 0;
        /*
         * Avoid DMA to/from the stack: use a dynamically-allocated
         * buffer for the key instead.
         */
        uint8_t *cipher_key = kmalloc(keylen, GFP_ATOMIC);

        if (!cipher_key)
                return -ENOMEM;

        memcpy(cipher_key, key, keylen);
        spin_lock(&vcrypto->ctrl_lock);
        /* Pad ctrl header */
        vcrypto->ctrl.header.opcode =
                cpu_to_le32(VIRTIO_CRYPTO_CIPHER_CREATE_SESSION);
        vcrypto->ctrl.header.algo = cpu_to_le32(alg);
        /* Set the default dataqueue id to 0 */
        vcrypto->ctrl.header.queue_id = 0;

        vcrypto->input.status = cpu_to_le32(VIRTIO_CRYPTO_ERR);
        /* Pad cipher's parameters */
        vcrypto->ctrl.u.sym_create_session.op_type =
                cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
        vcrypto->ctrl.u.sym_create_session.u.cipher.para.algo =
                vcrypto->ctrl.header.algo;
        vcrypto->ctrl.u.sym_create_session.u.cipher.para.keylen =
                cpu_to_le32(keylen);
        vcrypto->ctrl.u.sym_create_session.u.cipher.para.op =
                cpu_to_le32(op);
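        /*
         * Layout of this control request on the queue: two
         * device-readable descriptors (ctrl header, raw key) followed
         * by one device-writable descriptor that the device fills in
         * with the status and the new session_id.
         */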
        sg_init_one(&outhdr, &vcrypto->ctrl, sizeof(vcrypto->ctrl));
        sgs[num_out++] = &outhdr;

        /* Set key */
        sg_init_one(&key_sg, cipher_key, keylen);
        sgs[num_out++] = &key_sg;

        /* Return status and session id back */
        sg_init_one(&inhdr, &vcrypto->input, sizeof(vcrypto->input));
        sgs[num_out + num_in++] = &inhdr;

        err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out,
                                num_in, vcrypto, GFP_ATOMIC);
        if (err < 0) {
                spin_unlock(&vcrypto->ctrl_lock);
                kzfree(cipher_key);
                return err;
        }
        virtqueue_kick(vcrypto->ctrl_vq);

        /*
         * Trapping into the hypervisor, so the request should be
         * handled immediately.
         */
        while (!virtqueue_get_buf(vcrypto->ctrl_vq, &tmp) &&
               !virtqueue_is_broken(vcrypto->ctrl_vq))
                cpu_relax();

        if (le32_to_cpu(vcrypto->input.status) != VIRTIO_CRYPTO_OK) {
                spin_unlock(&vcrypto->ctrl_lock);
                pr_err("virtio_crypto: Create session failed status: %u\n",
                       le32_to_cpu(vcrypto->input.status));
                kzfree(cipher_key);
                return -EINVAL;
        }

        if (encrypt)
                ctx->enc_sess_info.session_id =
                        le64_to_cpu(vcrypto->input.session_id);
        else
                ctx->dec_sess_info.session_id =
                        le64_to_cpu(vcrypto->input.session_id);

        spin_unlock(&vcrypto->ctrl_lock);

        kzfree(cipher_key);
        return 0;
}
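/*
 * Tear down a session previously created by
 * virtio_crypto_alg_ablkcipher_init_session(). The destroy request
 * needs one device-readable descriptor (the ctrl header embedding the
 * session_id) and one device-writable descriptor for the status.
 */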
static int virtio_crypto_alg_ablkcipher_close_session(
                struct virtio_crypto_ablkcipher_ctx *ctx,
                int encrypt)
{
        struct scatterlist outhdr, status_sg, *sgs[2];
        unsigned int tmp;
        struct virtio_crypto_destroy_session_req *destroy_session;
        struct virtio_crypto *vcrypto = ctx->vcrypto;
        int err;
        unsigned int num_out = 0, num_in = 0;
        spin_lock(&vcrypto->ctrl_lock);
        vcrypto->ctrl_status.status = VIRTIO_CRYPTO_ERR;
        /* Pad ctrl header */
        vcrypto->ctrl.header.opcode =
                cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION);
        /* Set the default virtqueue id to 0 */
        vcrypto->ctrl.header.queue_id = 0;

        destroy_session = &vcrypto->ctrl.u.destroy_session;

        if (encrypt)
                destroy_session->session_id =
                        cpu_to_le64(ctx->enc_sess_info.session_id);
        else
                destroy_session->session_id =
                        cpu_to_le64(ctx->dec_sess_info.session_id);
        sg_init_one(&outhdr, &vcrypto->ctrl, sizeof(vcrypto->ctrl));
        sgs[num_out++] = &outhdr;

        /* Return status and session id back */
        sg_init_one(&status_sg, &vcrypto->ctrl_status.status,
                    sizeof(vcrypto->ctrl_status.status));
        sgs[num_out + num_in++] = &status_sg;

        err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out,
                                num_in, vcrypto, GFP_ATOMIC);
        if (err < 0) {
                spin_unlock(&vcrypto->ctrl_lock);
                return err;
        }
        virtqueue_kick(vcrypto->ctrl_vq);

        while (!virtqueue_get_buf(vcrypto->ctrl_vq, &tmp) &&
               !virtqueue_is_broken(vcrypto->ctrl_vq))
                cpu_relax();

        if (vcrypto->ctrl_status.status != VIRTIO_CRYPTO_OK) {
                spin_unlock(&vcrypto->ctrl_lock);
                pr_err("virtio_crypto: Close session failed status: %u, session_id: 0x%llx\n",
                       vcrypto->ctrl_status.status,
                       destroy_session->session_id);

                return -EINVAL;
        }
        spin_unlock(&vcrypto->ctrl_lock);

        return 0;
}
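/*
 * The virtio-crypto device treats encryption and decryption as
 * distinct sessions, so one session per direction is created for each
 * key. If the decryption session cannot be created, the encryption
 * session that was just set up is closed again so it does not leak.
 */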
static int virtio_crypto_alg_ablkcipher_init_sessions(
                struct virtio_crypto_ablkcipher_ctx *ctx,
                const uint8_t *key, unsigned int keylen)
{
        uint32_t alg;
        int ret;
        struct virtio_crypto *vcrypto = ctx->vcrypto;

        if (keylen > vcrypto->max_cipher_key_len) {
                pr_err("virtio_crypto: the key is too long\n");
                goto bad_key;
        }

        if (virtio_crypto_alg_validate_key(keylen, &alg))
                goto bad_key;

        /* Create encryption session */
        ret = virtio_crypto_alg_ablkcipher_init_session(ctx,
                        alg, key, keylen, 1);
        if (ret)
                return ret;
        /* Create decryption session */
        ret = virtio_crypto_alg_ablkcipher_init_session(ctx,
                        alg, key, keylen, 0);
        if (ret) {
                virtio_crypto_alg_ablkcipher_close_session(ctx, 1);
                return ret;
        }
        return 0;

bad_key:
        crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
        return -EINVAL;
}
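/*
 * The first setkey binds the tfm to a virtio crypto device chosen by
 * the caller's NUMA node; a later setkey on the same tfm is treated
 * as rekeying and closes the previously created sessions before
 * setting up new ones.
 */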
/* Note: kernel crypto API realization */
static int virtio_crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
                                           const uint8_t *key,
                                           unsigned int keylen)
{
        struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
        int ret;

        if (!ctx->vcrypto) {
                /* New key */
                int node = virtio_crypto_get_current_node();
                struct virtio_crypto *vcrypto =
                                      virtcrypto_get_dev_node(node);
                if (!vcrypto) {
                        pr_err("virtio_crypto: Could not find a virtio device in the system\n");
                        return -ENODEV;
                }

                ctx->vcrypto = vcrypto;
        } else {
                /* Rekeying: close the previously created sessions */
                virtio_crypto_alg_ablkcipher_close_session(ctx, 1);
                virtio_crypto_alg_ablkcipher_close_session(ctx, 0);
        }

        ret = virtio_crypto_alg_ablkcipher_init_sessions(ctx, key, keylen);
        if (ret) {
                virtcrypto_dev_put(ctx->vcrypto);
                ctx->vcrypto = NULL;

                return ret;
        }

        return 0;
}
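/*
 * Build and submit one data-queue request. The descriptor chain is,
 * in order: op header (out), IV (out), source data (out), destination
 * data (in), status byte (in). That is why the sgs array is sized
 * src_nents + dst_nents + 3: outhdr, iv and status come on top of the
 * data scatterlists.
 */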
static int
__virtio_crypto_ablkcipher_do_req(struct virtio_crypto_request *vc_req,
                struct ablkcipher_request *req,
                struct data_queue *data_vq)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
        struct virtio_crypto_ablkcipher_ctx *ctx = vc_req->ablkcipher_ctx;
        struct virtio_crypto *vcrypto = ctx->vcrypto;
        struct virtio_crypto_op_data_req *req_data;
        int src_nents, dst_nents;
        int err;
        unsigned long flags;
        struct scatterlist outhdr, iv_sg, status_sg, **sgs;
        int i;
        u64 dst_len;
        unsigned int num_out = 0, num_in = 0;
        int sg_total;
        uint8_t *iv;
        src_nents = sg_nents_for_len(req->src, req->nbytes);
        dst_nents = sg_nents(req->dst);

        pr_debug("virtio_crypto: Number of sgs (src_nents: %d, dst_nents: %d)\n",
                 src_nents, dst_nents);

        /* Why 3? outhdr + iv + inhdr */
        sg_total = src_nents + dst_nents + 3;
        sgs = kzalloc_node(sg_total * sizeof(*sgs), GFP_ATOMIC,
                           dev_to_node(&vcrypto->vdev->dev));
        if (!sgs)
                return -ENOMEM;

        req_data = kzalloc_node(sizeof(*req_data), GFP_ATOMIC,
                                dev_to_node(&vcrypto->vdev->dev));
        if (!req_data) {
                kfree(sgs);
                return -ENOMEM;
        }
        vc_req->req_data = req_data;
        vc_req->type = VIRTIO_CRYPTO_SYM_OP_CIPHER;
        /* Head of operation */
        if (vc_req->encrypt) {
                req_data->header.session_id =
                        cpu_to_le64(ctx->enc_sess_info.session_id);
                req_data->header.opcode =
                        cpu_to_le32(VIRTIO_CRYPTO_CIPHER_ENCRYPT);
        } else {
                req_data->header.session_id =
                        cpu_to_le64(ctx->dec_sess_info.session_id);
                req_data->header.opcode =
                        cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DECRYPT);
        }
        req_data->u.sym_req.op_type = cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
        req_data->u.sym_req.u.cipher.para.iv_len = cpu_to_le32(ivsize);
        req_data->u.sym_req.u.cipher.para.src_data_len =
                cpu_to_le32(req->nbytes);

        dst_len = virtio_crypto_alg_sg_nents_length(req->dst);
        if (unlikely(dst_len > U32_MAX)) {
                pr_err("virtio_crypto: The dst_len is beyond U32_MAX\n");
                err = -EINVAL;
                goto free;
        }

        pr_debug("virtio_crypto: src_len: %u, dst_len: %llu\n",
                 req->nbytes, dst_len);

        if (unlikely(req->nbytes + dst_len + ivsize +
            sizeof(vc_req->status) > vcrypto->max_size)) {
                pr_err("virtio_crypto: The length is too big\n");
                err = -EINVAL;
                goto free;
        }

        req_data->u.sym_req.u.cipher.para.dst_data_len =
                cpu_to_le32((uint32_t)dst_len);
        /* Outhdr */
        sg_init_one(&outhdr, req_data, sizeof(*req_data));
        sgs[num_out++] = &outhdr;

        /* IV */

        /*
         * Avoid DMA to/from the stack: use a dynamically-allocated
         * buffer for the IV instead.
         */
        iv = kzalloc_node(ivsize, GFP_ATOMIC,
                          dev_to_node(&vcrypto->vdev->dev));
        if (!iv) {
                err = -ENOMEM;
                goto free;
        }
        memcpy(iv, req->info, ivsize);
        sg_init_one(&iv_sg, iv, ivsize);
        sgs[num_out++] = &iv_sg;
        vc_req->iv = iv;
        /* Source data */
        for (i = 0; i < src_nents; i++)
                sgs[num_out++] = &req->src[i];

        /* Destination data */
        for (i = 0; i < dst_nents; i++)
                sgs[num_out + num_in++] = &req->dst[i];

        /* Status */
        sg_init_one(&status_sg, &vc_req->status, sizeof(vc_req->status));
        sgs[num_out + num_in++] = &status_sg;

        vc_req->sgs = sgs;
        spin_lock_irqsave(&data_vq->lock, flags);
        err = virtqueue_add_sgs(data_vq->vq, sgs, num_out,
                                num_in, vc_req, GFP_ATOMIC);
        virtqueue_kick(data_vq->vq);
        spin_unlock_irqrestore(&data_vq->lock, flags);
        if (unlikely(err < 0))
                goto free_iv;

        return 0;

free_iv:
        kzfree(iv);
free:
        kzfree(req_data);
        kfree(sgs);
        return err;
}
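/*
 * crypto API entry points. Requests are not submitted to the device
 * here; they are queued to the crypto engine, which later calls back
 * into virtio_crypto_ablkcipher_crypt_req(). Only the first data
 * virtqueue is used for now.
 */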
static int virtio_crypto_ablkcipher_encrypt(struct ablkcipher_request *req)
{
        struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
        struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(atfm);
        struct virtio_crypto_request *vc_req = ablkcipher_request_ctx(req);
        struct virtio_crypto *vcrypto = ctx->vcrypto;
        /* Use the first data virtqueue as default */
        struct data_queue *data_vq = &vcrypto->data_vq[0];

        vc_req->ablkcipher_ctx = ctx;
        vc_req->ablkcipher_req = req;
        vc_req->encrypt = true;
        vc_req->dataq = data_vq;

        return crypto_transfer_cipher_request_to_engine(data_vq->engine, req);
}
static int virtio_crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
{
        struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
        struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(atfm);
        struct virtio_crypto_request *vc_req = ablkcipher_request_ctx(req);
        struct virtio_crypto *vcrypto = ctx->vcrypto;
        /* Use the first data virtqueue as default */
        struct data_queue *data_vq = &vcrypto->data_vq[0];

        vc_req->ablkcipher_ctx = ctx;
        vc_req->ablkcipher_req = req;
        vc_req->encrypt = false;
        vc_req->dataq = data_vq;

        return crypto_transfer_cipher_request_to_engine(data_vq->engine, req);
}
static int virtio_crypto_ablkcipher_init(struct crypto_tfm *tfm)
{
        struct virtio_crypto_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

        tfm->crt_ablkcipher.reqsize = sizeof(struct virtio_crypto_request);
        ctx->tfm = tfm;

        return 0;
}
static void virtio_crypto_ablkcipher_exit(struct crypto_tfm *tfm)
{
        struct virtio_crypto_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

        if (!ctx->vcrypto)
                return;

        virtio_crypto_alg_ablkcipher_close_session(ctx, 1);
        virtio_crypto_alg_ablkcipher_close_session(ctx, 0);
        virtcrypto_dev_put(ctx->vcrypto);
        ctx->vcrypto = NULL;
}
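/*
 * Callback invoked from the crypto engine worker: this is where a
 * request prepared by encrypt()/decrypt() above is actually turned
 * into virtqueue descriptors and kicked to the device.
 */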
int virtio_crypto_ablkcipher_crypt_req(
        struct crypto_engine *engine,
        struct ablkcipher_request *req)
{
        struct virtio_crypto_request *vc_req = ablkcipher_request_ctx(req);
        struct data_queue *data_vq = vc_req->dataq;
        int ret;

        ret = __virtio_crypto_ablkcipher_do_req(vc_req, req, data_vq);
        if (ret < 0)
                return ret;

        virtqueue_kick(data_vq->vq);

        return 0;
}
void virtio_crypto_ablkcipher_finalize_req(
        struct virtio_crypto_request *vc_req,
        struct ablkcipher_request *req,
        int err)
{
        crypto_finalize_cipher_request(vc_req->dataq->engine, req, err);

        virtcrypto_clear_request(vc_req);
}
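/*
 * Nothing below is invoked directly; consumers go through the generic
 * kernel crypto API. A minimal sketch of a hypothetical caller
 * (illustrative only, error handling elided):
 *
 *      struct crypto_ablkcipher *tfm =
 *              crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *
 *      crypto_ablkcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *      ... allocate an ablkcipher_request, set src/dst/iv ...
 *      crypto_ablkcipher_encrypt(req);
 *
 * The crypto core selects "virtio_crypto_aes_cbc" when it is the
 * highest-priority cbc(aes) implementation registered.
 */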
static struct crypto_alg virtio_crypto_algs[] = { {
        .cra_name = "cbc(aes)",
        .cra_driver_name = "virtio_crypto_aes_cbc",
        .cra_priority = 501,
        .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize = AES_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct virtio_crypto_ablkcipher_ctx),
        .cra_alignmask = 0,
        .cra_module = THIS_MODULE,
        .cra_type = &crypto_ablkcipher_type,
        .cra_init = virtio_crypto_ablkcipher_init,
        .cra_exit = virtio_crypto_ablkcipher_exit,
        .cra_u = {
                .ablkcipher = {
                        .setkey = virtio_crypto_ablkcipher_setkey,
                        .decrypt = virtio_crypto_ablkcipher_decrypt,
                        .encrypt = virtio_crypto_ablkcipher_encrypt,
                        .min_keysize = AES_MIN_KEY_SIZE,
                        .max_keysize = AES_MAX_KEY_SIZE,
                        .ivsize = AES_BLOCK_SIZE,
                },
        },
} };
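/*
 * Registration is reference-counted across devices: the algorithms
 * are registered with the crypto API when the first virtio crypto
 * device is probed and unregistered only when the last one goes away,
 * with algs_lock serializing both paths.
 */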
int virtio_crypto_algs_register(void)
{
        int ret = 0;

        mutex_lock(&algs_lock);
        if (++virtio_crypto_active_devs != 1)
                goto unlock;

        ret = crypto_register_algs(virtio_crypto_algs,
                        ARRAY_SIZE(virtio_crypto_algs));
        if (ret)
                virtio_crypto_active_devs--;

unlock:
        mutex_unlock(&algs_lock);
        return ret;
}
void virtio_crypto_algs_unregister(void)
{
        mutex_lock(&algs_lock);
        if (--virtio_crypto_active_devs != 0)
                goto unlock;

        crypto_unregister_algs(virtio_crypto_algs,
                        ARRAY_SIZE(virtio_crypto_algs));

unlock:
        mutex_unlock(&algs_lock);
}