// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <crypto/aes.h>
#include <crypto/internal/des.h>
#include <crypto/internal/skcipher.h>

#include "cipher.h"
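
/*
 * AES requests at or below aes_sw_max_len bytes are sent to the software
 * fallback instead of the engine; the threshold is only applied to AES-XTS,
 * see the fallback conditions in qce_skcipher_crypt().
 */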

static unsigned int aes_sw_max_len = CONFIG_CRYPTO_DEV_QCE_SW_MAX_LEN;
module_param(aes_sw_max_len, uint, 0644);
MODULE_PARM_DESC(aes_sw_max_len,
		 "Only use hardware for AES requests larger than this "
		 "[0=always use hardware; anything <16 breaks AES-GCM; default="
		 __stringify(CONFIG_CRYPTO_DEV_QCE_SW_MAX_LEN)"]");

static LIST_HEAD(skcipher_algs);
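
/*
 * DMA completion callback: tear down the DMA mappings and the destination
 * scatter/gather table built by qce_skcipher_async_req_handle(), copy the
 * updated IV/counter out of the result dump buffer and complete the request.
 */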

static void qce_skcipher_done(void *data)
{
	struct crypto_async_request *async_req = data;
	struct skcipher_request *req = skcipher_request_cast(async_req);
	struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
	struct qce_alg_template *tmpl = to_cipher_tmpl(crypto_skcipher_reqtfm(req));
	struct qce_device *qce = tmpl->qce;
	struct qce_result_dump *result_buf = qce->dma.result_buf;
	enum dma_data_direction dir_src, dir_dst;
	u32 status;
	int error;
	bool diff_dst;

	diff_dst = (req->src != req->dst) ? true : false;
	dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
	dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;

	error = qce_dma_terminate_all(&qce->dma);
	if (error)
		dev_dbg(qce->dev, "skcipher dma termination error (%d)\n",
			error);

	/* the source has its own mapping only when it is a distinct scatterlist */
	if (diff_dst)
		dma_unmap_sg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src);
	dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);

	sg_free_table(&rctx->dst_tbl);

	error = qce_check_status(qce, &status);
	if (error < 0)
		dev_dbg(qce->dev, "skcipher operation error (%x)\n", status);

	memcpy(rctx->iv, result_buf->encr_cntr_iv, rctx->ivsize);
	qce->async_req_done(tmpl->qce, error);
}
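
/*
 * Prepare and start one skcipher request on the crypto engine: build a
 * destination scatter/gather table with the result dump buffer appended,
 * DMA map source and destination, prepare the DMA descriptors and kick off
 * the engine. Everything is unwound on failure.
 */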

static int
qce_skcipher_async_req_handle(struct crypto_async_request *async_req)
{
	struct skcipher_request *req = skcipher_request_cast(async_req);
	struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct qce_alg_template *tmpl = to_cipher_tmpl(crypto_skcipher_reqtfm(req));
	struct qce_device *qce = tmpl->qce;
	enum dma_data_direction dir_src, dir_dst;
	struct scatterlist *sg;
	bool diff_dst;
	gfp_t gfp;
	int dst_nents, src_nents, ret;

	rctx->iv = req->iv;
	rctx->ivsize = crypto_skcipher_ivsize(skcipher);
	rctx->cryptlen = req->cryptlen;

	diff_dst = (req->src != req->dst) ? true : false;
	dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
	dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;

	rctx->src_nents = sg_nents_for_len(req->src, req->cryptlen);
	if (diff_dst)
		rctx->dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
	else
		rctx->dst_nents = rctx->src_nents;
	if (rctx->src_nents < 0) {
		dev_err(qce->dev, "Invalid numbers of src SG.\n");
		return rctx->src_nents;
	}
	if (rctx->dst_nents < 0) {
		dev_err(qce->dev, "Invalid numbers of dst SG.\n");
		return rctx->dst_nents;
	}

	/* one extra destination entry for the result dump buffer */
	rctx->dst_nents += 1;

	gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
						GFP_KERNEL : GFP_ATOMIC;

	ret = sg_alloc_table(&rctx->dst_tbl, rctx->dst_nents, gfp);
	if (ret)
		return ret;

	sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);

	sg = qce_sgtable_add(&rctx->dst_tbl, req->dst, req->cryptlen);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto error_free;
	}

	sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg,
			     QCE_RESULT_BUF_SZ);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto error_free;
	}

	sg_mark_end(sg);
	rctx->dst_sg = rctx->dst_tbl.sgl;

	dst_nents = dma_map_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
	if (!dst_nents) {
		ret = -EIO;
		goto error_free;
	}

	if (diff_dst) {
		src_nents = dma_map_sg(qce->dev, req->src, rctx->src_nents, dir_src);
		if (!src_nents) {
			ret = -EIO;
			goto error_unmap_dst;
		}
		rctx->src_sg = req->src;
	} else {
		/* in-place: share the mapped dst table, minus the result entry */
		rctx->src_sg = rctx->dst_sg;
		src_nents = dst_nents - 1;
	}

	ret = qce_dma_prep_sgs(&qce->dma, rctx->src_sg, src_nents,
			       rctx->dst_sg, dst_nents,
			       qce_skcipher_done, async_req);
	if (ret)
		goto error_unmap_src;

	qce_dma_issue_pending(&qce->dma);

	ret = qce_start(async_req, tmpl->crypto_alg_type);
	if (ret)
		goto error_terminate;

	return 0;

error_terminate:
	qce_dma_terminate_all(&qce->dma);
error_unmap_src:
	if (diff_dst)
		dma_unmap_sg(qce->dev, req->src, rctx->src_nents, dir_src);
error_unmap_dst:
	dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
error_free:
	sg_free_table(&rctx->dst_tbl);

	return ret;
}
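
/*
 * AES setkey: reject XTS keys whose two halves are identical (not supported
 * by the engine), stash the key for hardware use and also program the
 * fallback cipher so it is ready for the cases the engine cannot handle.
 */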

static int qce_skcipher_setkey(struct crypto_skcipher *ablk, const u8 *key,
			       unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(ablk);
	struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
	unsigned long flags = to_cipher_tmpl(ablk)->alg_flags;
	unsigned int __keylen;
	int ret;

	/*
	 * AES XTS key1 = key2 not supported by crypto engine.
	 * Revisit to request a fallback cipher in this case.
	 */
	if (IS_XTS(flags)) {
		__keylen = keylen >> 1;
		if (!memcmp(key, key + __keylen, __keylen))
			return -ENOKEY;
	} else {
		__keylen = keylen;
	}

	switch (__keylen) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_256:
		memcpy(ctx->enc_key, key, keylen);
		break;
	case AES_KEYSIZE_192:
		/* not supported by the engine, handled by the fallback cipher */
		break;
	default:
		return -EINVAL;
	}

	ret = crypto_skcipher_setkey(ctx->fallback, key, keylen);
	if (!ret)
		ctx->enc_keylen = keylen;

	return ret;
}

static int qce_des_setkey(struct crypto_skcipher *ablk, const u8 *key,
			  unsigned int keylen)
{
	struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(ablk);
	int err;

	err = verify_skcipher_des_key(ablk, key);
	if (err)
		return err;

	ctx->enc_keylen = keylen;
	memcpy(ctx->enc_key, key, keylen);

	return 0;
}

static int qce_des3_setkey(struct crypto_skcipher *ablk, const u8 *key,
			   unsigned int keylen)
{
	struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(ablk);
	u32 _key[6];
	int err;

	err = verify_skcipher_des3_key(ablk, key);
	if (err)
		return err;

	/*
	 * The crypto engine does not support any two keys
	 * being the same for triple des algorithms. The
	 * verify_skcipher_des3_key does not check for all the
	 * below conditions. Return -ENOKEY in case any two keys
	 * are the same. Revisit to see if a fallback cipher
	 * is needed to handle this condition.
	 */
	memcpy(_key, key, DES3_EDE_KEY_SIZE);
	if (!((_key[0] ^ _key[2]) | (_key[1] ^ _key[3])) ||
	    !((_key[2] ^ _key[4]) | (_key[3] ^ _key[5])) ||
	    !((_key[0] ^ _key[4]) | (_key[1] ^ _key[5])))
		return -ENOKEY;

	ctx->enc_keylen = keylen;
	memcpy(ctx->enc_key, key, keylen);

	return 0;
}
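
/*
 * Common encrypt/decrypt path: validate the request length, then decide
 * whether the request runs on the crypto engine or on the software fallback.
 */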

static int qce_skcipher_crypt(struct skcipher_request *req, int encrypt)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
	struct qce_alg_template *tmpl = to_cipher_tmpl(tfm);
	unsigned int blocksize = crypto_skcipher_blocksize(tfm);
	int keylen;
	int ret;

	rctx->flags = tmpl->alg_flags;
	rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT;
	keylen = IS_XTS(rctx->flags) ? ctx->enc_keylen >> 1 : ctx->enc_keylen;

	/* CE does not handle 0 length messages */
	if (!req->cryptlen)
		return 0;

	/*
	 * ECB and CBC algorithms require message lengths to be
	 * multiples of block size.
	 */
	if (IS_ECB(rctx->flags) || IS_CBC(rctx->flags))
		if (!IS_ALIGNED(req->cryptlen, blocksize))
			return -EINVAL;

	/*
	 * Conditions for requesting a fallback cipher
	 * AES-192 (not supported by crypto engine (CE))
	 * AES-XTS request with len <= 512 byte (not recommended to use CE)
	 * AES-XTS request with len > QCE_SECTOR_SIZE and
	 * is not a multiple of it. (Revisit this condition to check if it is
	 * needed in all versions of CE)
	 */
	if (IS_AES(rctx->flags) &&
	    ((keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_256) ||
	     (IS_XTS(rctx->flags) && ((req->cryptlen <= aes_sw_max_len) ||
				      (req->cryptlen > QCE_SECTOR_SIZE &&
				       req->cryptlen % QCE_SECTOR_SIZE))))) {
		skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
		skcipher_request_set_callback(&rctx->fallback_req,
					      req->base.flags,
					      req->base.complete,
					      req->base.data);
		skcipher_request_set_crypt(&rctx->fallback_req, req->src,
					   req->dst, req->cryptlen, req->iv);
		ret = encrypt ? crypto_skcipher_encrypt(&rctx->fallback_req) :
				crypto_skcipher_decrypt(&rctx->fallback_req);

		return ret;
	}

	return tmpl->qce->async_req_enqueue(tmpl->qce, &req->base);
}

static int qce_skcipher_encrypt(struct skcipher_request *req)
{
	return qce_skcipher_crypt(req, 1);
}

static int qce_skcipher_decrypt(struct skcipher_request *req)
{
	return qce_skcipher_crypt(req, 0);
}
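
/*
 * The request context ends with a struct skcipher_request for the fallback
 * cipher. Transforms that never use a fallback only reserve the part of the
 * context in front of it; AES transforms reserve the full context plus the
 * fallback's own request size.
 */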

static int qce_skcipher_init(struct crypto_skcipher *tfm)
{
	/* take the size without the fallback skcipher_request at the end */
	crypto_skcipher_set_reqsize(tfm, offsetof(struct qce_cipher_reqctx,
						  fallback_req));
	return 0;
}

static int qce_skcipher_init_fallback(struct crypto_skcipher *tfm)
{
	struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->fallback = crypto_alloc_skcipher(crypto_tfm_alg_name(&tfm->base),
					      0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback))
		return PTR_ERR(ctx->fallback);

	crypto_skcipher_set_reqsize(tfm, sizeof(struct qce_cipher_reqctx) +
					 crypto_skcipher_reqsize(ctx->fallback));
	return 0;
}

static void qce_skcipher_exit(struct crypto_skcipher *tfm)
{
	struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(ctx->fallback);
}

struct qce_skcipher_def {
	unsigned long flags;
	const char *name;
	const char *drv_name;
	unsigned int blocksize;
	unsigned int chunksize;
	unsigned int ivsize;
	unsigned int min_keysize;
	unsigned int max_keysize;
};
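
/*
 * Algorithm table: one entry per cipher/mode combination exposed by the
 * engine. The AES entries rely on a software fallback for key sizes and
 * request lengths the hardware does not handle.
 */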

static const struct qce_skcipher_def skcipher_def[] = {
	{
		.flags = QCE_ALG_AES | QCE_MODE_ECB,
		.name = "ecb(aes)",
		.drv_name = "ecb-aes-qce",
		.blocksize = AES_BLOCK_SIZE,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
	},
	{
		.flags = QCE_ALG_AES | QCE_MODE_CBC,
		.name = "cbc(aes)",
		.drv_name = "cbc-aes-qce",
		.blocksize = AES_BLOCK_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
	},
	{
		.flags = QCE_ALG_AES | QCE_MODE_CTR,
		.name = "ctr(aes)",
		.drv_name = "ctr-aes-qce",
		.blocksize = 1,
		.chunksize = AES_BLOCK_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
	},
	{
		.flags = QCE_ALG_AES | QCE_MODE_XTS,
		.name = "xts(aes)",
		.drv_name = "xts-aes-qce",
		.blocksize = AES_BLOCK_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.min_keysize = AES_MIN_KEY_SIZE * 2,
		.max_keysize = AES_MAX_KEY_SIZE * 2,
	},
	{
		.flags = QCE_ALG_DES | QCE_MODE_ECB,
		.name = "ecb(des)",
		.drv_name = "ecb-des-qce",
		.blocksize = DES_BLOCK_SIZE,
		.min_keysize = DES_KEY_SIZE,
		.max_keysize = DES_KEY_SIZE,
	},
	{
		.flags = QCE_ALG_DES | QCE_MODE_CBC,
		.name = "cbc(des)",
		.drv_name = "cbc-des-qce",
		.blocksize = DES_BLOCK_SIZE,
		.ivsize = DES_BLOCK_SIZE,
		.min_keysize = DES_KEY_SIZE,
		.max_keysize = DES_KEY_SIZE,
	},
	{
		.flags = QCE_ALG_3DES | QCE_MODE_ECB,
		.name = "ecb(des3_ede)",
		.drv_name = "ecb-3des-qce",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.min_keysize = DES3_EDE_KEY_SIZE,
		.max_keysize = DES3_EDE_KEY_SIZE,
	},
	{
		.flags = QCE_ALG_3DES | QCE_MODE_CBC,
		.name = "cbc(des3_ede)",
		.drv_name = "cbc-3des-qce",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.ivsize = DES3_EDE_BLOCK_SIZE,
		.min_keysize = DES3_EDE_KEY_SIZE,
		.max_keysize = DES3_EDE_KEY_SIZE,
	},
};
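
/*
 * Allocate a template for one table entry, fill in the skcipher_alg from the
 * definition and register it with the crypto API. AES algorithms are marked
 * CRYPTO_ALG_NEED_FALLBACK and get the fallback-aware init/exit hooks.
 */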

static int qce_skcipher_register_one(const struct qce_skcipher_def *def,
				     struct qce_device *qce)
{
	struct qce_alg_template *tmpl;
	struct skcipher_alg *alg;
	int ret;

	tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL);
	if (!tmpl)
		return -ENOMEM;

	alg = &tmpl->alg.skcipher;

	snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
	snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 def->drv_name);

	alg->base.cra_blocksize = def->blocksize;
	alg->chunksize = def->chunksize;
	alg->ivsize = def->ivsize;
	alg->min_keysize = def->min_keysize;
	alg->max_keysize = def->max_keysize;
	alg->setkey = IS_3DES(def->flags) ? qce_des3_setkey :
		      IS_DES(def->flags) ? qce_des_setkey :
		      qce_skcipher_setkey;
	alg->encrypt = qce_skcipher_encrypt;
	alg->decrypt = qce_skcipher_decrypt;

	alg->base.cra_priority = 300;
	alg->base.cra_flags = CRYPTO_ALG_ASYNC |
			      CRYPTO_ALG_ALLOCATES_MEMORY |
			      CRYPTO_ALG_KERN_DRIVER_ONLY;
	alg->base.cra_ctxsize = sizeof(struct qce_cipher_ctx);
	alg->base.cra_alignmask = 0;
	alg->base.cra_module = THIS_MODULE;

	if (IS_AES(def->flags)) {
		alg->base.cra_flags |= CRYPTO_ALG_NEED_FALLBACK;
		alg->init = qce_skcipher_init_fallback;
		alg->exit = qce_skcipher_exit;
	} else {
		alg->init = qce_skcipher_init;
	}

	INIT_LIST_HEAD(&tmpl->entry);
	tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_SKCIPHER;
	tmpl->alg_flags = def->flags;
	tmpl->qce = qce;

	ret = crypto_register_skcipher(alg);
	if (ret) {
		dev_err(qce->dev, "%s registration failed\n", alg->base.cra_name);
		kfree(tmpl);
		return ret;
	}

	list_add_tail(&tmpl->entry, &skcipher_algs);
	dev_dbg(qce->dev, "%s is registered\n", alg->base.cra_name);

	return 0;
}
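
/*
 * Unregister every algorithm on the skcipher_algs list and free the
 * templates allocated at registration time.
 */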

static void qce_skcipher_unregister(struct qce_device *qce)
{
	struct qce_alg_template *tmpl, *n;

	list_for_each_entry_safe(tmpl, n, &skcipher_algs, entry) {
		crypto_unregister_skcipher(&tmpl->alg.skcipher);
		list_del(&tmpl->entry);
		kfree(tmpl);
	}
}

static int qce_skcipher_register(struct qce_device *qce)
{
	int ret, i;

	for (i = 0; i < ARRAY_SIZE(skcipher_def); i++) {
		ret = qce_skcipher_register_one(&skcipher_def[i], qce);
		if (ret)
			goto err;
	}

	return 0;
err:
	qce_skcipher_unregister(qce);

	return ret;
}
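
/*
 * Ops table picked up by the qce core: the algorithm type plus the
 * registration and request-handling entry points implemented above.
 */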

const struct qce_algo_ops skcipher_ops = {
	.type = CRYPTO_ALG_TYPE_SKCIPHER,
	.register_algs = qce_skcipher_register,
	.unregister_algs = qce_skcipher_unregister,
	.async_req_handle = qce_skcipher_async_req_handle,
};