// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <crypto/aes.h>
#include <crypto/internal/des.h>
#include <crypto/internal/skcipher.h>

#include "cipher.h"

static unsigned int aes_sw_max_len = CONFIG_CRYPTO_DEV_QCE_SW_MAX_LEN;
module_param(aes_sw_max_len, uint, 0644);
MODULE_PARM_DESC(aes_sw_max_len,
		 "Only use hardware for AES requests larger than this "
		 "[0=always use hardware; anything <16 breaks AES-GCM; default="
		 __stringify(CONFIG_CRYPTO_DEV_QCE_SW_MAX_LEN)"]");

static LIST_HEAD(skcipher_algs);
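
/*
 * DMA completion callback: unmap the source/destination scatterlists,
 * release the destination sg table, copy the updated IV out of the
 * engine's result dump and complete the crypto request.
 */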
static void qce_skcipher_done(void *data)
{
	struct crypto_async_request *async_req = data;
	struct skcipher_request *req = skcipher_request_cast(async_req);
	struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
	struct qce_alg_template *tmpl = to_cipher_tmpl(crypto_skcipher_reqtfm(req));
	struct qce_device *qce = tmpl->qce;
	struct qce_result_dump *result_buf = qce->dma.result_buf;
	enum dma_data_direction dir_src, dir_dst;
	u32 status;
	int error;
	bool diff_dst;

	diff_dst = (req->src != req->dst) ? true : false;
	dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
	dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;

	error = qce_dma_terminate_all(&qce->dma);
	if (error)
		dev_dbg(qce->dev, "skcipher dma termination error (%d)\n",
			error);

	if (diff_dst)
		dma_unmap_sg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src);
	dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);

	sg_free_table(&rctx->dst_tbl);

	error = qce_check_status(qce, &status);
	if (error < 0)
		dev_dbg(qce->dev, "skcipher operation error (%x)\n", status);

	memcpy(rctx->iv, result_buf->encr_cntr_iv, rctx->ivsize);
	qce->async_req_done(tmpl->qce, error);
}
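
/*
 * Prepare a request for the crypto engine: map the source and destination
 * scatterlists for DMA, append the result buffer to the destination table
 * so the engine can dump its status and updated IV there, then set up the
 * DMA descriptors and start the engine.
 */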
static int
qce_skcipher_async_req_handle(struct crypto_async_request *async_req)
{
	struct skcipher_request *req = skcipher_request_cast(async_req);
	struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct qce_alg_template *tmpl = to_cipher_tmpl(crypto_skcipher_reqtfm(req));
	struct qce_device *qce = tmpl->qce;
	enum dma_data_direction dir_src, dir_dst;
	struct scatterlist *sg;
	bool diff_dst;
	gfp_t gfp;
	int ret;

	rctx->iv = req->iv;
	rctx->ivsize = crypto_skcipher_ivsize(skcipher);
	rctx->cryptlen = req->cryptlen;

	diff_dst = (req->src != req->dst) ? true : false;
	dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
	dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;

	rctx->src_nents = sg_nents_for_len(req->src, req->cryptlen);
	if (diff_dst)
		rctx->dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
	else
		rctx->dst_nents = rctx->src_nents;
	if (rctx->src_nents < 0) {
		dev_err(qce->dev, "Invalid numbers of src SG.\n");
		return rctx->src_nents;
	}
	if (rctx->dst_nents < 0) {
		dev_err(qce->dev, "Invalid numbers of dst SG.\n");
		return rctx->dst_nents;
	}

	rctx->dst_nents += 1;

	gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
						GFP_KERNEL : GFP_ATOMIC;

	ret = sg_alloc_table(&rctx->dst_tbl, rctx->dst_nents, gfp);
	if (ret)
		return ret;

	sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);

	sg = qce_sgtable_add(&rctx->dst_tbl, req->dst, req->cryptlen);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto error_free;
	}

	sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg,
			     QCE_RESULT_BUF_SZ);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto error_free;
	}

	sg_mark_end(sg);
	rctx->dst_sg = rctx->dst_tbl.sgl;

	ret = dma_map_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
	if (ret < 0)
		goto error_free;

	if (diff_dst) {
		ret = dma_map_sg(qce->dev, req->src, rctx->src_nents, dir_src);
		if (ret < 0)
			goto error_unmap_dst;
		rctx->src_sg = req->src;
	} else {
		rctx->src_sg = rctx->dst_sg;
	}

	ret = qce_dma_prep_sgs(&qce->dma, rctx->src_sg, rctx->src_nents,
			       rctx->dst_sg, rctx->dst_nents,
			       qce_skcipher_done, async_req);
	if (ret)
		goto error_unmap_src;

	qce_dma_issue_pending(&qce->dma);

	ret = qce_start(async_req, tmpl->crypto_alg_type, req->cryptlen, 0);
	if (ret)
		goto error_terminate;

	return 0;

error_terminate:
	qce_dma_terminate_all(&qce->dma);
error_unmap_src:
	if (diff_dst)
		dma_unmap_sg(qce->dev, req->src, rctx->src_nents, dir_src);
error_unmap_dst:
	dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
error_free:
	sg_free_table(&rctx->dst_tbl);
	return ret;
}
static int qce_skcipher_setkey(struct crypto_skcipher *ablk, const u8 *key,
			       unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(ablk);
	struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
	unsigned long flags = to_cipher_tmpl(ablk)->alg_flags;
	int ret;

	if (!key || !keylen)
		return -EINVAL;

	switch (IS_XTS(flags) ? keylen >> 1 : keylen) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_256:
		memcpy(ctx->enc_key, key, keylen);
		break;
	}

	ret = crypto_skcipher_setkey(ctx->fallback, key, keylen);
	if (!ret)
		ctx->enc_keylen = keylen;
	return ret;
}
static int qce_des_setkey(struct crypto_skcipher *ablk, const u8 *key,
			  unsigned int keylen)
{
	struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(ablk);
	int err;

	err = verify_skcipher_des_key(ablk, key);
	if (err)
		return err;

	ctx->enc_keylen = keylen;
	memcpy(ctx->enc_key, key, keylen);
	return 0;
}
static int qce_des3_setkey(struct crypto_skcipher *ablk, const u8 *key,
			   unsigned int keylen)
{
	struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(ablk);
	int err;

	err = verify_skcipher_des3_key(ablk, key);
	if (err)
		return err;

	ctx->enc_keylen = keylen;
	memcpy(ctx->enc_key, key, keylen);
	return 0;
}
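
/*
 * For AES, requests with unsupported key sizes, requests no larger than
 * aes_sw_max_len, and XTS requests that are larger than QCE_SECTOR_SIZE
 * but not a multiple of it are routed to the software fallback; everything
 * else is queued for the crypto engine.
 */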
static int qce_skcipher_crypt(struct skcipher_request *req, int encrypt)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
	struct qce_alg_template *tmpl = to_cipher_tmpl(tfm);
	unsigned int keylen;
	int ret;

	rctx->flags = tmpl->alg_flags;
	rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT;
	keylen = IS_XTS(rctx->flags) ? ctx->enc_keylen >> 1 : ctx->enc_keylen;

	/* qce is hanging when AES-XTS request len > QCE_SECTOR_SIZE and
	 * is not a multiple of it; pass such requests to the fallback
	 */
	if (IS_AES(rctx->flags) &&
	    (((keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_256) ||
	      req->cryptlen <= aes_sw_max_len) ||
	     (IS_XTS(rctx->flags) && req->cryptlen > QCE_SECTOR_SIZE &&
	      req->cryptlen % QCE_SECTOR_SIZE))) {
		skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
		skcipher_request_set_callback(&rctx->fallback_req,
					      req->base.flags,
					      req->base.complete,
					      req->base.data);
		skcipher_request_set_crypt(&rctx->fallback_req, req->src,
					   req->dst, req->cryptlen, req->iv);
		ret = encrypt ? crypto_skcipher_encrypt(&rctx->fallback_req) :
				crypto_skcipher_decrypt(&rctx->fallback_req);
		return ret;
	}

	return tmpl->qce->async_req_enqueue(tmpl->qce, &req->base);
}
static int qce_skcipher_encrypt(struct skcipher_request *req)
{
	return qce_skcipher_crypt(req, 1);
}

static int qce_skcipher_decrypt(struct skcipher_request *req)
{
	return qce_skcipher_crypt(req, 0);
}
static int qce_skcipher_init(struct crypto_skcipher *tfm)
{
	/* take the size without the fallback skcipher_request at the end */
	crypto_skcipher_set_reqsize(tfm, offsetof(struct qce_cipher_reqctx,
						  fallback_req));
	return 0;
}
static int qce_skcipher_init_fallback(struct crypto_skcipher *tfm)
{
	struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->fallback = crypto_alloc_skcipher(crypto_tfm_alg_name(&tfm->base),
					      0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback))
		return PTR_ERR(ctx->fallback);

	crypto_skcipher_set_reqsize(tfm, sizeof(struct qce_cipher_reqctx) +
					 crypto_skcipher_reqsize(ctx->fallback));
	return 0;
}
static void qce_skcipher_exit(struct crypto_skcipher *tfm)
{
	struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(ctx->fallback);
}
struct qce_skcipher_def {
	unsigned long flags;
	const char *name;
	const char *drv_name;
	unsigned int blocksize;
	unsigned int chunksize;
	unsigned int ivsize;
	unsigned int min_keysize;
	unsigned int max_keysize;
};
static const struct qce_skcipher_def skcipher_def[] = {
	{
		.flags		= QCE_ALG_AES | QCE_MODE_ECB,
		.name		= "ecb(aes)",
		.drv_name	= "ecb-aes-qce",
		.blocksize	= AES_BLOCK_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
	},
	{
		.flags		= QCE_ALG_AES | QCE_MODE_CBC,
		.name		= "cbc(aes)",
		.drv_name	= "cbc-aes-qce",
		.blocksize	= AES_BLOCK_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
	},
	{
		.flags		= QCE_ALG_AES | QCE_MODE_CTR,
		.name		= "ctr(aes)",
		.drv_name	= "ctr-aes-qce",
		.blocksize	= 1,
		.chunksize	= AES_BLOCK_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
	},
	{
		.flags		= QCE_ALG_AES | QCE_MODE_XTS,
		.name		= "xts(aes)",
		.drv_name	= "xts-aes-qce",
		.blocksize	= AES_BLOCK_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.min_keysize	= AES_MIN_KEY_SIZE * 2,
		.max_keysize	= AES_MAX_KEY_SIZE * 2,
	},
	{
		.flags		= QCE_ALG_DES | QCE_MODE_ECB,
		.name		= "ecb(des)",
		.drv_name	= "ecb-des-qce",
		.blocksize	= DES_BLOCK_SIZE,
		.ivsize		= 0,
		.min_keysize	= DES_KEY_SIZE,
		.max_keysize	= DES_KEY_SIZE,
	},
	{
		.flags		= QCE_ALG_DES | QCE_MODE_CBC,
		.name		= "cbc(des)",
		.drv_name	= "cbc-des-qce",
		.blocksize	= DES_BLOCK_SIZE,
		.ivsize		= DES_BLOCK_SIZE,
		.min_keysize	= DES_KEY_SIZE,
		.max_keysize	= DES_KEY_SIZE,
	},
	{
		.flags		= QCE_ALG_3DES | QCE_MODE_ECB,
		.name		= "ecb(des3_ede)",
		.drv_name	= "ecb-3des-qce",
		.blocksize	= DES3_EDE_BLOCK_SIZE,
		.ivsize		= 0,
		.min_keysize	= DES3_EDE_KEY_SIZE,
		.max_keysize	= DES3_EDE_KEY_SIZE,
	},
	{
		.flags		= QCE_ALG_3DES | QCE_MODE_CBC,
		.name		= "cbc(des3_ede)",
		.drv_name	= "cbc-3des-qce",
		.blocksize	= DES3_EDE_BLOCK_SIZE,
		.ivsize		= DES3_EDE_BLOCK_SIZE,
		.min_keysize	= DES3_EDE_KEY_SIZE,
		.max_keysize	= DES3_EDE_KEY_SIZE,
	},
};
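
/*
 * Allocate a qce_alg_template for one table entry, fill in the skcipher_alg
 * callbacks (AES gets the fallback-aware init/exit) and register it with
 * the crypto API; registered templates are tracked on skcipher_algs.
 */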
static int qce_skcipher_register_one(const struct qce_skcipher_def *def,
				     struct qce_device *qce)
{
	struct qce_alg_template *tmpl;
	struct skcipher_alg *alg;
	int ret;

	tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL);
	if (!tmpl)
		return -ENOMEM;

	alg = &tmpl->alg.skcipher;

	snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
	snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 def->drv_name);

	alg->base.cra_blocksize		= def->blocksize;
	alg->chunksize			= def->chunksize;
	alg->ivsize			= def->ivsize;
	alg->min_keysize		= def->min_keysize;
	alg->max_keysize		= def->max_keysize;
	alg->setkey			= IS_3DES(def->flags) ? qce_des3_setkey :
					  IS_DES(def->flags) ? qce_des_setkey :
					  qce_skcipher_setkey;
	alg->encrypt			= qce_skcipher_encrypt;
	alg->decrypt			= qce_skcipher_decrypt;

	alg->base.cra_priority		= 300;
	alg->base.cra_flags		= CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_ALLOCATES_MEMORY |
					  CRYPTO_ALG_KERN_DRIVER_ONLY;
	alg->base.cra_ctxsize		= sizeof(struct qce_cipher_ctx);
	alg->base.cra_alignmask		= 0;
	alg->base.cra_module		= THIS_MODULE;

	if (IS_AES(def->flags)) {
		alg->base.cra_flags	|= CRYPTO_ALG_NEED_FALLBACK;
		alg->init		= qce_skcipher_init_fallback;
		alg->exit		= qce_skcipher_exit;
	} else {
		alg->init		= qce_skcipher_init;
	}

	INIT_LIST_HEAD(&tmpl->entry);
	tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_SKCIPHER;
	tmpl->alg_flags = def->flags;
	tmpl->qce = qce;

	ret = crypto_register_skcipher(alg);
	if (ret) {
		kfree(tmpl);
		dev_err(qce->dev, "%s registration failed\n", alg->base.cra_name);
		return ret;
	}

	list_add_tail(&tmpl->entry, &skcipher_algs);
	dev_dbg(qce->dev, "%s is registered\n", alg->base.cra_name);
	return 0;
}
static void qce_skcipher_unregister(struct qce_device *qce)
{
	struct qce_alg_template *tmpl, *n;

	list_for_each_entry_safe(tmpl, n, &skcipher_algs, entry) {
		crypto_unregister_skcipher(&tmpl->alg.skcipher);
		list_del(&tmpl->entry);
		kfree(tmpl);
	}
}
static int qce_skcipher_register(struct qce_device *qce)
{
	int ret, i;

	for (i = 0; i < ARRAY_SIZE(skcipher_def); i++) {
		ret = qce_skcipher_register_one(&skcipher_def[i], qce);
		if (ret)
			goto err;
	}

	return 0;
err:
	qce_skcipher_unregister(qce);
	return ret;
}
const struct qce_algo_ops skcipher_ops = {
	.type = CRYPTO_ALG_TYPE_SKCIPHER,
	.register_algs = qce_skcipher_register,
	.unregister_algs = qce_skcipher_unregister,
	.async_req_handle = qce_skcipher_async_req_handle,
};