/*
 * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
14 #include <linux/device.h>
15 #include <linux/interrupt.h>
16 #include <linux/types.h>
17 #include <crypto/aes.h>
18 #include <crypto/des.h>
19 #include <crypto/internal/skcipher.h>
/* All ablkcipher templates registered by this driver; guarded by the
 * register/unregister paths below (no concurrent mutation expected). */
static LIST_HEAD(ablkcipher_algs);
25 static void qce_ablkcipher_done(void *data
)
27 struct crypto_async_request
*async_req
= data
;
28 struct ablkcipher_request
*req
= ablkcipher_request_cast(async_req
);
29 struct qce_cipher_reqctx
*rctx
= ablkcipher_request_ctx(req
);
30 struct qce_alg_template
*tmpl
= to_cipher_tmpl(async_req
->tfm
);
31 struct qce_device
*qce
= tmpl
->qce
;
32 enum dma_data_direction dir_src
, dir_dst
;
37 diff_dst
= (req
->src
!= req
->dst
) ? true : false;
38 dir_src
= diff_dst
? DMA_TO_DEVICE
: DMA_BIDIRECTIONAL
;
39 dir_dst
= diff_dst
? DMA_FROM_DEVICE
: DMA_BIDIRECTIONAL
;
41 error
= qce_dma_terminate_all(&qce
->dma
);
43 dev_dbg(qce
->dev
, "ablkcipher dma termination error (%d)\n",
47 dma_unmap_sg(qce
->dev
, rctx
->src_sg
, rctx
->src_nents
, dir_src
);
48 dma_unmap_sg(qce
->dev
, rctx
->dst_sg
, rctx
->dst_nents
, dir_dst
);
50 sg_free_table(&rctx
->dst_tbl
);
52 error
= qce_check_status(qce
, &status
);
54 dev_dbg(qce
->dev
, "ablkcipher operation error (%x)\n", status
);
56 qce
->async_req_done(tmpl
->qce
, error
);
60 qce_ablkcipher_async_req_handle(struct crypto_async_request
*async_req
)
62 struct ablkcipher_request
*req
= ablkcipher_request_cast(async_req
);
63 struct qce_cipher_reqctx
*rctx
= ablkcipher_request_ctx(req
);
64 struct crypto_ablkcipher
*ablkcipher
= crypto_ablkcipher_reqtfm(req
);
65 struct qce_alg_template
*tmpl
= to_cipher_tmpl(async_req
->tfm
);
66 struct qce_device
*qce
= tmpl
->qce
;
67 enum dma_data_direction dir_src
, dir_dst
;
68 struct scatterlist
*sg
;
74 rctx
->ivsize
= crypto_ablkcipher_ivsize(ablkcipher
);
75 rctx
->cryptlen
= req
->nbytes
;
77 diff_dst
= (req
->src
!= req
->dst
) ? true : false;
78 dir_src
= diff_dst
? DMA_TO_DEVICE
: DMA_BIDIRECTIONAL
;
79 dir_dst
= diff_dst
? DMA_FROM_DEVICE
: DMA_BIDIRECTIONAL
;
81 rctx
->src_nents
= sg_nents_for_len(req
->src
, req
->nbytes
);
83 rctx
->dst_nents
= sg_nents_for_len(req
->dst
, req
->nbytes
);
85 rctx
->dst_nents
= rctx
->src_nents
;
86 if (rctx
->src_nents
< 0) {
87 dev_err(qce
->dev
, "Invalid numbers of src SG.\n");
88 return rctx
->src_nents
;
90 if (rctx
->dst_nents
< 0) {
91 dev_err(qce
->dev
, "Invalid numbers of dst SG.\n");
92 return -rctx
->dst_nents
;
97 gfp
= (req
->base
.flags
& CRYPTO_TFM_REQ_MAY_SLEEP
) ?
98 GFP_KERNEL
: GFP_ATOMIC
;
100 ret
= sg_alloc_table(&rctx
->dst_tbl
, rctx
->dst_nents
, gfp
);
104 sg_init_one(&rctx
->result_sg
, qce
->dma
.result_buf
, QCE_RESULT_BUF_SZ
);
106 sg
= qce_sgtable_add(&rctx
->dst_tbl
, req
->dst
);
112 sg
= qce_sgtable_add(&rctx
->dst_tbl
, &rctx
->result_sg
);
119 rctx
->dst_sg
= rctx
->dst_tbl
.sgl
;
121 ret
= dma_map_sg(qce
->dev
, rctx
->dst_sg
, rctx
->dst_nents
, dir_dst
);
126 ret
= dma_map_sg(qce
->dev
, req
->src
, rctx
->src_nents
, dir_src
);
128 goto error_unmap_dst
;
129 rctx
->src_sg
= req
->src
;
131 rctx
->src_sg
= rctx
->dst_sg
;
134 ret
= qce_dma_prep_sgs(&qce
->dma
, rctx
->src_sg
, rctx
->src_nents
,
135 rctx
->dst_sg
, rctx
->dst_nents
,
136 qce_ablkcipher_done
, async_req
);
138 goto error_unmap_src
;
140 qce_dma_issue_pending(&qce
->dma
);
142 ret
= qce_start(async_req
, tmpl
->crypto_alg_type
, req
->nbytes
, 0);
144 goto error_terminate
;
149 qce_dma_terminate_all(&qce
->dma
);
152 dma_unmap_sg(qce
->dev
, req
->src
, rctx
->src_nents
, dir_src
);
154 dma_unmap_sg(qce
->dev
, rctx
->dst_sg
, rctx
->dst_nents
, dir_dst
);
156 sg_free_table(&rctx
->dst_tbl
);
160 static int qce_ablkcipher_setkey(struct crypto_ablkcipher
*ablk
, const u8
*key
,
163 struct crypto_tfm
*tfm
= crypto_ablkcipher_tfm(ablk
);
164 struct qce_cipher_ctx
*ctx
= crypto_tfm_ctx(tfm
);
165 unsigned long flags
= to_cipher_tmpl(tfm
)->alg_flags
;
173 case AES_KEYSIZE_128
:
174 case AES_KEYSIZE_256
:
179 } else if (IS_DES(flags
)) {
180 u32 tmp
[DES_EXPKEY_WORDS
];
182 ret
= des_ekey(tmp
, key
);
183 if (!ret
&& (crypto_ablkcipher_get_flags(ablk
) &
184 CRYPTO_TFM_REQ_FORBID_WEAK_KEYS
))
188 ctx
->enc_keylen
= keylen
;
189 memcpy(ctx
->enc_key
, key
, keylen
);
192 ret
= crypto_sync_skcipher_setkey(ctx
->fallback
, key
, keylen
);
194 ctx
->enc_keylen
= keylen
;
197 crypto_ablkcipher_set_flags(ablk
, CRYPTO_TFM_RES_WEAK_KEY
);
201 static int qce_des3_setkey(struct crypto_ablkcipher
*ablk
, const u8
*key
,
204 struct qce_cipher_ctx
*ctx
= crypto_ablkcipher_ctx(ablk
);
208 flags
= crypto_ablkcipher_get_flags(ablk
);
209 err
= __des3_verify_key(&flags
, key
);
211 crypto_ablkcipher_set_flags(ablk
, flags
);
215 ctx
->enc_keylen
= keylen
;
216 memcpy(ctx
->enc_key
, key
, keylen
);
220 static int qce_ablkcipher_crypt(struct ablkcipher_request
*req
, int encrypt
)
222 struct crypto_tfm
*tfm
=
223 crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req
));
224 struct qce_cipher_ctx
*ctx
= crypto_tfm_ctx(tfm
);
225 struct qce_cipher_reqctx
*rctx
= ablkcipher_request_ctx(req
);
226 struct qce_alg_template
*tmpl
= to_cipher_tmpl(tfm
);
229 rctx
->flags
= tmpl
->alg_flags
;
230 rctx
->flags
|= encrypt
? QCE_ENCRYPT
: QCE_DECRYPT
;
232 if (IS_AES(rctx
->flags
) && ctx
->enc_keylen
!= AES_KEYSIZE_128
&&
233 ctx
->enc_keylen
!= AES_KEYSIZE_256
) {
234 SYNC_SKCIPHER_REQUEST_ON_STACK(subreq
, ctx
->fallback
);
236 skcipher_request_set_sync_tfm(subreq
, ctx
->fallback
);
237 skcipher_request_set_callback(subreq
, req
->base
.flags
,
239 skcipher_request_set_crypt(subreq
, req
->src
, req
->dst
,
240 req
->nbytes
, req
->info
);
241 ret
= encrypt
? crypto_skcipher_encrypt(subreq
) :
242 crypto_skcipher_decrypt(subreq
);
243 skcipher_request_zero(subreq
);
247 return tmpl
->qce
->async_req_enqueue(tmpl
->qce
, &req
->base
);
/* crypto_alg .encrypt hook: run the request in the encrypt direction. */
static int qce_ablkcipher_encrypt(struct ablkcipher_request *req)
{
	return qce_ablkcipher_crypt(req, 1);
}
/* crypto_alg .decrypt hook: run the request in the decrypt direction. */
static int qce_ablkcipher_decrypt(struct ablkcipher_request *req)
{
	return qce_ablkcipher_crypt(req, 0);
}
/*
 * Per-tfm init: zero the context, size the request context, and allocate
 * the software fallback skcipher (same algorithm name, NEED_FALLBACK so
 * this driver's own registration is not selected).
 */
static int qce_ablkcipher_init(struct crypto_tfm *tfm)
{
	struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);

	memset(ctx, 0, sizeof(*ctx));
	tfm->crt_ablkcipher.reqsize = sizeof(struct qce_cipher_reqctx);

	ctx->fallback = crypto_alloc_sync_skcipher(crypto_tfm_alg_name(tfm),
						   0, CRYPTO_ALG_NEED_FALLBACK);
	return PTR_ERR_OR_ZERO(ctx->fallback);
}
/* Per-tfm teardown: release the fallback skcipher allocated in init. */
static void qce_ablkcipher_exit(struct crypto_tfm *tfm)
{
	struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_sync_skcipher(ctx->fallback);
}
/*
 * Static description of one cipher algorithm supported by the engine;
 * qce_ablkcipher_register_one() turns each entry into a crypto_alg.
 * (flags/name/ivsize fields reconstructed from their uses in
 * qce_ablkcipher_register_one — confirm against the original header.)
 */
struct qce_ablkcipher_def {
	unsigned long flags;		/* QCE_ALG_* | QCE_MODE_* */
	const char *name;		/* cra_name, e.g. "cbc(aes)" */
	const char *drv_name;		/* cra_driver_name */
	unsigned int blocksize;
	unsigned int ivsize;
	unsigned int min_keysize;
	unsigned int max_keysize;
};
/* Algorithms offered by the engine: AES in ECB/CBC/CTR/XTS, single DES
 * and triple DES in ECB/CBC.  (.name entries for the AES/DES modes were
 * lost in extraction and reconstructed from the drv_name strings —
 * confirm against the original file.) */
static const struct qce_ablkcipher_def ablkcipher_def[] = {
	{
		.flags		= QCE_ALG_AES | QCE_MODE_ECB,
		.name		= "ecb(aes)",
		.drv_name	= "ecb-aes-qce",
		.blocksize	= AES_BLOCK_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
	},
	{
		.flags		= QCE_ALG_AES | QCE_MODE_CBC,
		.name		= "cbc(aes)",
		.drv_name	= "cbc-aes-qce",
		.blocksize	= AES_BLOCK_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
	},
	{
		.flags		= QCE_ALG_AES | QCE_MODE_CTR,
		.name		= "ctr(aes)",
		.drv_name	= "ctr-aes-qce",
		.blocksize	= AES_BLOCK_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
	},
	{
		.flags		= QCE_ALG_AES | QCE_MODE_XTS,
		.name		= "xts(aes)",
		.drv_name	= "xts-aes-qce",
		.blocksize	= AES_BLOCK_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
	},
	{
		.flags		= QCE_ALG_DES | QCE_MODE_ECB,
		.name		= "ecb(des)",
		.drv_name	= "ecb-des-qce",
		.blocksize	= DES_BLOCK_SIZE,
		.ivsize		= 0,
		.min_keysize	= DES_KEY_SIZE,
		.max_keysize	= DES_KEY_SIZE,
	},
	{
		.flags		= QCE_ALG_DES | QCE_MODE_CBC,
		.name		= "cbc(des)",
		.drv_name	= "cbc-des-qce",
		.blocksize	= DES_BLOCK_SIZE,
		.ivsize		= DES_BLOCK_SIZE,
		.min_keysize	= DES_KEY_SIZE,
		.max_keysize	= DES_KEY_SIZE,
	},
	{
		.flags		= QCE_ALG_3DES | QCE_MODE_ECB,
		.name		= "ecb(des3_ede)",
		.drv_name	= "ecb-3des-qce",
		.blocksize	= DES3_EDE_BLOCK_SIZE,
		.ivsize		= 0,
		.min_keysize	= DES3_EDE_KEY_SIZE,
		.max_keysize	= DES3_EDE_KEY_SIZE,
	},
	{
		.flags		= QCE_ALG_3DES | QCE_MODE_CBC,
		.name		= "cbc(des3_ede)",
		.drv_name	= "cbc-3des-qce",
		.blocksize	= DES3_EDE_BLOCK_SIZE,
		.ivsize		= DES3_EDE_BLOCK_SIZE,
		.min_keysize	= DES3_EDE_KEY_SIZE,
		.max_keysize	= DES3_EDE_KEY_SIZE,
	},
};
364 static int qce_ablkcipher_register_one(const struct qce_ablkcipher_def
*def
,
365 struct qce_device
*qce
)
367 struct qce_alg_template
*tmpl
;
368 struct crypto_alg
*alg
;
371 tmpl
= kzalloc(sizeof(*tmpl
), GFP_KERNEL
);
375 alg
= &tmpl
->alg
.crypto
;
377 snprintf(alg
->cra_name
, CRYPTO_MAX_ALG_NAME
, "%s", def
->name
);
378 snprintf(alg
->cra_driver_name
, CRYPTO_MAX_ALG_NAME
, "%s",
381 alg
->cra_blocksize
= def
->blocksize
;
382 alg
->cra_ablkcipher
.ivsize
= def
->ivsize
;
383 alg
->cra_ablkcipher
.min_keysize
= def
->min_keysize
;
384 alg
->cra_ablkcipher
.max_keysize
= def
->max_keysize
;
385 alg
->cra_ablkcipher
.setkey
= IS_3DES(def
->flags
) ?
386 qce_des3_setkey
: qce_ablkcipher_setkey
;
387 alg
->cra_ablkcipher
.encrypt
= qce_ablkcipher_encrypt
;
388 alg
->cra_ablkcipher
.decrypt
= qce_ablkcipher_decrypt
;
390 alg
->cra_priority
= 300;
391 alg
->cra_flags
= CRYPTO_ALG_TYPE_ABLKCIPHER
| CRYPTO_ALG_ASYNC
|
392 CRYPTO_ALG_NEED_FALLBACK
;
393 alg
->cra_ctxsize
= sizeof(struct qce_cipher_ctx
);
394 alg
->cra_alignmask
= 0;
395 alg
->cra_type
= &crypto_ablkcipher_type
;
396 alg
->cra_module
= THIS_MODULE
;
397 alg
->cra_init
= qce_ablkcipher_init
;
398 alg
->cra_exit
= qce_ablkcipher_exit
;
400 INIT_LIST_HEAD(&tmpl
->entry
);
401 tmpl
->crypto_alg_type
= CRYPTO_ALG_TYPE_ABLKCIPHER
;
402 tmpl
->alg_flags
= def
->flags
;
405 ret
= crypto_register_alg(alg
);
408 dev_err(qce
->dev
, "%s registration failed\n", alg
->cra_name
);
412 list_add_tail(&tmpl
->entry
, &ablkcipher_algs
);
413 dev_dbg(qce
->dev
, "%s is registered\n", alg
->cra_name
);
417 static void qce_ablkcipher_unregister(struct qce_device
*qce
)
419 struct qce_alg_template
*tmpl
, *n
;
421 list_for_each_entry_safe(tmpl
, n
, &ablkcipher_algs
, entry
) {
422 crypto_unregister_alg(&tmpl
->alg
.crypto
);
423 list_del(&tmpl
->entry
);
428 static int qce_ablkcipher_register(struct qce_device
*qce
)
432 for (i
= 0; i
< ARRAY_SIZE(ablkcipher_def
); i
++) {
433 ret
= qce_ablkcipher_register_one(&ablkcipher_def
[i
], qce
);
440 qce_ablkcipher_unregister(qce
);
/* Operations exported to the QCE core for the ablkcipher algorithm class. */
const struct qce_algo_ops ablkcipher_ops = {
	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
	.register_algs = qce_ablkcipher_register,
	.unregister_algs = qce_ablkcipher_unregister,
	.async_req_handle = qce_ablkcipher_async_req_handle,
};