/*
 * Cipher algorithms supported by the CESA: DES, 3DES and AES.
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 * Author: Arnaud Ebalard <arno@natisbad.org>
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include <crypto/aes.h>
#include <crypto/des.h>

#include "cesa.h"
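
/*
 * Requests are handled in one of two modes, selected at runtime with
 * mv_cesa_req_get_type():
 *
 * - standard mode (CESA_STD_REQ): the CPU copies the operation descriptor
 *   and each chunk of payload into the engine SRAM and kicks the
 *   accelerator once per chunk;
 * - DMA mode (CESA_DMA_REQ): a TDMA descriptor chain is built up front,
 *   so the engine fetches and pushes data without further CPU involvement.
 */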

struct mv_cesa_des_ctx {
	struct mv_cesa_ctx base;
	u8 key[DES_KEY_SIZE];
};

struct mv_cesa_des3_ctx {
	struct mv_cesa_ctx base;
	u8 key[DES3_EDE_KEY_SIZE];
};

struct mv_cesa_aes_ctx {
	struct mv_cesa_ctx base;
	struct crypto_aes_ctx aes;
};

struct mv_cesa_ablkcipher_dma_iter {
	struct mv_cesa_dma_iter base;
	struct mv_cesa_sg_dma_iter src;
	struct mv_cesa_sg_dma_iter dst;
};
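
/*
 * A DMA-backed request is split into operations covering at most one SRAM
 * payload (CESA_SA_SRAM_PAYLOAD_SIZE) each. This iterator tracks the
 * current position in both the source and destination scatterlists while
 * the TDMA chain is being built.
 */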

static inline void
mv_cesa_ablkcipher_req_iter_init(struct mv_cesa_ablkcipher_dma_iter *iter,
				 struct ablkcipher_request *req)
{
	mv_cesa_req_dma_iter_init(&iter->base, req->nbytes);
	mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE);
	mv_cesa_sg_dma_iter_init(&iter->dst, req->dst, DMA_FROM_DEVICE);
}

static inline bool
mv_cesa_ablkcipher_req_iter_next_op(struct mv_cesa_ablkcipher_dma_iter *iter)
{
	iter->src.op_offset = 0;
	iter->dst.op_offset = 0;

	return mv_cesa_req_dma_iter_next_op(&iter->base);
}
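
/*
 * In-place requests (req->src == req->dst) are mapped once with
 * DMA_BIDIRECTIONAL, while out-of-place requests get separate
 * unidirectional mappings for src and dst. The cleanup below mirrors the
 * mapping scheme used in mv_cesa_ablkcipher_dma_req_init().
 */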

static inline void
mv_cesa_ablkcipher_dma_cleanup(struct ablkcipher_request *req)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);

	if (req->dst != req->src) {
		dma_unmap_sg(cesa_dev->dev, req->dst, creq->dst_nents,
			     DMA_FROM_DEVICE);
		dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
			     DMA_TO_DEVICE);
	} else {
		dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
			     DMA_BIDIRECTIONAL);
	}
	mv_cesa_dma_cleanup(&creq->base);
}

static inline void mv_cesa_ablkcipher_cleanup(struct ablkcipher_request *req)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_ablkcipher_dma_cleanup(req);
}
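
/*
 * Standard (non-DMA) processing of one chunk: copy the operation
 * descriptor and the input data into the engine SRAM, then start the
 * accelerator. The full operation context is written only for the first
 * chunk; subsequent chunks reuse the context already loaded into the
 * engine and only update the descriptor part (see the skip_ctx logic).
 */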

static void mv_cesa_ablkcipher_std_step(struct ablkcipher_request *req)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
	struct mv_cesa_ablkcipher_std_req *sreq = &creq->std;
	struct mv_cesa_engine *engine = creq->base.engine;
	size_t len = min_t(size_t, req->nbytes - sreq->offset,
			   CESA_SA_SRAM_PAYLOAD_SIZE);

	mv_cesa_adjust_op(engine, &sreq->op);
	memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op));

	len = sg_pcopy_to_buffer(req->src, creq->src_nents,
				 engine->sram + CESA_SA_DATA_SRAM_OFFSET,
				 len, sreq->offset);

	sreq->size = len;
	mv_cesa_set_crypt_op_len(&sreq->op, len);

	/* FIXME: only update enc_len field */
	if (!sreq->skip_ctx) {
		memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op));
		sreq->skip_ctx = true;
	} else {
		memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op.desc));
	}

	mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
	writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
	BUG_ON(readl(engine->regs + CESA_SA_CMD) &
	       CESA_SA_CMD_EN_CESA_SA_ACCL0);
	writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}
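
/*
 * Completion handler for standard mode: copy the processed chunk back
 * from the engine SRAM, and return -EINPROGRESS while chunks remain (the
 * core will then step the request again) or 0 once the whole request has
 * been processed.
 */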

static int mv_cesa_ablkcipher_std_process(struct ablkcipher_request *req,
					  u32 status)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
	struct mv_cesa_ablkcipher_std_req *sreq = &creq->std;
	struct mv_cesa_engine *engine = creq->base.engine;
	size_t len;

	len = sg_pcopy_from_buffer(req->dst, creq->dst_nents,
				   engine->sram + CESA_SA_DATA_SRAM_OFFSET,
				   sreq->size, sreq->offset);

	sreq->offset += len;
	if (sreq->offset < req->nbytes)
		return -EINPROGRESS;

	return 0;
}

static int mv_cesa_ablkcipher_process(struct crypto_async_request *req,
				      u32 status)
{
	struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);
	struct mv_cesa_req *basereq = &creq->base;

	if (mv_cesa_req_get_type(basereq) == CESA_STD_REQ)
		return mv_cesa_ablkcipher_std_process(ablkreq, status);

	return mv_cesa_dma_process(basereq, status);
}

static void mv_cesa_ablkcipher_step(struct crypto_async_request *req)
{
	struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_dma_step(&creq->base);
	else
		mv_cesa_ablkcipher_std_step(ablkreq);
}

static inline void
mv_cesa_ablkcipher_dma_prepare(struct ablkcipher_request *req)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
	struct mv_cesa_req *basereq = &creq->base;

	mv_cesa_dma_prepare(basereq, basereq->engine);
}

static inline void
mv_cesa_ablkcipher_std_prepare(struct ablkcipher_request *req)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
	struct mv_cesa_ablkcipher_std_req *sreq = &creq->std;

	sreq->size = 0;
	sreq->offset = 0;
}

static inline void mv_cesa_ablkcipher_prepare(struct crypto_async_request *req,
					      struct mv_cesa_engine *engine)
{
	struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);

	creq->base.engine = engine;

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_ablkcipher_dma_prepare(ablkreq);
	else
		mv_cesa_ablkcipher_std_prepare(ablkreq);
}

static inline void
mv_cesa_ablkcipher_req_cleanup(struct crypto_async_request *req)
{
	struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);

	mv_cesa_ablkcipher_cleanup(ablkreq);
}
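
/*
 * Propagate the updated IV back to req->info so that chained CBC requests
 * can continue from the right state: it is fetched from the last TDMA
 * descriptor in DMA mode, and from the engine SRAM in standard mode.
 */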

static void
mv_cesa_ablkcipher_complete(struct crypto_async_request *req)
{
	struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);
	struct mv_cesa_engine *engine = creq->base.engine;
	unsigned int ivsize;

	atomic_sub(ablkreq->nbytes, &engine->load);
	ivsize = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(ablkreq));

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ) {
		struct mv_cesa_req *basereq;

		basereq = &creq->base;
		memcpy(ablkreq->info, basereq->chain.last->data, ivsize);
	} else {
		memcpy_fromio(ablkreq->info,
			      engine->sram + CESA_SA_CRYPT_IV_SRAM_OFFSET,
			      ivsize);
	}
}

static const struct mv_cesa_req_ops mv_cesa_ablkcipher_req_ops = {
	.step = mv_cesa_ablkcipher_step,
	.process = mv_cesa_ablkcipher_process,
	.cleanup = mv_cesa_ablkcipher_req_cleanup,
	.complete = mv_cesa_ablkcipher_complete,
};

static int mv_cesa_ablkcipher_cra_init(struct crypto_tfm *tfm)
{
	struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->base.ops = &mv_cesa_ablkcipher_req_ops;

	tfm->crt_ablkcipher.reqsize = sizeof(struct mv_cesa_ablkcipher_req);

	return 0;
}
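
/*
 * crypto_aes_expand_key() fills ctx->aes.key_enc with the full encryption
 * key schedule and places the last round key in key_dec[0..3]. The engine
 * only takes a key_length-byte decryption key (see mv_cesa_aes_op()), so
 * the loop below completes key_dec with the schedule words preceding the
 * last round key. E.g. for AES-256 (key_length == 32): remaining = 4 and
 * offset = 52, so key_dec[4..7] = key_enc[52..55], i.e. key_dec holds the
 * last 32 bytes of the schedule with the final round key first.
 */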

static int mv_cesa_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
			      unsigned int len)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(tfm);
	int remaining;
	int offset;
	int ret;
	int i;

	ret = crypto_aes_expand_key(&ctx->aes, key, len);
	if (ret) {
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return ret;
	}

	remaining = (ctx->aes.key_length - 16) / 4;
	offset = ctx->aes.key_length + 24 - remaining;
	for (i = 0; i < remaining; i++)
		ctx->aes.key_dec[4 + i] =
			cpu_to_le32(ctx->aes.key_enc[offset + i]);

	return 0;
}
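
/*
 * DES keys are stored verbatim: des_ekey() is only called to detect weak
 * keys, which are rejected when the caller requested it through
 * CRYPTO_TFM_REQ_WEAK_KEY.
 */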

static int mv_cesa_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
			      unsigned int len)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(tfm);
	u32 tmp[DES_EXPKEY_WORDS];
	int ret;

	if (len != DES_KEY_SIZE) {
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	ret = des_ekey(tmp, key);
	if (!ret && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
		tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
		return -EINVAL;
	}

	memcpy(ctx->key, key, DES_KEY_SIZE);

	return 0;
}

static int mv_cesa_des3_ede_setkey(struct crypto_ablkcipher *cipher,
				   const u8 *key, unsigned int len)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct mv_cesa_des3_ctx *ctx = crypto_tfm_ctx(tfm);

	if (len != DES3_EDE_KEY_SIZE) {
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	memcpy(ctx->key, key, DES3_EDE_KEY_SIZE);

	return 0;
}
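
/*
 * Build the TDMA descriptor chain for a cipher request. For each chunk
 * (at most one SRAM payload) the chain receives: an operation descriptor
 * (the full context is only included once, see skip_ctx), the input data
 * transfers, a dummy descriptor that triggers the engine, and the output
 * data transfers. A final descriptor copies the updated IV out of the
 * SRAM so it can be retrieved in mv_cesa_ablkcipher_complete().
 */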

static int mv_cesa_ablkcipher_dma_req_init(struct ablkcipher_request *req,
					   const struct mv_cesa_op_ctx *op_templ)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	struct mv_cesa_req *basereq = &creq->base;
	struct mv_cesa_ablkcipher_dma_iter iter;
	bool skip_ctx = false;
	int ret;
	unsigned int ivsize;

	basereq->chain.first = NULL;
	basereq->chain.last = NULL;

	if (req->src != req->dst) {
		ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
				 DMA_TO_DEVICE);
		if (!ret)
			return -ENOMEM;

		ret = dma_map_sg(cesa_dev->dev, req->dst, creq->dst_nents,
				 DMA_FROM_DEVICE);
		if (!ret) {
			ret = -ENOMEM;
			goto err_unmap_src;
		}
	} else {
		ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
				 DMA_BIDIRECTIONAL);
		if (!ret)
			return -ENOMEM;
	}

	mv_cesa_tdma_desc_iter_init(&basereq->chain);
	mv_cesa_ablkcipher_req_iter_init(&iter, req);

	do {
		struct mv_cesa_op_ctx *op;

		op = mv_cesa_dma_add_op(&basereq->chain, op_templ, skip_ctx,
					flags);
		if (IS_ERR(op)) {
			ret = PTR_ERR(op);
			goto err_free_tdma;
		}
		skip_ctx = true;

		mv_cesa_set_crypt_op_len(op, iter.base.op_len);

		/* Add input transfers */
		ret = mv_cesa_dma_add_op_transfers(&basereq->chain, &iter.base,
						   &iter.src, flags);
		if (ret)
			goto err_free_tdma;

		/* Add dummy desc to launch the crypto operation */
		ret = mv_cesa_dma_add_dummy_launch(&basereq->chain, flags);
		if (ret)
			goto err_free_tdma;

		/* Add output transfers */
		ret = mv_cesa_dma_add_op_transfers(&basereq->chain, &iter.base,
						   &iter.dst, flags);
		if (ret)
			goto err_free_tdma;

	} while (mv_cesa_ablkcipher_req_iter_next_op(&iter));

	/* Add output data for IV */
	ivsize = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req));
	ret = mv_cesa_dma_add_iv_op(&basereq->chain,
				    CESA_SA_CRYPT_IV_SRAM_OFFSET,
				    ivsize, CESA_TDMA_SRC_IN_SRAM, flags);
	if (ret)
		goto err_free_tdma;

	basereq->chain.last->flags |= CESA_TDMA_END_OF_REQ;

	return 0;

err_free_tdma:
	mv_cesa_dma_cleanup(basereq);
	if (req->dst != req->src)
		dma_unmap_sg(cesa_dev->dev, req->dst, creq->dst_nents,
			     DMA_FROM_DEVICE);

err_unmap_src:
	dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
		     req->dst != req->src ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL);

	return ret;
}

static inline int
mv_cesa_ablkcipher_std_req_init(struct ablkcipher_request *req,
				const struct mv_cesa_op_ctx *op_templ)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
	struct mv_cesa_ablkcipher_std_req *sreq = &creq->std;
	struct mv_cesa_req *basereq = &creq->base;

	sreq->op = *op_templ;
	sreq->skip_ctx = false;
	basereq->chain.first = NULL;
	basereq->chain.last = NULL;

	return 0;
}
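
/*
 * Common request initialization: reject lengths that are not a multiple
 * of the cipher block size, count the scatterlist entries, and build
 * either a TDMA chain or a standard request depending on the engine
 * capabilities.
 */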

static int mv_cesa_ablkcipher_req_init(struct ablkcipher_request *req,
				       struct mv_cesa_op_ctx *tmpl)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	unsigned int blksize = crypto_ablkcipher_blocksize(tfm);
	int ret;

	if (!IS_ALIGNED(req->nbytes, blksize))
		return -EINVAL;

	creq->src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (creq->src_nents < 0) {
		dev_err(cesa_dev->dev, "Invalid number of src SG");
		return creq->src_nents;
	}
	creq->dst_nents = sg_nents_for_len(req->dst, req->nbytes);
	if (creq->dst_nents < 0) {
		dev_err(cesa_dev->dev, "Invalid number of dst SG");
		return creq->dst_nents;
	}

	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_OP_CRYPT_ONLY,
			      CESA_SA_DESC_CFG_OP_MSK);

	if (cesa_dev->caps->has_tdma)
		ret = mv_cesa_ablkcipher_dma_req_init(req, tmpl);
	else
		ret = mv_cesa_ablkcipher_std_req_init(req, tmpl);

	return ret;
}
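
/*
 * Pick an engine based on the number of bytes currently in flight on each
 * of them (cf. engine->load), bind the request to it and queue it.
 * Cleanup is only done here when the request will not go through the
 * normal completion path.
 */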

static int mv_cesa_ablkcipher_queue_req(struct ablkcipher_request *req,
					struct mv_cesa_op_ctx *tmpl)
{
	int ret;
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
	struct mv_cesa_engine *engine;

	ret = mv_cesa_ablkcipher_req_init(req, tmpl);
	if (ret)
		return ret;

	engine = mv_cesa_select_engine(req->nbytes);
	mv_cesa_ablkcipher_prepare(&req->base, engine);

	ret = mv_cesa_queue_req(&req->base, &creq->base);

	if (mv_cesa_req_needs_cleanup(&req->base, ret))
		mv_cesa_ablkcipher_cleanup(req);

	return ret;
}
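
/*
 * The cipher-specific helpers below all follow the same pattern: the
 * encrypt/decrypt entry points build an operation template on the stack
 * carrying the chaining mode and direction bits, and the mv_cesa_*_op()
 * helper fills in the algorithm selector and the key (plus the IV for
 * CBC) before queueing the request.
 */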

static int mv_cesa_des_op(struct ablkcipher_request *req,
			  struct mv_cesa_op_ctx *tmpl)
{
	struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(req->base.tfm);

	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTM_DES,
			      CESA_SA_DESC_CFG_CRYPTM_MSK);

	memcpy(tmpl->ctx.blkcipher.key, ctx->key, DES_KEY_SIZE);

	return mv_cesa_ablkcipher_queue_req(req, tmpl);
}

static int mv_cesa_ecb_des_encrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_des_op(req, &tmpl);
}

static int mv_cesa_ecb_des_decrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_des_op(req, &tmpl);
}
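
/*
 * The crypto_alg definitions below are registered with the crypto API by
 * the platform driver (see cesa.c). They all share
 * mv_cesa_ablkcipher_cra_init() and the asynchronous request ops defined
 * above.
 */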

struct crypto_alg mv_cesa_ecb_des_alg = {
	.cra_name = "ecb(des)",
	.cra_driver_name = "mv-ecb-des",
	.cra_priority = 300,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
		     CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
	.cra_blocksize = DES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct mv_cesa_des_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = mv_cesa_ablkcipher_cra_init,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.setkey = mv_cesa_des_setkey,
			.encrypt = mv_cesa_ecb_des_encrypt,
			.decrypt = mv_cesa_ecb_des_decrypt,
		},
	},
};

static int mv_cesa_cbc_des_op(struct ablkcipher_request *req,
			      struct mv_cesa_op_ctx *tmpl)
{
	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC,
			      CESA_SA_DESC_CFG_CRYPTCM_MSK);

	memcpy(tmpl->ctx.blkcipher.iv, req->info, DES_BLOCK_SIZE);

	return mv_cesa_des_op(req, tmpl);
}

static int mv_cesa_cbc_des_encrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_cbc_des_op(req, &tmpl);
}

static int mv_cesa_cbc_des_decrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_cbc_des_op(req, &tmpl);
}

struct crypto_alg mv_cesa_cbc_des_alg = {
	.cra_name = "cbc(des)",
	.cra_driver_name = "mv-cbc-des",
	.cra_priority = 300,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
		     CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
	.cra_blocksize = DES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct mv_cesa_des_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = mv_cesa_ablkcipher_cra_init,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
			.setkey = mv_cesa_des_setkey,
			.encrypt = mv_cesa_cbc_des_encrypt,
			.decrypt = mv_cesa_cbc_des_decrypt,
		},
	},
};

static int mv_cesa_des3_op(struct ablkcipher_request *req,
			   struct mv_cesa_op_ctx *tmpl)
{
	struct mv_cesa_des3_ctx *ctx = crypto_tfm_ctx(req->base.tfm);

	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTM_3DES,
			      CESA_SA_DESC_CFG_CRYPTM_MSK);

	memcpy(tmpl->ctx.blkcipher.key, ctx->key, DES3_EDE_KEY_SIZE);

	return mv_cesa_ablkcipher_queue_req(req, tmpl);
}

static int mv_cesa_ecb_des3_ede_encrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_3DES_EDE |
			   CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_des3_op(req, &tmpl);
}

static int mv_cesa_ecb_des3_ede_decrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_3DES_EDE |
			   CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_des3_op(req, &tmpl);
}

struct crypto_alg mv_cesa_ecb_des3_ede_alg = {
	.cra_name = "ecb(des3_ede)",
	.cra_driver_name = "mv-ecb-des3-ede",
	.cra_priority = 300,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
		     CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
	.cra_blocksize = DES3_EDE_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct mv_cesa_des3_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = mv_cesa_ablkcipher_cra_init,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.setkey = mv_cesa_des3_ede_setkey,
			.encrypt = mv_cesa_ecb_des3_ede_encrypt,
			.decrypt = mv_cesa_ecb_des3_ede_decrypt,
		},
	},
};

static int mv_cesa_cbc_des3_op(struct ablkcipher_request *req,
			       struct mv_cesa_op_ctx *tmpl)
{
	memcpy(tmpl->ctx.blkcipher.iv, req->info, DES3_EDE_BLOCK_SIZE);

	return mv_cesa_des3_op(req, tmpl);
}

static int mv_cesa_cbc_des3_ede_encrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_CBC |
			   CESA_SA_DESC_CFG_3DES_EDE |
			   CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_cbc_des3_op(req, &tmpl);
}

static int mv_cesa_cbc_des3_ede_decrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_CBC |
			   CESA_SA_DESC_CFG_3DES_EDE |
			   CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_cbc_des3_op(req, &tmpl);
}

struct crypto_alg mv_cesa_cbc_des3_ede_alg = {
	.cra_name = "cbc(des3_ede)",
	.cra_driver_name = "mv-cbc-des3-ede",
	.cra_priority = 300,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
		     CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
	.cra_blocksize = DES3_EDE_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct mv_cesa_des3_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = mv_cesa_ablkcipher_cra_init,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.setkey = mv_cesa_des3_ede_setkey,
			.encrypt = mv_cesa_cbc_des3_ede_encrypt,
			.decrypt = mv_cesa_cbc_des3_ede_decrypt,
		},
	},
};
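
/*
 * AES: select the encryption or decryption key schedule depending on the
 * direction already set in the template, copy it into the operation
 * context in little-endian form, and encode the key length in the config
 * word (128-bit keys need no extra flag).
 */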

static int mv_cesa_aes_op(struct ablkcipher_request *req,
			  struct mv_cesa_op_ctx *tmpl)
{
	struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	int i;
	u32 *key;
	u32 cfg;

	cfg = CESA_SA_DESC_CFG_CRYPTM_AES;

	if (mv_cesa_get_op_cfg(tmpl) & CESA_SA_DESC_CFG_DIR_DEC)
		key = ctx->aes.key_dec;
	else
		key = ctx->aes.key_enc;

	for (i = 0; i < ctx->aes.key_length / sizeof(u32); i++)
		tmpl->ctx.blkcipher.key[i] = cpu_to_le32(key[i]);

	if (ctx->aes.key_length == 24)
		cfg |= CESA_SA_DESC_CFG_AES_LEN_192;
	else if (ctx->aes.key_length == 32)
		cfg |= CESA_SA_DESC_CFG_AES_LEN_256;

	mv_cesa_update_op_cfg(tmpl, cfg,
			      CESA_SA_DESC_CFG_CRYPTM_MSK |
			      CESA_SA_DESC_CFG_AES_LEN_MSK);

	return mv_cesa_ablkcipher_queue_req(req, tmpl);
}

static int mv_cesa_ecb_aes_encrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_aes_op(req, &tmpl);
}

static int mv_cesa_ecb_aes_decrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_aes_op(req, &tmpl);
}

struct crypto_alg mv_cesa_ecb_aes_alg = {
	.cra_name = "ecb(aes)",
	.cra_driver_name = "mv-ecb-aes",
	.cra_priority = 300,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
		     CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct mv_cesa_aes_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = mv_cesa_ablkcipher_cra_init,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = mv_cesa_aes_setkey,
			.encrypt = mv_cesa_ecb_aes_encrypt,
			.decrypt = mv_cesa_ecb_aes_decrypt,
		},
	},
};

static int mv_cesa_cbc_aes_op(struct ablkcipher_request *req,
			      struct mv_cesa_op_ctx *tmpl)
{
	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC,
			      CESA_SA_DESC_CFG_CRYPTCM_MSK);
	memcpy(tmpl->ctx.blkcipher.iv, req->info, AES_BLOCK_SIZE);

	return mv_cesa_aes_op(req, tmpl);
}

static int mv_cesa_cbc_aes_encrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_cbc_aes_op(req, &tmpl);
}

static int mv_cesa_cbc_aes_decrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_cbc_aes_op(req, &tmpl);
}

struct crypto_alg mv_cesa_cbc_aes_alg = {
	.cra_name = "cbc(aes)",
	.cra_driver_name = "mv-cbc-aes",
	.cra_priority = 300,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
		     CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct mv_cesa_aes_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = mv_cesa_ablkcipher_cra_init,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = mv_cesa_aes_setkey,
			.encrypt = mv_cesa_cbc_aes_encrypt,
			.decrypt = mv_cesa_cbc_aes_decrypt,
		},
	},
};