drivers/crypto/marvell/cesa/cipher.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Cipher algorithms supported by the CESA: DES, 3DES and AES.
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 * Author: Arnaud Ebalard <arno@natisbad.org>
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 */

#include <crypto/aes.h>
#include <crypto/internal/des.h>

#include "cesa.h"

struct mv_cesa_des_ctx {
	struct mv_cesa_ctx base;
	u8 key[DES_KEY_SIZE];
};

struct mv_cesa_des3_ctx {
	struct mv_cesa_ctx base;
	u8 key[DES3_EDE_KEY_SIZE];
};

struct mv_cesa_aes_ctx {
	struct mv_cesa_ctx base;
	struct crypto_aes_ctx aes;
};

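/*
 * Iterator used when building TDMA chains: it walks the request's src
 * and dst scatterlists in engine-sized chunks.
 */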
struct mv_cesa_skcipher_dma_iter {
	struct mv_cesa_dma_iter base;
	struct mv_cesa_sg_dma_iter src;
	struct mv_cesa_sg_dma_iter dst;
};

static inline void
mv_cesa_skcipher_req_iter_init(struct mv_cesa_skcipher_dma_iter *iter,
			       struct skcipher_request *req)
{
	mv_cesa_req_dma_iter_init(&iter->base, req->cryptlen);
	mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE);
	mv_cesa_sg_dma_iter_init(&iter->dst, req->dst, DMA_FROM_DEVICE);
}

static inline bool
mv_cesa_skcipher_req_iter_next_op(struct mv_cesa_skcipher_dma_iter *iter)
{
	iter->src.op_offset = 0;
	iter->dst.op_offset = 0;

	return mv_cesa_req_dma_iter_next_op(&iter->base);
}

static inline void
mv_cesa_skcipher_dma_cleanup(struct skcipher_request *req)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);

	if (req->dst != req->src) {
		dma_unmap_sg(cesa_dev->dev, req->dst, creq->dst_nents,
			     DMA_FROM_DEVICE);
		dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
			     DMA_TO_DEVICE);
	} else {
		dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
			     DMA_BIDIRECTIONAL);
	}
	mv_cesa_dma_cleanup(&creq->base);
}

static inline void mv_cesa_skcipher_cleanup(struct skcipher_request *req)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_skcipher_dma_cleanup(req);
}

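/*
 * Standard (PIO) mode: the request is processed one chunk at a time,
 * each chunk bounded by CESA_SA_SRAM_PAYLOAD_SIZE. The op descriptor
 * and the input data are copied into the engine SRAM before the
 * accelerator is kicked.
 */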
static void mv_cesa_skcipher_std_step(struct skcipher_request *req)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	struct mv_cesa_skcipher_std_req *sreq = &creq->std;
	struct mv_cesa_engine *engine = creq->base.engine;
	size_t len = min_t(size_t, req->cryptlen - sreq->offset,
			   CESA_SA_SRAM_PAYLOAD_SIZE);

	mv_cesa_adjust_op(engine, &sreq->op);
	memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op));

	len = sg_pcopy_to_buffer(req->src, creq->src_nents,
				 engine->sram + CESA_SA_DATA_SRAM_OFFSET,
				 len, sreq->offset);

	sreq->size = len;
	mv_cesa_set_crypt_op_len(&sreq->op, len);

	/* FIXME: only update enc_len field */
	if (!sreq->skip_ctx) {
		memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op));
		sreq->skip_ctx = true;
	} else {
		memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op.desc));
	}

	mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
	writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
	WARN_ON(readl(engine->regs + CESA_SA_CMD) &
		CESA_SA_CMD_EN_CESA_SA_ACCL0);
	writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}

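/*
 * Copy the processed chunk back out of the engine SRAM; -EINPROGRESS
 * means more chunks remain and another step is required.
 */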
static int mv_cesa_skcipher_std_process(struct skcipher_request *req,
					u32 status)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	struct mv_cesa_skcipher_std_req *sreq = &creq->std;
	struct mv_cesa_engine *engine = creq->base.engine;
	size_t len;

	len = sg_pcopy_from_buffer(req->dst, creq->dst_nents,
				   engine->sram + CESA_SA_DATA_SRAM_OFFSET,
				   sreq->size, sreq->offset);

	sreq->offset += len;
	if (sreq->offset < req->cryptlen)
		return -EINPROGRESS;

	return 0;
}

static int mv_cesa_skcipher_process(struct crypto_async_request *req,
				    u32 status)
{
	struct skcipher_request *skreq = skcipher_request_cast(req);
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);
	struct mv_cesa_req *basereq = &creq->base;

	if (mv_cesa_req_get_type(basereq) == CESA_STD_REQ)
		return mv_cesa_skcipher_std_process(skreq, status);

	return mv_cesa_dma_process(basereq, status);
}

static void mv_cesa_skcipher_step(struct crypto_async_request *req)
{
	struct skcipher_request *skreq = skcipher_request_cast(req);
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_dma_step(&creq->base);
	else
		mv_cesa_skcipher_std_step(skreq);
}

static inline void
mv_cesa_skcipher_dma_prepare(struct skcipher_request *req)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	struct mv_cesa_req *basereq = &creq->base;

	mv_cesa_dma_prepare(basereq, basereq->engine);
}

static inline void
mv_cesa_skcipher_std_prepare(struct skcipher_request *req)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	struct mv_cesa_skcipher_std_req *sreq = &creq->std;

	sreq->size = 0;
	sreq->offset = 0;
}

static inline void mv_cesa_skcipher_prepare(struct crypto_async_request *req,
					    struct mv_cesa_engine *engine)
{
	struct skcipher_request *skreq = skcipher_request_cast(req);
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);

	creq->base.engine = engine;

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_skcipher_dma_prepare(skreq);
	else
		mv_cesa_skcipher_std_prepare(skreq);
}

static inline void
mv_cesa_skcipher_req_cleanup(struct crypto_async_request *req)
{
	struct skcipher_request *skreq = skcipher_request_cast(req);

	mv_cesa_skcipher_cleanup(skreq);
}

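/*
 * Completion handler: copy the last IV back into the request so that
 * CBC chaining across requests works, either from the final TDMA
 * descriptor (DMA mode) or straight from the engine SRAM (standard
 * mode).
 */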
static void
mv_cesa_skcipher_complete(struct crypto_async_request *req)
{
	struct skcipher_request *skreq = skcipher_request_cast(req);
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);
	struct mv_cesa_engine *engine = creq->base.engine;
	unsigned int ivsize;

	atomic_sub(skreq->cryptlen, &engine->load);
	ivsize = crypto_skcipher_ivsize(crypto_skcipher_reqtfm(skreq));

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ) {
		struct mv_cesa_req *basereq;

		basereq = &creq->base;
		memcpy(skreq->iv, basereq->chain.last->op->ctx.skcipher.iv,
		       ivsize);
	} else {
		memcpy_fromio(skreq->iv,
			      engine->sram + CESA_SA_CRYPT_IV_SRAM_OFFSET,
			      ivsize);
	}
}

static const struct mv_cesa_req_ops mv_cesa_skcipher_req_ops = {
	.step = mv_cesa_skcipher_step,
	.process = mv_cesa_skcipher_process,
	.cleanup = mv_cesa_skcipher_req_cleanup,
	.complete = mv_cesa_skcipher_complete,
};

static void mv_cesa_skcipher_cra_exit(struct crypto_tfm *tfm)
{
	void *ctx = crypto_tfm_ctx(tfm);

	memzero_explicit(ctx, tfm->__crt_alg->cra_ctxsize);
}

static int mv_cesa_skcipher_cra_init(struct crypto_tfm *tfm)
{
	struct mv_cesa_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->ops = &mv_cesa_skcipher_req_ops;

	crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
				    sizeof(struct mv_cesa_skcipher_req));

	return 0;
}

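/*
 * Expand the key with the generic AES library. For 192/256-bit keys,
 * additionally copy the trailing words of the encryption schedule into
 * key_dec[4..], completing the decryption key material the engine
 * expects.
 */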
static int mv_cesa_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
			      unsigned int len)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
	struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(tfm);
	int remaining;
	int offset;
	int ret;
	int i;

	ret = aes_expandkey(&ctx->aes, key, len);
	if (ret)
		return ret;

	remaining = (ctx->aes.key_length - 16) / 4;
	offset = ctx->aes.key_length + 24 - remaining;
	for (i = 0; i < remaining; i++)
		ctx->aes.key_dec[4 + i] =
			cpu_to_le32(ctx->aes.key_enc[offset + i]);

	return 0;
}

static int mv_cesa_des_setkey(struct crypto_skcipher *cipher, const u8 *key,
			      unsigned int len)
{
	struct mv_cesa_des_ctx *ctx = crypto_skcipher_ctx(cipher);
	int err;

	err = verify_skcipher_des_key(cipher, key);
	if (err)
		return err;

	memcpy(ctx->key, key, DES_KEY_SIZE);

	return 0;
}

static int mv_cesa_des3_ede_setkey(struct crypto_skcipher *cipher,
				   const u8 *key, unsigned int len)
{
	struct mv_cesa_des3_ctx *ctx = crypto_skcipher_ctx(cipher);
	int err;

	err = verify_skcipher_des3_key(cipher, key);
	if (err)
		return err;

	memcpy(ctx->key, key, DES3_EDE_KEY_SIZE);

	return 0;
}

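/*
 * DMA mode: map the scatterlists, then build a TDMA descriptor chain
 * that, for each engine-sized chunk, copies the input into SRAM,
 * launches the crypto operation and copies the result back out. A
 * final descriptor retrieves the updated IV.
 */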
static int mv_cesa_skcipher_dma_req_init(struct skcipher_request *req,
					 const struct mv_cesa_op_ctx *op_templ)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	struct mv_cesa_req *basereq = &creq->base;
	struct mv_cesa_skcipher_dma_iter iter;
	bool skip_ctx = false;
	int ret;

	basereq->chain.first = NULL;
	basereq->chain.last = NULL;

	if (req->src != req->dst) {
		ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
				 DMA_TO_DEVICE);
		if (!ret)
			return -ENOMEM;

		ret = dma_map_sg(cesa_dev->dev, req->dst, creq->dst_nents,
				 DMA_FROM_DEVICE);
		if (!ret) {
			ret = -ENOMEM;
			goto err_unmap_src;
		}
	} else {
		ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
				 DMA_BIDIRECTIONAL);
		if (!ret)
			return -ENOMEM;
	}

	mv_cesa_tdma_desc_iter_init(&basereq->chain);
	mv_cesa_skcipher_req_iter_init(&iter, req);

	do {
		struct mv_cesa_op_ctx *op;

		op = mv_cesa_dma_add_op(&basereq->chain, op_templ, skip_ctx,
					flags);
		if (IS_ERR(op)) {
			ret = PTR_ERR(op);
			goto err_free_tdma;
		}
		skip_ctx = true;

		mv_cesa_set_crypt_op_len(op, iter.base.op_len);

		/* Add input transfers */
		ret = mv_cesa_dma_add_op_transfers(&basereq->chain, &iter.base,
						   &iter.src, flags);
		if (ret)
			goto err_free_tdma;

		/* Add dummy desc to launch the crypto operation */
		ret = mv_cesa_dma_add_dummy_launch(&basereq->chain, flags);
		if (ret)
			goto err_free_tdma;

		/* Add output transfers */
		ret = mv_cesa_dma_add_op_transfers(&basereq->chain, &iter.base,
						   &iter.dst, flags);
		if (ret)
			goto err_free_tdma;

	} while (mv_cesa_skcipher_req_iter_next_op(&iter));

	/* Add output data for IV */
	ret = mv_cesa_dma_add_result_op(&basereq->chain,
					CESA_SA_CFG_SRAM_OFFSET,
					CESA_SA_DATA_SRAM_OFFSET,
					CESA_TDMA_SRC_IN_SRAM, flags);

	if (ret)
		goto err_free_tdma;

	basereq->chain.last->flags |= CESA_TDMA_END_OF_REQ;

	return 0;

err_free_tdma:
	mv_cesa_dma_cleanup(basereq);
	if (req->dst != req->src)
		dma_unmap_sg(cesa_dev->dev, req->dst, creq->dst_nents,
			     DMA_FROM_DEVICE);

err_unmap_src:
	dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
		     req->dst != req->src ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL);

	return ret;
}

static inline int
mv_cesa_skcipher_std_req_init(struct skcipher_request *req,
			      const struct mv_cesa_op_ctx *op_templ)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	struct mv_cesa_skcipher_std_req *sreq = &creq->std;
	struct mv_cesa_req *basereq = &creq->base;

	sreq->op = *op_templ;
	sreq->skip_ctx = false;
	basereq->chain.first = NULL;
	basereq->chain.last = NULL;

	return 0;
}

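/*
 * Common request setup: reject lengths that are not block-aligned,
 * count the SG entries, force a crypt-only operation and pick the DMA
 * or standard backend depending on the engine capabilities.
 */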
static int mv_cesa_skcipher_req_init(struct skcipher_request *req,
				     struct mv_cesa_op_ctx *tmpl)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	unsigned int blksize = crypto_skcipher_blocksize(tfm);
	int ret;

	if (!IS_ALIGNED(req->cryptlen, blksize))
		return -EINVAL;

	creq->src_nents = sg_nents_for_len(req->src, req->cryptlen);
	if (creq->src_nents < 0) {
		dev_err(cesa_dev->dev, "Invalid number of src SG");
		return creq->src_nents;
	}
	creq->dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
	if (creq->dst_nents < 0) {
		dev_err(cesa_dev->dev, "Invalid number of dst SG");
		return creq->dst_nents;
	}

	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_OP_CRYPT_ONLY,
			      CESA_SA_DESC_CFG_OP_MSK);

	if (cesa_dev->caps->has_tdma)
		ret = mv_cesa_skcipher_dma_req_init(req, tmpl);
	else
		ret = mv_cesa_skcipher_std_req_init(req, tmpl);

	return ret;
}

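/*
 * Initialize the request, select an engine to run it on and queue it,
 * cleaning up immediately if queueing failed.
 */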
static int mv_cesa_skcipher_queue_req(struct skcipher_request *req,
				      struct mv_cesa_op_ctx *tmpl)
{
	int ret;
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	struct mv_cesa_engine *engine;

	ret = mv_cesa_skcipher_req_init(req, tmpl);
	if (ret)
		return ret;

	engine = mv_cesa_select_engine(req->cryptlen);
	mv_cesa_skcipher_prepare(&req->base, engine);

	ret = mv_cesa_queue_req(&req->base, &creq->base);

	if (mv_cesa_req_needs_cleanup(&req->base, ret))
		mv_cesa_skcipher_cleanup(req);

	return ret;
}

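/*
 * Mode-specific entry points: each handler below fills a stack-allocated
 * op template with the cipher, direction and key material, then hands it
 * off to mv_cesa_skcipher_queue_req().
 */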
static int mv_cesa_des_op(struct skcipher_request *req,
			  struct mv_cesa_op_ctx *tmpl)
{
	struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(req->base.tfm);

	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTM_DES,
			      CESA_SA_DESC_CFG_CRYPTM_MSK);

	memcpy(tmpl->ctx.skcipher.key, ctx->key, DES_KEY_SIZE);

	return mv_cesa_skcipher_queue_req(req, tmpl);
}

static int mv_cesa_ecb_des_encrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_des_op(req, &tmpl);
}

static int mv_cesa_ecb_des_decrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_des_op(req, &tmpl);
}

struct skcipher_alg mv_cesa_ecb_des_alg = {
	.setkey = mv_cesa_des_setkey,
	.encrypt = mv_cesa_ecb_des_encrypt,
	.decrypt = mv_cesa_ecb_des_decrypt,
	.min_keysize = DES_KEY_SIZE,
	.max_keysize = DES_KEY_SIZE,
	.base = {
		.cra_name = "ecb(des)",
		.cra_driver_name = "mv-ecb-des",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
		.cra_blocksize = DES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mv_cesa_des_ctx),
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
		.cra_init = mv_cesa_skcipher_cra_init,
		.cra_exit = mv_cesa_skcipher_cra_exit,
	},
};

static int mv_cesa_cbc_des_op(struct skcipher_request *req,
			      struct mv_cesa_op_ctx *tmpl)
{
	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC,
			      CESA_SA_DESC_CFG_CRYPTCM_MSK);

	memcpy(tmpl->ctx.skcipher.iv, req->iv, DES_BLOCK_SIZE);

	return mv_cesa_des_op(req, tmpl);
}

static int mv_cesa_cbc_des_encrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_cbc_des_op(req, &tmpl);
}

static int mv_cesa_cbc_des_decrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_cbc_des_op(req, &tmpl);
}

struct skcipher_alg mv_cesa_cbc_des_alg = {
	.setkey = mv_cesa_des_setkey,
	.encrypt = mv_cesa_cbc_des_encrypt,
	.decrypt = mv_cesa_cbc_des_decrypt,
	.min_keysize = DES_KEY_SIZE,
	.max_keysize = DES_KEY_SIZE,
	.ivsize = DES_BLOCK_SIZE,
	.base = {
		.cra_name = "cbc(des)",
		.cra_driver_name = "mv-cbc-des",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
		.cra_blocksize = DES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mv_cesa_des_ctx),
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
		.cra_init = mv_cesa_skcipher_cra_init,
		.cra_exit = mv_cesa_skcipher_cra_exit,
	},
};

static int mv_cesa_des3_op(struct skcipher_request *req,
			   struct mv_cesa_op_ctx *tmpl)
{
	struct mv_cesa_des3_ctx *ctx = crypto_tfm_ctx(req->base.tfm);

	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTM_3DES,
			      CESA_SA_DESC_CFG_CRYPTM_MSK);

	memcpy(tmpl->ctx.skcipher.key, ctx->key, DES3_EDE_KEY_SIZE);

	return mv_cesa_skcipher_queue_req(req, tmpl);
}

static int mv_cesa_ecb_des3_ede_encrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_3DES_EDE |
			   CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_des3_op(req, &tmpl);
}

static int mv_cesa_ecb_des3_ede_decrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_3DES_EDE |
			   CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_des3_op(req, &tmpl);
}

struct skcipher_alg mv_cesa_ecb_des3_ede_alg = {
	.setkey = mv_cesa_des3_ede_setkey,
	.encrypt = mv_cesa_ecb_des3_ede_encrypt,
	.decrypt = mv_cesa_ecb_des3_ede_decrypt,
	.min_keysize = DES3_EDE_KEY_SIZE,
	.max_keysize = DES3_EDE_KEY_SIZE,
	.base = {
		.cra_name = "ecb(des3_ede)",
		.cra_driver_name = "mv-ecb-des3-ede",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
		.cra_blocksize = DES3_EDE_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mv_cesa_des3_ctx),
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
		.cra_init = mv_cesa_skcipher_cra_init,
		.cra_exit = mv_cesa_skcipher_cra_exit,
	},
};

static int mv_cesa_cbc_des3_op(struct skcipher_request *req,
			       struct mv_cesa_op_ctx *tmpl)
{
	memcpy(tmpl->ctx.skcipher.iv, req->iv, DES3_EDE_BLOCK_SIZE);

	return mv_cesa_des3_op(req, tmpl);
}

static int mv_cesa_cbc_des3_ede_encrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_CBC |
			   CESA_SA_DESC_CFG_3DES_EDE |
			   CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_cbc_des3_op(req, &tmpl);
}

static int mv_cesa_cbc_des3_ede_decrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_CBC |
			   CESA_SA_DESC_CFG_3DES_EDE |
			   CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_cbc_des3_op(req, &tmpl);
}

struct skcipher_alg mv_cesa_cbc_des3_ede_alg = {
	.setkey = mv_cesa_des3_ede_setkey,
	.encrypt = mv_cesa_cbc_des3_ede_encrypt,
	.decrypt = mv_cesa_cbc_des3_ede_decrypt,
	.min_keysize = DES3_EDE_KEY_SIZE,
	.max_keysize = DES3_EDE_KEY_SIZE,
	.ivsize = DES3_EDE_BLOCK_SIZE,
	.base = {
		.cra_name = "cbc(des3_ede)",
		.cra_driver_name = "mv-cbc-des3-ede",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
		.cra_blocksize = DES3_EDE_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mv_cesa_des3_ctx),
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
		.cra_init = mv_cesa_skcipher_cra_init,
		.cra_exit = mv_cesa_skcipher_cra_exit,
	},
};

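/*
 * AES: select the encryption or decryption key schedule, load it into
 * the op template in little-endian form and encode the key length in
 * the op configuration.
 */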
static int mv_cesa_aes_op(struct skcipher_request *req,
			  struct mv_cesa_op_ctx *tmpl)
{
	struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	int i;
	u32 *key;
	u32 cfg;

	cfg = CESA_SA_DESC_CFG_CRYPTM_AES;

	if (mv_cesa_get_op_cfg(tmpl) & CESA_SA_DESC_CFG_DIR_DEC)
		key = ctx->aes.key_dec;
	else
		key = ctx->aes.key_enc;

	for (i = 0; i < ctx->aes.key_length / sizeof(u32); i++)
		tmpl->ctx.skcipher.key[i] = cpu_to_le32(key[i]);

	if (ctx->aes.key_length == 24)
		cfg |= CESA_SA_DESC_CFG_AES_LEN_192;
	else if (ctx->aes.key_length == 32)
		cfg |= CESA_SA_DESC_CFG_AES_LEN_256;

	mv_cesa_update_op_cfg(tmpl, cfg,
			      CESA_SA_DESC_CFG_CRYPTM_MSK |
			      CESA_SA_DESC_CFG_AES_LEN_MSK);

	return mv_cesa_skcipher_queue_req(req, tmpl);
}

static int mv_cesa_ecb_aes_encrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_aes_op(req, &tmpl);
}

static int mv_cesa_ecb_aes_decrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_aes_op(req, &tmpl);
}

struct skcipher_alg mv_cesa_ecb_aes_alg = {
	.setkey = mv_cesa_aes_setkey,
	.encrypt = mv_cesa_ecb_aes_encrypt,
	.decrypt = mv_cesa_ecb_aes_decrypt,
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.base = {
		.cra_name = "ecb(aes)",
		.cra_driver_name = "mv-ecb-aes",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mv_cesa_aes_ctx),
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
		.cra_init = mv_cesa_skcipher_cra_init,
		.cra_exit = mv_cesa_skcipher_cra_exit,
	},
};

static int mv_cesa_cbc_aes_op(struct skcipher_request *req,
			      struct mv_cesa_op_ctx *tmpl)
{
	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC,
			      CESA_SA_DESC_CFG_CRYPTCM_MSK);
	memcpy(tmpl->ctx.skcipher.iv, req->iv, AES_BLOCK_SIZE);

	return mv_cesa_aes_op(req, tmpl);
}

static int mv_cesa_cbc_aes_encrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_cbc_aes_op(req, &tmpl);
}

static int mv_cesa_cbc_aes_decrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_cbc_aes_op(req, &tmpl);
}

struct skcipher_alg mv_cesa_cbc_aes_alg = {
	.setkey = mv_cesa_aes_setkey,
	.encrypt = mv_cesa_cbc_aes_encrypt,
	.decrypt = mv_cesa_cbc_aes_decrypt,
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.ivsize = AES_BLOCK_SIZE,
	.base = {
		.cra_name = "cbc(aes)",
		.cra_driver_name = "mv-cbc-aes",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mv_cesa_aes_ctx),
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
		.cra_init = mv_cesa_skcipher_cra_init,
		.cra_exit = mv_cesa_skcipher_cra_exit,
	},
};