drivers/crypto/marvell/cipher.c

/*
 * Cipher algorithms supported by the CESA: DES, 3DES and AES.
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 * Author: Arnaud Ebalard <arno@natisbad.org>
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include <crypto/aes.h>
#include <crypto/des.h>

#include "cesa.h"

struct mv_cesa_des_ctx {
	struct mv_cesa_ctx base;
	u8 key[DES_KEY_SIZE];
};

struct mv_cesa_des3_ctx {
	struct mv_cesa_ctx base;
	u8 key[DES3_EDE_KEY_SIZE];
};

struct mv_cesa_aes_ctx {
	struct mv_cesa_ctx base;
	struct crypto_aes_ctx aes;
};

struct mv_cesa_ablkcipher_dma_iter {
	struct mv_cesa_dma_iter base;
	struct mv_cesa_sg_dma_iter src;
	struct mv_cesa_sg_dma_iter dst;
};
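
/*
 * Iterator used by the TDMA path to walk the source and destination
 * scatterlists one crypto operation (at most one SRAM payload) at a time.
 */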
static inline void
mv_cesa_ablkcipher_req_iter_init(struct mv_cesa_ablkcipher_dma_iter *iter,
				 struct ablkcipher_request *req)
{
	mv_cesa_req_dma_iter_init(&iter->base, req->nbytes);
	mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE);
	mv_cesa_sg_dma_iter_init(&iter->dst, req->dst, DMA_FROM_DEVICE);
}

static inline bool
mv_cesa_ablkcipher_req_iter_next_op(struct mv_cesa_ablkcipher_dma_iter *iter)
{
	iter->src.op_offset = 0;
	iter->dst.op_offset = 0;

	return mv_cesa_req_dma_iter_next_op(&iter->base);
}
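
/* Unmap the DMA-mapped scatterlists and free the TDMA descriptor chain. */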
static inline void
mv_cesa_ablkcipher_dma_cleanup(struct ablkcipher_request *req)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);

	if (req->dst != req->src) {
		dma_unmap_sg(cesa_dev->dev, req->dst, creq->dst_nents,
			     DMA_FROM_DEVICE);
		dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
			     DMA_TO_DEVICE);
	} else {
		dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
			     DMA_BIDIRECTIONAL);
	}

	mv_cesa_dma_cleanup(&creq->base);
}

static inline void mv_cesa_ablkcipher_cleanup(struct ablkcipher_request *req)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_ablkcipher_dma_cleanup(req);
}
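
/*
 * Standard (CPU-driven, non-TDMA) processing step: copy the operation
 * descriptor and the next chunk of input data into the engine SRAM, update
 * the encryption length, then enable the completion interrupt and start the
 * accelerator.
 */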
static void mv_cesa_ablkcipher_std_step(struct ablkcipher_request *req)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
	struct mv_cesa_ablkcipher_std_req *sreq = &creq->std;
	struct mv_cesa_engine *engine = creq->base.engine;
	size_t len = min_t(size_t, req->nbytes - sreq->offset,
			   CESA_SA_SRAM_PAYLOAD_SIZE);

	mv_cesa_adjust_op(engine, &sreq->op);
	memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op));

	len = sg_pcopy_to_buffer(req->src, creq->src_nents,
				 engine->sram + CESA_SA_DATA_SRAM_OFFSET,
				 len, sreq->offset);

	sreq->size = len;
	mv_cesa_set_crypt_op_len(&sreq->op, len);

	/* FIXME: only update enc_len field */
	if (!sreq->skip_ctx) {
		memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op));
		sreq->skip_ctx = true;
	} else {
		memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op.desc));
	}

	mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
	writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
	BUG_ON(readl(engine->regs + CESA_SA_CMD) &
	       CESA_SA_CMD_EN_CESA_SA_ACCL0);
	writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}
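
/*
 * Completion of a standard step: copy the processed chunk back from the
 * engine SRAM into the destination scatterlist and return -EINPROGRESS
 * until the whole request has been handled.
 */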
static int mv_cesa_ablkcipher_std_process(struct ablkcipher_request *req,
					  u32 status)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
	struct mv_cesa_ablkcipher_std_req *sreq = &creq->std;
	struct mv_cesa_engine *engine = creq->base.engine;
	size_t len;

	len = sg_pcopy_from_buffer(req->dst, creq->dst_nents,
				   engine->sram + CESA_SA_DATA_SRAM_OFFSET,
				   sreq->size, sreq->offset);

	sreq->offset += len;
	if (sreq->offset < req->nbytes)
		return -EINPROGRESS;

	return 0;
}

static int mv_cesa_ablkcipher_process(struct crypto_async_request *req,
				      u32 status)
{
	struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);
	struct mv_cesa_req *basereq = &creq->base;

	if (mv_cesa_req_get_type(basereq) == CESA_STD_REQ)
		return mv_cesa_ablkcipher_std_process(ablkreq, status);

	return mv_cesa_dma_process(basereq, status);
}

static void mv_cesa_ablkcipher_step(struct crypto_async_request *req)
{
	struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_dma_step(&creq->base);
	else
		mv_cesa_ablkcipher_std_step(ablkreq);
}

static inline void
mv_cesa_ablkcipher_dma_prepare(struct ablkcipher_request *req)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
	struct mv_cesa_req *basereq = &creq->base;

	mv_cesa_dma_prepare(basereq, basereq->engine);
}

static inline void
mv_cesa_ablkcipher_std_prepare(struct ablkcipher_request *req)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
	struct mv_cesa_ablkcipher_std_req *sreq = &creq->std;

	sreq->size = 0;
	sreq->offset = 0;
}

static inline void mv_cesa_ablkcipher_prepare(struct crypto_async_request *req,
					      struct mv_cesa_engine *engine)
{
	struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);

	creq->base.engine = engine;

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_ablkcipher_dma_prepare(ablkreq);
	else
		mv_cesa_ablkcipher_std_prepare(ablkreq);
}

static inline void
mv_cesa_ablkcipher_req_cleanup(struct crypto_async_request *req)
{
	struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);

	mv_cesa_ablkcipher_cleanup(ablkreq);
}
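
/*
 * Final completion handler: subtract the processed bytes from the engine
 * load counter and copy the output IV back into the request, either from
 * the last operation descriptor of the TDMA chain or from the engine SRAM.
 */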
static void
mv_cesa_ablkcipher_complete(struct crypto_async_request *req)
{
	struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);
	struct mv_cesa_engine *engine = creq->base.engine;
	unsigned int ivsize;

	atomic_sub(ablkreq->nbytes, &engine->load);
	ivsize = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(ablkreq));

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ) {
		struct mv_cesa_req *basereq;

		basereq = &creq->base;
		memcpy(ablkreq->info, basereq->chain.last->op->ctx.blkcipher.iv,
		       ivsize);
	} else {
		memcpy_fromio(ablkreq->info,
			      engine->sram + CESA_SA_CRYPT_IV_SRAM_OFFSET,
			      ivsize);
	}
}

static const struct mv_cesa_req_ops mv_cesa_ablkcipher_req_ops = {
	.step = mv_cesa_ablkcipher_step,
	.process = mv_cesa_ablkcipher_process,
	.cleanup = mv_cesa_ablkcipher_req_cleanup,
	.complete = mv_cesa_ablkcipher_complete,
};

static int mv_cesa_ablkcipher_cra_init(struct crypto_tfm *tfm)
{
	struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->base.ops = &mv_cesa_ablkcipher_req_ops;

	tfm->crt_ablkcipher.reqsize = sizeof(struct mv_cesa_ablkcipher_req);

	return 0;
}
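
/*
 * Expand the AES key, then overwrite part of the decryption schedule with
 * words taken from the expanded encryption key; judging by the offset
 * arithmetic below, this is the layout the engine expects when it is
 * programmed for decryption.
 */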
static int mv_cesa_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
			      unsigned int len)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(tfm);
	int remaining;
	int offset;
	int ret;
	int i;

	ret = crypto_aes_expand_key(&ctx->aes, key, len);
	if (ret) {
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return ret;
	}

	remaining = (ctx->aes.key_length - 16) / 4;
	offset = ctx->aes.key_length + 24 - remaining;
	for (i = 0; i < remaining; i++)
		ctx->aes.key_dec[4 + i] =
			cpu_to_le32(ctx->aes.key_enc[offset + i]);

	return 0;
}

static int mv_cesa_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
			      unsigned int len)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(tfm);
	u32 tmp[DES_EXPKEY_WORDS];
	int ret;

	if (len != DES_KEY_SIZE) {
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	ret = des_ekey(tmp, key);
	if (!ret && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
		tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
		return -EINVAL;
	}

	memcpy(ctx->key, key, DES_KEY_SIZE);

	return 0;
}

static int mv_cesa_des3_ede_setkey(struct crypto_ablkcipher *cipher,
				   const u8 *key, unsigned int len)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct mv_cesa_des3_ctx *ctx = crypto_tfm_ctx(tfm);

	if (len != DES3_EDE_KEY_SIZE) {
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	memcpy(ctx->key, key, DES3_EDE_KEY_SIZE);

	return 0;
}
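
/*
 * Build the TDMA descriptor chain for a cipher request: map the
 * scatterlists for DMA, then, for each chunk, add an operation descriptor,
 * the input transfers, a dummy descriptor that launches the engine, and
 * the output transfers. A final descriptor fetches the resulting IV out of
 * the SRAM so it can be copied back on completion.
 */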
static int mv_cesa_ablkcipher_dma_req_init(struct ablkcipher_request *req,
					   const struct mv_cesa_op_ctx *op_templ)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	struct mv_cesa_req *basereq = &creq->base;
	struct mv_cesa_ablkcipher_dma_iter iter;
	bool skip_ctx = false;
	int ret;
	unsigned int ivsize;

	basereq->chain.first = NULL;
	basereq->chain.last = NULL;

	if (req->src != req->dst) {
		ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
				 DMA_TO_DEVICE);
		if (!ret)
			return -ENOMEM;

		ret = dma_map_sg(cesa_dev->dev, req->dst, creq->dst_nents,
				 DMA_FROM_DEVICE);
		if (!ret) {
			ret = -ENOMEM;
			goto err_unmap_src;
		}
	} else {
		ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
				 DMA_BIDIRECTIONAL);
		if (!ret)
			return -ENOMEM;
	}

	mv_cesa_tdma_desc_iter_init(&basereq->chain);
	mv_cesa_ablkcipher_req_iter_init(&iter, req);

	do {
		struct mv_cesa_op_ctx *op;

		op = mv_cesa_dma_add_op(&basereq->chain, op_templ, skip_ctx,
					flags);
		if (IS_ERR(op)) {
			ret = PTR_ERR(op);
			goto err_free_tdma;
		}
		skip_ctx = true;

		mv_cesa_set_crypt_op_len(op, iter.base.op_len);

		/* Add input transfers */
		ret = mv_cesa_dma_add_op_transfers(&basereq->chain, &iter.base,
						   &iter.src, flags);
		if (ret)
			goto err_free_tdma;

		/* Add dummy desc to launch the crypto operation */
		ret = mv_cesa_dma_add_dummy_launch(&basereq->chain, flags);
		if (ret)
			goto err_free_tdma;

		/* Add output transfers */
		ret = mv_cesa_dma_add_op_transfers(&basereq->chain, &iter.base,
						   &iter.dst, flags);
		if (ret)
			goto err_free_tdma;

	} while (mv_cesa_ablkcipher_req_iter_next_op(&iter));

	/* Add output data for IV */
	ivsize = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req));
	ret = mv_cesa_dma_add_result_op(&basereq->chain, CESA_SA_CFG_SRAM_OFFSET,
					CESA_SA_DATA_SRAM_OFFSET,
					CESA_TDMA_SRC_IN_SRAM, flags);
	if (ret)
		goto err_free_tdma;

	basereq->chain.last->flags |= CESA_TDMA_END_OF_REQ;

	return 0;

err_free_tdma:
	mv_cesa_dma_cleanup(basereq);
	if (req->dst != req->src)
		dma_unmap_sg(cesa_dev->dev, req->dst, creq->dst_nents,
			     DMA_FROM_DEVICE);

err_unmap_src:
	dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
		     req->dst != req->src ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL);

	return ret;
}

static inline int
mv_cesa_ablkcipher_std_req_init(struct ablkcipher_request *req,
				const struct mv_cesa_op_ctx *op_templ)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
	struct mv_cesa_ablkcipher_std_req *sreq = &creq->std;
	struct mv_cesa_req *basereq = &creq->base;

	sreq->op = *op_templ;
	sreq->skip_ctx = false;
	basereq->chain.first = NULL;
	basereq->chain.last = NULL;

	return 0;
}
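
/*
 * Common request initialization: check block alignment, count the SG
 * entries and pick the TDMA or standard (CPU-driven) backend depending on
 * the engine capabilities.
 */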
static int mv_cesa_ablkcipher_req_init(struct ablkcipher_request *req,
				       struct mv_cesa_op_ctx *tmpl)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	unsigned int blksize = crypto_ablkcipher_blocksize(tfm);
	int ret;

	if (!IS_ALIGNED(req->nbytes, blksize))
		return -EINVAL;

	creq->src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (creq->src_nents < 0) {
		dev_err(cesa_dev->dev, "Invalid number of src SG");
		return creq->src_nents;
	}
	creq->dst_nents = sg_nents_for_len(req->dst, req->nbytes);
	if (creq->dst_nents < 0) {
		dev_err(cesa_dev->dev, "Invalid number of dst SG");
		return creq->dst_nents;
	}

	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_OP_CRYPT_ONLY,
			      CESA_SA_DESC_CFG_OP_MSK);

	if (cesa_dev->caps->has_tdma)
		ret = mv_cesa_ablkcipher_dma_req_init(req, tmpl);
	else
		ret = mv_cesa_ablkcipher_std_req_init(req, tmpl);

	return ret;
}
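
/*
 * Entry point shared by all encrypt/decrypt handlers: initialize the
 * request from the operation template, select an engine based on its
 * current load and queue the request on it.
 */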
static int mv_cesa_ablkcipher_queue_req(struct ablkcipher_request *req,
					struct mv_cesa_op_ctx *tmpl)
{
	int ret;
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
	struct mv_cesa_engine *engine;

	ret = mv_cesa_ablkcipher_req_init(req, tmpl);
	if (ret)
		return ret;

	engine = mv_cesa_select_engine(req->nbytes);
	mv_cesa_ablkcipher_prepare(&req->base, engine);

	ret = mv_cesa_queue_req(&req->base, &creq->base);

	if (mv_cesa_req_needs_cleanup(&req->base, ret))
		mv_cesa_ablkcipher_cleanup(req);

	return ret;
}
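
/*
 * Per-algorithm handlers: each fills in the operation template (cipher,
 * chaining mode, direction, key, IV) and delegates to
 * mv_cesa_ablkcipher_queue_req().
 */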
static int mv_cesa_des_op(struct ablkcipher_request *req,
			  struct mv_cesa_op_ctx *tmpl)
{
	struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(req->base.tfm);

	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTM_DES,
			      CESA_SA_DESC_CFG_CRYPTM_MSK);

	memcpy(tmpl->ctx.blkcipher.key, ctx->key, DES_KEY_SIZE);

	return mv_cesa_ablkcipher_queue_req(req, tmpl);
}

static int mv_cesa_ecb_des_encrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_des_op(req, &tmpl);
}

static int mv_cesa_ecb_des_decrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_des_op(req, &tmpl);
}

struct crypto_alg mv_cesa_ecb_des_alg = {
	.cra_name = "ecb(des)",
	.cra_driver_name = "mv-ecb-des",
	.cra_priority = 300,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
		     CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
	.cra_blocksize = DES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct mv_cesa_des_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = mv_cesa_ablkcipher_cra_init,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.setkey = mv_cesa_des_setkey,
			.encrypt = mv_cesa_ecb_des_encrypt,
			.decrypt = mv_cesa_ecb_des_decrypt,
		},
	},
};

static int mv_cesa_cbc_des_op(struct ablkcipher_request *req,
			      struct mv_cesa_op_ctx *tmpl)
{
	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC,
			      CESA_SA_DESC_CFG_CRYPTCM_MSK);

	memcpy(tmpl->ctx.blkcipher.iv, req->info, DES_BLOCK_SIZE);

	return mv_cesa_des_op(req, tmpl);
}

static int mv_cesa_cbc_des_encrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_cbc_des_op(req, &tmpl);
}

static int mv_cesa_cbc_des_decrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_cbc_des_op(req, &tmpl);
}

struct crypto_alg mv_cesa_cbc_des_alg = {
	.cra_name = "cbc(des)",
	.cra_driver_name = "mv-cbc-des",
	.cra_priority = 300,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
		     CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
	.cra_blocksize = DES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct mv_cesa_des_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = mv_cesa_ablkcipher_cra_init,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
			.setkey = mv_cesa_des_setkey,
			.encrypt = mv_cesa_cbc_des_encrypt,
			.decrypt = mv_cesa_cbc_des_decrypt,
		},
	},
};

static int mv_cesa_des3_op(struct ablkcipher_request *req,
			   struct mv_cesa_op_ctx *tmpl)
{
	struct mv_cesa_des3_ctx *ctx = crypto_tfm_ctx(req->base.tfm);

	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTM_3DES,
			      CESA_SA_DESC_CFG_CRYPTM_MSK);

	memcpy(tmpl->ctx.blkcipher.key, ctx->key, DES3_EDE_KEY_SIZE);

	return mv_cesa_ablkcipher_queue_req(req, tmpl);
}

static int mv_cesa_ecb_des3_ede_encrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_3DES_EDE |
			   CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_des3_op(req, &tmpl);
}

static int mv_cesa_ecb_des3_ede_decrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_3DES_EDE |
			   CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_des3_op(req, &tmpl);
}

struct crypto_alg mv_cesa_ecb_des3_ede_alg = {
	.cra_name = "ecb(des3_ede)",
	.cra_driver_name = "mv-ecb-des3-ede",
	.cra_priority = 300,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
		     CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
	.cra_blocksize = DES3_EDE_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct mv_cesa_des3_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = mv_cesa_ablkcipher_cra_init,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.setkey = mv_cesa_des3_ede_setkey,
			.encrypt = mv_cesa_ecb_des3_ede_encrypt,
			.decrypt = mv_cesa_ecb_des3_ede_decrypt,
		},
	},
};

static int mv_cesa_cbc_des3_op(struct ablkcipher_request *req,
			       struct mv_cesa_op_ctx *tmpl)
{
	memcpy(tmpl->ctx.blkcipher.iv, req->info, DES3_EDE_BLOCK_SIZE);

	return mv_cesa_des3_op(req, tmpl);
}

static int mv_cesa_cbc_des3_ede_encrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_CBC |
			   CESA_SA_DESC_CFG_3DES_EDE |
			   CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_cbc_des3_op(req, &tmpl);
}

static int mv_cesa_cbc_des3_ede_decrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_CBC |
			   CESA_SA_DESC_CFG_3DES_EDE |
			   CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_cbc_des3_op(req, &tmpl);
}

struct crypto_alg mv_cesa_cbc_des3_ede_alg = {
	.cra_name = "cbc(des3_ede)",
	.cra_driver_name = "mv-cbc-des3-ede",
	.cra_priority = 300,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
		     CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
	.cra_blocksize = DES3_EDE_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct mv_cesa_des3_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = mv_cesa_ablkcipher_cra_init,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.setkey = mv_cesa_des3_ede_setkey,
			.encrypt = mv_cesa_cbc_des3_ede_encrypt,
			.decrypt = mv_cesa_cbc_des3_ede_decrypt,
		},
	},
};
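
/*
 * AES: load the encryption or decryption key schedule depending on the
 * requested direction and set the key-length configuration bits before
 * queueing the request.
 */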
static int mv_cesa_aes_op(struct ablkcipher_request *req,
			  struct mv_cesa_op_ctx *tmpl)
{
	struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	int i;
	u32 *key;
	u32 cfg;

	cfg = CESA_SA_DESC_CFG_CRYPTM_AES;

	if (mv_cesa_get_op_cfg(tmpl) & CESA_SA_DESC_CFG_DIR_DEC)
		key = ctx->aes.key_dec;
	else
		key = ctx->aes.key_enc;

	for (i = 0; i < ctx->aes.key_length / sizeof(u32); i++)
		tmpl->ctx.blkcipher.key[i] = cpu_to_le32(key[i]);

	if (ctx->aes.key_length == 24)
		cfg |= CESA_SA_DESC_CFG_AES_LEN_192;
	else if (ctx->aes.key_length == 32)
		cfg |= CESA_SA_DESC_CFG_AES_LEN_256;

	mv_cesa_update_op_cfg(tmpl, cfg,
			      CESA_SA_DESC_CFG_CRYPTM_MSK |
			      CESA_SA_DESC_CFG_AES_LEN_MSK);

	return mv_cesa_ablkcipher_queue_req(req, tmpl);
}

static int mv_cesa_ecb_aes_encrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_aes_op(req, &tmpl);
}

static int mv_cesa_ecb_aes_decrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_aes_op(req, &tmpl);
}

struct crypto_alg mv_cesa_ecb_aes_alg = {
	.cra_name = "ecb(aes)",
	.cra_driver_name = "mv-ecb-aes",
	.cra_priority = 300,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
		     CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct mv_cesa_aes_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = mv_cesa_ablkcipher_cra_init,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = mv_cesa_aes_setkey,
			.encrypt = mv_cesa_ecb_aes_encrypt,
			.decrypt = mv_cesa_ecb_aes_decrypt,
		},
	},
};

static int mv_cesa_cbc_aes_op(struct ablkcipher_request *req,
			      struct mv_cesa_op_ctx *tmpl)
{
	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC,
			      CESA_SA_DESC_CFG_CRYPTCM_MSK);
	memcpy(tmpl->ctx.blkcipher.iv, req->info, AES_BLOCK_SIZE);

	return mv_cesa_aes_op(req, tmpl);
}

static int mv_cesa_cbc_aes_encrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_cbc_aes_op(req, &tmpl);
}

static int mv_cesa_cbc_aes_decrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_cbc_aes_op(req, &tmpl);
}

struct crypto_alg mv_cesa_cbc_aes_alg = {
	.cra_name = "cbc(aes)",
	.cra_driver_name = "mv-cbc-aes",
	.cra_priority = 300,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
		     CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct mv_cesa_aes_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = mv_cesa_ablkcipher_cra_init,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = mv_cesa_aes_setkey,
			.encrypt = mv_cesa_cbc_aes_encrypt,
			.decrypt = mv_cesa_cbc_aes_decrypt,
		},
	},
};