 * Cipher algorithms supported by the CESA: DES, 3DES and AES.
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 * Author: Arnaud Ebalard <arno@natisbad.org>
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */
#include <crypto/aes.h>
#include <crypto/des.h>

#include "cesa.h"
20 struct mv_cesa_des_ctx
{
21 struct mv_cesa_ctx base
;
25 struct mv_cesa_des3_ctx
{
26 struct mv_cesa_ctx base
;
27 u8 key
[DES3_EDE_KEY_SIZE
];
30 struct mv_cesa_aes_ctx
{
31 struct mv_cesa_ctx base
;
32 struct crypto_aes_ctx aes
;
35 struct mv_cesa_ablkcipher_dma_iter
{
36 struct mv_cesa_dma_iter base
;
37 struct mv_cesa_sg_dma_iter src
;
38 struct mv_cesa_sg_dma_iter dst
;
42 mv_cesa_ablkcipher_req_iter_init(struct mv_cesa_ablkcipher_dma_iter
*iter
,
43 struct ablkcipher_request
*req
)
45 mv_cesa_req_dma_iter_init(&iter
->base
, req
->nbytes
);
46 mv_cesa_sg_dma_iter_init(&iter
->src
, req
->src
, DMA_TO_DEVICE
);
47 mv_cesa_sg_dma_iter_init(&iter
->dst
, req
->dst
, DMA_FROM_DEVICE
);
51 mv_cesa_ablkcipher_req_iter_next_op(struct mv_cesa_ablkcipher_dma_iter
*iter
)
53 iter
->src
.op_offset
= 0;
54 iter
->dst
.op_offset
= 0;
56 return mv_cesa_req_dma_iter_next_op(&iter
->base
);
60 mv_cesa_ablkcipher_dma_cleanup(struct ablkcipher_request
*req
)
62 struct mv_cesa_ablkcipher_req
*creq
= ablkcipher_request_ctx(req
);
64 if (req
->dst
!= req
->src
) {
65 dma_unmap_sg(cesa_dev
->dev
, req
->dst
, creq
->dst_nents
,
67 dma_unmap_sg(cesa_dev
->dev
, req
->src
, creq
->src_nents
,
70 dma_unmap_sg(cesa_dev
->dev
, req
->src
, creq
->src_nents
,
73 mv_cesa_dma_cleanup(&creq
->req
.dma
);
76 static inline void mv_cesa_ablkcipher_cleanup(struct ablkcipher_request
*req
)
78 struct mv_cesa_ablkcipher_req
*creq
= ablkcipher_request_ctx(req
);
80 if (creq
->req
.base
.type
== CESA_DMA_REQ
)
81 mv_cesa_ablkcipher_dma_cleanup(req
);
84 static void mv_cesa_ablkcipher_std_step(struct ablkcipher_request
*req
)
86 struct mv_cesa_ablkcipher_req
*creq
= ablkcipher_request_ctx(req
);
87 struct mv_cesa_ablkcipher_std_req
*sreq
= &creq
->req
.std
;
88 struct mv_cesa_engine
*engine
= sreq
->base
.engine
;
89 size_t len
= min_t(size_t, req
->nbytes
- sreq
->offset
,
90 CESA_SA_SRAM_PAYLOAD_SIZE
);
92 len
= sg_pcopy_to_buffer(req
->src
, creq
->src_nents
,
93 engine
->sram
+ CESA_SA_DATA_SRAM_OFFSET
,
97 mv_cesa_set_crypt_op_len(&sreq
->op
, len
);
99 /* FIXME: only update enc_len field */
100 if (!sreq
->skip_ctx
) {
101 memcpy_toio(engine
->sram
, &sreq
->op
, sizeof(sreq
->op
));
102 sreq
->skip_ctx
= true;
104 memcpy_toio(engine
->sram
, &sreq
->op
, sizeof(sreq
->op
.desc
));
107 mv_cesa_set_int_mask(engine
, CESA_SA_INT_ACCEL0_DONE
);
108 writel_relaxed(CESA_SA_CFG_PARA_DIS
, engine
->regs
+ CESA_SA_CFG
);
109 writel(CESA_SA_CMD_EN_CESA_SA_ACCL0
, engine
->regs
+ CESA_SA_CMD
);
112 static int mv_cesa_ablkcipher_std_process(struct ablkcipher_request
*req
,
115 struct mv_cesa_ablkcipher_req
*creq
= ablkcipher_request_ctx(req
);
116 struct mv_cesa_ablkcipher_std_req
*sreq
= &creq
->req
.std
;
117 struct mv_cesa_engine
*engine
= sreq
->base
.engine
;
120 len
= sg_pcopy_from_buffer(req
->dst
, creq
->dst_nents
,
121 engine
->sram
+ CESA_SA_DATA_SRAM_OFFSET
,
122 sreq
->size
, sreq
->offset
);
125 if (sreq
->offset
< req
->nbytes
)
131 static int mv_cesa_ablkcipher_process(struct crypto_async_request
*req
,
134 struct ablkcipher_request
*ablkreq
= ablkcipher_request_cast(req
);
135 struct mv_cesa_ablkcipher_req
*creq
= ablkcipher_request_ctx(ablkreq
);
136 struct mv_cesa_ablkcipher_std_req
*sreq
= &creq
->req
.std
;
137 struct mv_cesa_engine
*engine
= sreq
->base
.engine
;
140 if (creq
->req
.base
.type
== CESA_DMA_REQ
)
141 ret
= mv_cesa_dma_process(&creq
->req
.dma
, status
);
143 ret
= mv_cesa_ablkcipher_std_process(ablkreq
, status
);
148 memcpy_fromio(ablkreq
->info
,
149 engine
->sram
+ CESA_SA_CRYPT_IV_SRAM_OFFSET
,
150 crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(ablkreq
)));
155 static void mv_cesa_ablkcipher_step(struct crypto_async_request
*req
)
157 struct ablkcipher_request
*ablkreq
= ablkcipher_request_cast(req
);
158 struct mv_cesa_ablkcipher_req
*creq
= ablkcipher_request_ctx(ablkreq
);
160 if (creq
->req
.base
.type
== CESA_DMA_REQ
)
161 mv_cesa_dma_step(&creq
->req
.dma
);
163 mv_cesa_ablkcipher_std_step(ablkreq
);
167 mv_cesa_ablkcipher_dma_prepare(struct ablkcipher_request
*req
)
169 struct mv_cesa_ablkcipher_req
*creq
= ablkcipher_request_ctx(req
);
170 struct mv_cesa_tdma_req
*dreq
= &creq
->req
.dma
;
172 mv_cesa_dma_prepare(dreq
, dreq
->base
.engine
);
176 mv_cesa_ablkcipher_std_prepare(struct ablkcipher_request
*req
)
178 struct mv_cesa_ablkcipher_req
*creq
= ablkcipher_request_ctx(req
);
179 struct mv_cesa_ablkcipher_std_req
*sreq
= &creq
->req
.std
;
180 struct mv_cesa_engine
*engine
= sreq
->base
.engine
;
184 mv_cesa_adjust_op(engine
, &sreq
->op
);
185 memcpy_toio(engine
->sram
, &sreq
->op
, sizeof(sreq
->op
));
188 static inline void mv_cesa_ablkcipher_prepare(struct crypto_async_request
*req
,
189 struct mv_cesa_engine
*engine
)
191 struct ablkcipher_request
*ablkreq
= ablkcipher_request_cast(req
);
192 struct mv_cesa_ablkcipher_req
*creq
= ablkcipher_request_ctx(ablkreq
);
193 creq
->req
.base
.engine
= engine
;
195 if (creq
->req
.base
.type
== CESA_DMA_REQ
)
196 mv_cesa_ablkcipher_dma_prepare(ablkreq
);
198 mv_cesa_ablkcipher_std_prepare(ablkreq
);
/* req_ops cleanup hook: cast back to an ablkcipher request and clean up. */
static inline void
mv_cesa_ablkcipher_req_cleanup(struct crypto_async_request *req)
{
	struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);

	mv_cesa_ablkcipher_cleanup(ablkreq);
}
209 static const struct mv_cesa_req_ops mv_cesa_ablkcipher_req_ops
= {
210 .step
= mv_cesa_ablkcipher_step
,
211 .process
= mv_cesa_ablkcipher_process
,
212 .prepare
= mv_cesa_ablkcipher_prepare
,
213 .cleanup
= mv_cesa_ablkcipher_req_cleanup
,
216 static int mv_cesa_ablkcipher_cra_init(struct crypto_tfm
*tfm
)
218 struct mv_cesa_aes_ctx
*ctx
= crypto_tfm_ctx(tfm
);
220 ctx
->base
.ops
= &mv_cesa_ablkcipher_req_ops
;
222 tfm
->crt_ablkcipher
.reqsize
= sizeof(struct mv_cesa_ablkcipher_req
);
227 static int mv_cesa_aes_setkey(struct crypto_ablkcipher
*cipher
, const u8
*key
,
230 struct crypto_tfm
*tfm
= crypto_ablkcipher_tfm(cipher
);
231 struct mv_cesa_aes_ctx
*ctx
= crypto_tfm_ctx(tfm
);
237 ret
= crypto_aes_expand_key(&ctx
->aes
, key
, len
);
239 crypto_ablkcipher_set_flags(cipher
, CRYPTO_TFM_RES_BAD_KEY_LEN
);
243 remaining
= (ctx
->aes
.key_length
- 16) / 4;
244 offset
= ctx
->aes
.key_length
+ 24 - remaining
;
245 for (i
= 0; i
< remaining
; i
++)
246 ctx
->aes
.key_dec
[4 + i
] =
247 cpu_to_le32(ctx
->aes
.key_enc
[offset
+ i
]);
252 static int mv_cesa_des_setkey(struct crypto_ablkcipher
*cipher
, const u8
*key
,
255 struct crypto_tfm
*tfm
= crypto_ablkcipher_tfm(cipher
);
256 struct mv_cesa_des_ctx
*ctx
= crypto_tfm_ctx(tfm
);
257 u32 tmp
[DES_EXPKEY_WORDS
];
260 if (len
!= DES_KEY_SIZE
) {
261 crypto_ablkcipher_set_flags(cipher
, CRYPTO_TFM_RES_BAD_KEY_LEN
);
265 ret
= des_ekey(tmp
, key
);
266 if (!ret
&& (tfm
->crt_flags
& CRYPTO_TFM_REQ_WEAK_KEY
)) {
267 tfm
->crt_flags
|= CRYPTO_TFM_RES_WEAK_KEY
;
271 memcpy(ctx
->key
, key
, DES_KEY_SIZE
);
276 static int mv_cesa_des3_ede_setkey(struct crypto_ablkcipher
*cipher
,
277 const u8
*key
, unsigned int len
)
279 struct crypto_tfm
*tfm
= crypto_ablkcipher_tfm(cipher
);
280 struct mv_cesa_des_ctx
*ctx
= crypto_tfm_ctx(tfm
);
282 if (len
!= DES3_EDE_KEY_SIZE
) {
283 crypto_ablkcipher_set_flags(cipher
, CRYPTO_TFM_RES_BAD_KEY_LEN
);
287 memcpy(ctx
->key
, key
, DES3_EDE_KEY_SIZE
);
292 static int mv_cesa_ablkcipher_dma_req_init(struct ablkcipher_request
*req
,
293 const struct mv_cesa_op_ctx
*op_templ
)
295 struct mv_cesa_ablkcipher_req
*creq
= ablkcipher_request_ctx(req
);
296 gfp_t flags
= (req
->base
.flags
& CRYPTO_TFM_REQ_MAY_SLEEP
) ?
297 GFP_KERNEL
: GFP_ATOMIC
;
298 struct mv_cesa_tdma_req
*dreq
= &creq
->req
.dma
;
299 struct mv_cesa_ablkcipher_dma_iter iter
;
300 struct mv_cesa_tdma_chain chain
;
301 bool skip_ctx
= false;
304 dreq
->base
.type
= CESA_DMA_REQ
;
305 dreq
->chain
.first
= NULL
;
306 dreq
->chain
.last
= NULL
;
308 if (req
->src
!= req
->dst
) {
309 ret
= dma_map_sg(cesa_dev
->dev
, req
->src
, creq
->src_nents
,
314 ret
= dma_map_sg(cesa_dev
->dev
, req
->dst
, creq
->dst_nents
,
321 ret
= dma_map_sg(cesa_dev
->dev
, req
->src
, creq
->src_nents
,
327 mv_cesa_tdma_desc_iter_init(&chain
);
328 mv_cesa_ablkcipher_req_iter_init(&iter
, req
);
331 struct mv_cesa_op_ctx
*op
;
333 op
= mv_cesa_dma_add_op(&chain
, op_templ
, skip_ctx
, flags
);
340 mv_cesa_set_crypt_op_len(op
, iter
.base
.op_len
);
342 /* Add input transfers */
343 ret
= mv_cesa_dma_add_op_transfers(&chain
, &iter
.base
,
348 /* Add dummy desc to launch the crypto operation */
349 ret
= mv_cesa_dma_add_dummy_launch(&chain
, flags
);
353 /* Add output transfers */
354 ret
= mv_cesa_dma_add_op_transfers(&chain
, &iter
.base
,
359 } while (mv_cesa_ablkcipher_req_iter_next_op(&iter
));
366 mv_cesa_dma_cleanup(dreq
);
367 if (req
->dst
!= req
->src
)
368 dma_unmap_sg(cesa_dev
->dev
, req
->dst
, creq
->dst_nents
,
372 dma_unmap_sg(cesa_dev
->dev
, req
->src
, creq
->src_nents
,
373 req
->dst
!= req
->src
? DMA_TO_DEVICE
: DMA_BIDIRECTIONAL
);
379 mv_cesa_ablkcipher_std_req_init(struct ablkcipher_request
*req
,
380 const struct mv_cesa_op_ctx
*op_templ
)
382 struct mv_cesa_ablkcipher_req
*creq
= ablkcipher_request_ctx(req
);
383 struct mv_cesa_ablkcipher_std_req
*sreq
= &creq
->req
.std
;
385 sreq
->base
.type
= CESA_STD_REQ
;
386 sreq
->op
= *op_templ
;
387 sreq
->skip_ctx
= false;
392 static int mv_cesa_ablkcipher_req_init(struct ablkcipher_request
*req
,
393 struct mv_cesa_op_ctx
*tmpl
)
395 struct mv_cesa_ablkcipher_req
*creq
= ablkcipher_request_ctx(req
);
396 struct crypto_ablkcipher
*tfm
= crypto_ablkcipher_reqtfm(req
);
397 unsigned int blksize
= crypto_ablkcipher_blocksize(tfm
);
400 if (!IS_ALIGNED(req
->nbytes
, blksize
))
403 creq
->src_nents
= sg_nents_for_len(req
->src
, req
->nbytes
);
404 creq
->dst_nents
= sg_nents_for_len(req
->dst
, req
->nbytes
);
406 mv_cesa_update_op_cfg(tmpl
, CESA_SA_DESC_CFG_OP_CRYPT_ONLY
,
407 CESA_SA_DESC_CFG_OP_MSK
);
409 /* TODO: add a threshold for DMA usage */
410 if (cesa_dev
->caps
->has_tdma
)
411 ret
= mv_cesa_ablkcipher_dma_req_init(req
, tmpl
);
413 ret
= mv_cesa_ablkcipher_std_req_init(req
, tmpl
);
418 static int mv_cesa_des_op(struct ablkcipher_request
*req
,
419 struct mv_cesa_op_ctx
*tmpl
)
421 struct mv_cesa_des_ctx
*ctx
= crypto_tfm_ctx(req
->base
.tfm
);
424 mv_cesa_update_op_cfg(tmpl
, CESA_SA_DESC_CFG_CRYPTM_DES
,
425 CESA_SA_DESC_CFG_CRYPTM_MSK
);
427 memcpy(tmpl
->ctx
.blkcipher
.key
, ctx
->key
, DES_KEY_SIZE
);
429 ret
= mv_cesa_ablkcipher_req_init(req
, tmpl
);
433 ret
= mv_cesa_queue_req(&req
->base
);
434 if (mv_cesa_req_needs_cleanup(&req
->base
, ret
))
435 mv_cesa_ablkcipher_cleanup(req
);
440 static int mv_cesa_ecb_des_encrypt(struct ablkcipher_request
*req
)
442 struct mv_cesa_op_ctx tmpl
;
444 mv_cesa_set_op_cfg(&tmpl
,
445 CESA_SA_DESC_CFG_CRYPTCM_ECB
|
446 CESA_SA_DESC_CFG_DIR_ENC
);
448 return mv_cesa_des_op(req
, &tmpl
);
451 static int mv_cesa_ecb_des_decrypt(struct ablkcipher_request
*req
)
453 struct mv_cesa_op_ctx tmpl
;
455 mv_cesa_set_op_cfg(&tmpl
,
456 CESA_SA_DESC_CFG_CRYPTCM_ECB
|
457 CESA_SA_DESC_CFG_DIR_DEC
);
459 return mv_cesa_des_op(req
, &tmpl
);
462 struct crypto_alg mv_cesa_ecb_des_alg
= {
463 .cra_name
= "ecb(des)",
464 .cra_driver_name
= "mv-ecb-des",
466 .cra_flags
= CRYPTO_ALG_TYPE_ABLKCIPHER
|
467 CRYPTO_ALG_KERN_DRIVER_ONLY
| CRYPTO_ALG_ASYNC
,
468 .cra_blocksize
= DES_BLOCK_SIZE
,
469 .cra_ctxsize
= sizeof(struct mv_cesa_des_ctx
),
471 .cra_type
= &crypto_ablkcipher_type
,
472 .cra_module
= THIS_MODULE
,
473 .cra_init
= mv_cesa_ablkcipher_cra_init
,
476 .min_keysize
= DES_KEY_SIZE
,
477 .max_keysize
= DES_KEY_SIZE
,
478 .setkey
= mv_cesa_des_setkey
,
479 .encrypt
= mv_cesa_ecb_des_encrypt
,
480 .decrypt
= mv_cesa_ecb_des_decrypt
,
485 static int mv_cesa_cbc_des_op(struct ablkcipher_request
*req
,
486 struct mv_cesa_op_ctx
*tmpl
)
488 mv_cesa_update_op_cfg(tmpl
, CESA_SA_DESC_CFG_CRYPTCM_CBC
,
489 CESA_SA_DESC_CFG_CRYPTCM_MSK
);
491 memcpy(tmpl
->ctx
.blkcipher
.iv
, req
->info
, DES_BLOCK_SIZE
);
493 return mv_cesa_des_op(req
, tmpl
);
496 static int mv_cesa_cbc_des_encrypt(struct ablkcipher_request
*req
)
498 struct mv_cesa_op_ctx tmpl
;
500 mv_cesa_set_op_cfg(&tmpl
, CESA_SA_DESC_CFG_DIR_ENC
);
502 return mv_cesa_cbc_des_op(req
, &tmpl
);
505 static int mv_cesa_cbc_des_decrypt(struct ablkcipher_request
*req
)
507 struct mv_cesa_op_ctx tmpl
;
509 mv_cesa_set_op_cfg(&tmpl
, CESA_SA_DESC_CFG_DIR_DEC
);
511 return mv_cesa_cbc_des_op(req
, &tmpl
);
514 struct crypto_alg mv_cesa_cbc_des_alg
= {
515 .cra_name
= "cbc(des)",
516 .cra_driver_name
= "mv-cbc-des",
518 .cra_flags
= CRYPTO_ALG_TYPE_ABLKCIPHER
|
519 CRYPTO_ALG_KERN_DRIVER_ONLY
| CRYPTO_ALG_ASYNC
,
520 .cra_blocksize
= DES_BLOCK_SIZE
,
521 .cra_ctxsize
= sizeof(struct mv_cesa_des_ctx
),
523 .cra_type
= &crypto_ablkcipher_type
,
524 .cra_module
= THIS_MODULE
,
525 .cra_init
= mv_cesa_ablkcipher_cra_init
,
528 .min_keysize
= DES_KEY_SIZE
,
529 .max_keysize
= DES_KEY_SIZE
,
530 .ivsize
= DES_BLOCK_SIZE
,
531 .setkey
= mv_cesa_des_setkey
,
532 .encrypt
= mv_cesa_cbc_des_encrypt
,
533 .decrypt
= mv_cesa_cbc_des_decrypt
,
538 static int mv_cesa_des3_op(struct ablkcipher_request
*req
,
539 struct mv_cesa_op_ctx
*tmpl
)
541 struct mv_cesa_des3_ctx
*ctx
= crypto_tfm_ctx(req
->base
.tfm
);
544 mv_cesa_update_op_cfg(tmpl
, CESA_SA_DESC_CFG_CRYPTM_3DES
,
545 CESA_SA_DESC_CFG_CRYPTM_MSK
);
547 memcpy(tmpl
->ctx
.blkcipher
.key
, ctx
->key
, DES3_EDE_KEY_SIZE
);
549 ret
= mv_cesa_ablkcipher_req_init(req
, tmpl
);
553 ret
= mv_cesa_queue_req(&req
->base
);
554 if (mv_cesa_req_needs_cleanup(&req
->base
, ret
))
555 mv_cesa_ablkcipher_cleanup(req
);
560 static int mv_cesa_ecb_des3_ede_encrypt(struct ablkcipher_request
*req
)
562 struct mv_cesa_op_ctx tmpl
;
564 mv_cesa_set_op_cfg(&tmpl
,
565 CESA_SA_DESC_CFG_CRYPTCM_ECB
|
566 CESA_SA_DESC_CFG_3DES_EDE
|
567 CESA_SA_DESC_CFG_DIR_ENC
);
569 return mv_cesa_des3_op(req
, &tmpl
);
572 static int mv_cesa_ecb_des3_ede_decrypt(struct ablkcipher_request
*req
)
574 struct mv_cesa_op_ctx tmpl
;
576 mv_cesa_set_op_cfg(&tmpl
,
577 CESA_SA_DESC_CFG_CRYPTCM_ECB
|
578 CESA_SA_DESC_CFG_3DES_EDE
|
579 CESA_SA_DESC_CFG_DIR_DEC
);
581 return mv_cesa_des3_op(req
, &tmpl
);
584 struct crypto_alg mv_cesa_ecb_des3_ede_alg
= {
585 .cra_name
= "ecb(des3_ede)",
586 .cra_driver_name
= "mv-ecb-des3-ede",
588 .cra_flags
= CRYPTO_ALG_TYPE_ABLKCIPHER
|
589 CRYPTO_ALG_KERN_DRIVER_ONLY
| CRYPTO_ALG_ASYNC
,
590 .cra_blocksize
= DES3_EDE_BLOCK_SIZE
,
591 .cra_ctxsize
= sizeof(struct mv_cesa_des3_ctx
),
593 .cra_type
= &crypto_ablkcipher_type
,
594 .cra_module
= THIS_MODULE
,
595 .cra_init
= mv_cesa_ablkcipher_cra_init
,
598 .min_keysize
= DES3_EDE_KEY_SIZE
,
599 .max_keysize
= DES3_EDE_KEY_SIZE
,
600 .ivsize
= DES3_EDE_BLOCK_SIZE
,
601 .setkey
= mv_cesa_des3_ede_setkey
,
602 .encrypt
= mv_cesa_ecb_des3_ede_encrypt
,
603 .decrypt
= mv_cesa_ecb_des3_ede_decrypt
,
608 static int mv_cesa_cbc_des3_op(struct ablkcipher_request
*req
,
609 struct mv_cesa_op_ctx
*tmpl
)
611 memcpy(tmpl
->ctx
.blkcipher
.iv
, req
->info
, DES3_EDE_BLOCK_SIZE
);
613 return mv_cesa_des3_op(req
, tmpl
);
616 static int mv_cesa_cbc_des3_ede_encrypt(struct ablkcipher_request
*req
)
618 struct mv_cesa_op_ctx tmpl
;
620 mv_cesa_set_op_cfg(&tmpl
,
621 CESA_SA_DESC_CFG_CRYPTCM_CBC
|
622 CESA_SA_DESC_CFG_3DES_EDE
|
623 CESA_SA_DESC_CFG_DIR_ENC
);
625 return mv_cesa_cbc_des3_op(req
, &tmpl
);
628 static int mv_cesa_cbc_des3_ede_decrypt(struct ablkcipher_request
*req
)
630 struct mv_cesa_op_ctx tmpl
;
632 mv_cesa_set_op_cfg(&tmpl
,
633 CESA_SA_DESC_CFG_CRYPTCM_CBC
|
634 CESA_SA_DESC_CFG_3DES_EDE
|
635 CESA_SA_DESC_CFG_DIR_DEC
);
637 return mv_cesa_cbc_des3_op(req
, &tmpl
);
640 struct crypto_alg mv_cesa_cbc_des3_ede_alg
= {
641 .cra_name
= "cbc(des3_ede)",
642 .cra_driver_name
= "mv-cbc-des3-ede",
644 .cra_flags
= CRYPTO_ALG_TYPE_ABLKCIPHER
|
645 CRYPTO_ALG_KERN_DRIVER_ONLY
| CRYPTO_ALG_ASYNC
,
646 .cra_blocksize
= DES3_EDE_BLOCK_SIZE
,
647 .cra_ctxsize
= sizeof(struct mv_cesa_des3_ctx
),
649 .cra_type
= &crypto_ablkcipher_type
,
650 .cra_module
= THIS_MODULE
,
651 .cra_init
= mv_cesa_ablkcipher_cra_init
,
654 .min_keysize
= DES3_EDE_KEY_SIZE
,
655 .max_keysize
= DES3_EDE_KEY_SIZE
,
656 .ivsize
= DES3_EDE_BLOCK_SIZE
,
657 .setkey
= mv_cesa_des3_ede_setkey
,
658 .encrypt
= mv_cesa_cbc_des3_ede_encrypt
,
659 .decrypt
= mv_cesa_cbc_des3_ede_decrypt
,
664 static int mv_cesa_aes_op(struct ablkcipher_request
*req
,
665 struct mv_cesa_op_ctx
*tmpl
)
667 struct mv_cesa_aes_ctx
*ctx
= crypto_tfm_ctx(req
->base
.tfm
);
672 cfg
= CESA_SA_DESC_CFG_CRYPTM_AES
;
674 if (mv_cesa_get_op_cfg(tmpl
) & CESA_SA_DESC_CFG_DIR_DEC
)
675 key
= ctx
->aes
.key_dec
;
677 key
= ctx
->aes
.key_enc
;
679 for (i
= 0; i
< ctx
->aes
.key_length
/ sizeof(u32
); i
++)
680 tmpl
->ctx
.blkcipher
.key
[i
] = cpu_to_le32(key
[i
]);
682 if (ctx
->aes
.key_length
== 24)
683 cfg
|= CESA_SA_DESC_CFG_AES_LEN_192
;
684 else if (ctx
->aes
.key_length
== 32)
685 cfg
|= CESA_SA_DESC_CFG_AES_LEN_256
;
687 mv_cesa_update_op_cfg(tmpl
, cfg
,
688 CESA_SA_DESC_CFG_CRYPTM_MSK
|
689 CESA_SA_DESC_CFG_AES_LEN_MSK
);
691 ret
= mv_cesa_ablkcipher_req_init(req
, tmpl
);
695 ret
= mv_cesa_queue_req(&req
->base
);
696 if (mv_cesa_req_needs_cleanup(&req
->base
, ret
))
697 mv_cesa_ablkcipher_cleanup(req
);
702 static int mv_cesa_ecb_aes_encrypt(struct ablkcipher_request
*req
)
704 struct mv_cesa_op_ctx tmpl
;
706 mv_cesa_set_op_cfg(&tmpl
,
707 CESA_SA_DESC_CFG_CRYPTCM_ECB
|
708 CESA_SA_DESC_CFG_DIR_ENC
);
710 return mv_cesa_aes_op(req
, &tmpl
);
713 static int mv_cesa_ecb_aes_decrypt(struct ablkcipher_request
*req
)
715 struct mv_cesa_op_ctx tmpl
;
717 mv_cesa_set_op_cfg(&tmpl
,
718 CESA_SA_DESC_CFG_CRYPTCM_ECB
|
719 CESA_SA_DESC_CFG_DIR_DEC
);
721 return mv_cesa_aes_op(req
, &tmpl
);
724 struct crypto_alg mv_cesa_ecb_aes_alg
= {
725 .cra_name
= "ecb(aes)",
726 .cra_driver_name
= "mv-ecb-aes",
728 .cra_flags
= CRYPTO_ALG_TYPE_ABLKCIPHER
|
729 CRYPTO_ALG_KERN_DRIVER_ONLY
| CRYPTO_ALG_ASYNC
,
730 .cra_blocksize
= AES_BLOCK_SIZE
,
731 .cra_ctxsize
= sizeof(struct mv_cesa_aes_ctx
),
733 .cra_type
= &crypto_ablkcipher_type
,
734 .cra_module
= THIS_MODULE
,
735 .cra_init
= mv_cesa_ablkcipher_cra_init
,
738 .min_keysize
= AES_MIN_KEY_SIZE
,
739 .max_keysize
= AES_MAX_KEY_SIZE
,
740 .setkey
= mv_cesa_aes_setkey
,
741 .encrypt
= mv_cesa_ecb_aes_encrypt
,
742 .decrypt
= mv_cesa_ecb_aes_decrypt
,
747 static int mv_cesa_cbc_aes_op(struct ablkcipher_request
*req
,
748 struct mv_cesa_op_ctx
*tmpl
)
750 mv_cesa_update_op_cfg(tmpl
, CESA_SA_DESC_CFG_CRYPTCM_CBC
,
751 CESA_SA_DESC_CFG_CRYPTCM_MSK
);
752 memcpy(tmpl
->ctx
.blkcipher
.iv
, req
->info
, AES_BLOCK_SIZE
);
754 return mv_cesa_aes_op(req
, tmpl
);
757 static int mv_cesa_cbc_aes_encrypt(struct ablkcipher_request
*req
)
759 struct mv_cesa_op_ctx tmpl
;
761 mv_cesa_set_op_cfg(&tmpl
, CESA_SA_DESC_CFG_DIR_ENC
);
763 return mv_cesa_cbc_aes_op(req
, &tmpl
);
766 static int mv_cesa_cbc_aes_decrypt(struct ablkcipher_request
*req
)
768 struct mv_cesa_op_ctx tmpl
;
770 mv_cesa_set_op_cfg(&tmpl
, CESA_SA_DESC_CFG_DIR_DEC
);
772 return mv_cesa_cbc_aes_op(req
, &tmpl
);
775 struct crypto_alg mv_cesa_cbc_aes_alg
= {
776 .cra_name
= "cbc(aes)",
777 .cra_driver_name
= "mv-cbc-aes",
779 .cra_flags
= CRYPTO_ALG_TYPE_ABLKCIPHER
|
780 CRYPTO_ALG_KERN_DRIVER_ONLY
| CRYPTO_ALG_ASYNC
,
781 .cra_blocksize
= AES_BLOCK_SIZE
,
782 .cra_ctxsize
= sizeof(struct mv_cesa_aes_ctx
),
784 .cra_type
= &crypto_ablkcipher_type
,
785 .cra_module
= THIS_MODULE
,
786 .cra_init
= mv_cesa_ablkcipher_cra_init
,
789 .min_keysize
= AES_MIN_KEY_SIZE
,
790 .max_keysize
= AES_MAX_KEY_SIZE
,
791 .ivsize
= AES_BLOCK_SIZE
,
792 .setkey
= mv_cesa_aes_setkey
,
793 .encrypt
= mv_cesa_cbc_aes_encrypt
,
794 .decrypt
= mv_cesa_cbc_aes_decrypt
,