// SPDX-License-Identifier: GPL-2.0-only
/*
 * Cipher algorithms supported by the CESA: DES, 3DES and AES.
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 * Author: Arnaud Ebalard <arno@natisbad.org>
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 */

#include <crypto/aes.h>
#include <crypto/internal/des.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>

#include "cesa.h"
struct mv_cesa_des_ctx {
	struct mv_cesa_ctx base;
	u8 key[DES_KEY_SIZE];
};

struct mv_cesa_des3_ctx {
	struct mv_cesa_ctx base;
	u8 key[DES3_EDE_KEY_SIZE];
};

struct mv_cesa_aes_ctx {
	struct mv_cesa_ctx base;
	struct crypto_aes_ctx aes;
};

struct mv_cesa_skcipher_dma_iter {
	struct mv_cesa_dma_iter base;
	struct mv_cesa_sg_dma_iter src;
	struct mv_cesa_sg_dma_iter dst;
};
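/*
 * DMA-mode helpers: walk the request in SRAM-sized chunks. The base
 * iterator tracks progress through ->cryptlen while the src and dst
 * iterators track the matching positions in the scatterlists.
 */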
static inline void
mv_cesa_skcipher_req_iter_init(struct mv_cesa_skcipher_dma_iter *iter,
			       struct skcipher_request *req)
{
	mv_cesa_req_dma_iter_init(&iter->base, req->cryptlen);
	mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE);
	mv_cesa_sg_dma_iter_init(&iter->dst, req->dst, DMA_FROM_DEVICE);
}
static inline bool
mv_cesa_skcipher_req_iter_next_op(struct mv_cesa_skcipher_dma_iter *iter)
{
	iter->src.op_offset = 0;
	iter->dst.op_offset = 0;

	return mv_cesa_req_dma_iter_next_op(&iter->base);
}
static inline void
mv_cesa_skcipher_dma_cleanup(struct skcipher_request *req)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);

	if (req->dst != req->src) {
		dma_unmap_sg(cesa_dev->dev, req->dst, creq->dst_nents,
			     DMA_FROM_DEVICE);
		dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
			     DMA_TO_DEVICE);
	} else {
		dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
			     DMA_BIDIRECTIONAL);
	}
	mv_cesa_dma_cleanup(&creq->base);
}
static inline void mv_cesa_skcipher_cleanup(struct skcipher_request *req)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_skcipher_dma_cleanup(req);
}
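/*
 * Standard (non-TDMA) mode: the CPU copies the operation descriptor and
 * one CESA_SA_SRAM_PAYLOAD_SIZE chunk of input into the engine SRAM,
 * then kicks the accelerator and waits for its completion interrupt.
 */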
static void mv_cesa_skcipher_std_step(struct skcipher_request *req)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	struct mv_cesa_skcipher_std_req *sreq = &creq->std;
	struct mv_cesa_engine *engine = creq->base.engine;
	size_t len = min_t(size_t, req->cryptlen - sreq->offset,
			   CESA_SA_SRAM_PAYLOAD_SIZE);

	mv_cesa_adjust_op(engine, &sreq->op);
	memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op));

	len = sg_pcopy_to_buffer(req->src, creq->src_nents,
				 engine->sram + CESA_SA_DATA_SRAM_OFFSET,
				 len, sreq->offset);

	sreq->size = len;
	mv_cesa_set_crypt_op_len(&sreq->op, len);

	/* FIXME: only update enc_len field */
	if (!sreq->skip_ctx) {
		memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op));
		sreq->skip_ctx = true;
	} else {
		memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op.desc));
	}

	mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
	writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
	WARN_ON(readl(engine->regs + CESA_SA_CMD) &
		CESA_SA_CMD_EN_CESA_SA_ACCL0);
	writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}
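/*
 * Completion side of the standard path: copy the processed chunk out of
 * SRAM and return -EINPROGRESS until all of ->cryptlen has been handled,
 * so the core re-runs the step callback for the next chunk.
 */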
static int mv_cesa_skcipher_std_process(struct skcipher_request *req,
					u32 status)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	struct mv_cesa_skcipher_std_req *sreq = &creq->std;
	struct mv_cesa_engine *engine = creq->base.engine;
	size_t len;

	len = sg_pcopy_from_buffer(req->dst, creq->dst_nents,
				   engine->sram + CESA_SA_DATA_SRAM_OFFSET,
				   sreq->size, sreq->offset);

	sreq->offset += len;
	if (sreq->offset < req->cryptlen)
		return -EINPROGRESS;

	return 0;
}
static int mv_cesa_skcipher_process(struct crypto_async_request *req,
				    u32 status)
{
	struct skcipher_request *skreq = skcipher_request_cast(req);
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);
	struct mv_cesa_req *basereq = &creq->base;

	if (mv_cesa_req_get_type(basereq) == CESA_STD_REQ)
		return mv_cesa_skcipher_std_process(skreq, status);

	return mv_cesa_dma_process(basereq, status);
}
static void mv_cesa_skcipher_step(struct crypto_async_request *req)
{
	struct skcipher_request *skreq = skcipher_request_cast(req);
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_dma_step(&creq->base);
	else
		mv_cesa_skcipher_std_step(skreq);
}
static inline void
mv_cesa_skcipher_dma_prepare(struct skcipher_request *req)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	struct mv_cesa_req *basereq = &creq->base;

	mv_cesa_dma_prepare(basereq, basereq->engine);
}
static inline void
mv_cesa_skcipher_std_prepare(struct skcipher_request *req)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	struct mv_cesa_skcipher_std_req *sreq = &creq->std;

	sreq->size = 0;
	sreq->offset = 0;
}
static inline void mv_cesa_skcipher_prepare(struct crypto_async_request *req,
					    struct mv_cesa_engine *engine)
{
	struct skcipher_request *skreq = skcipher_request_cast(req);
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);

	creq->base.engine = engine;

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_skcipher_dma_prepare(skreq);
	else
		mv_cesa_skcipher_std_prepare(skreq);
}
static inline void
mv_cesa_skcipher_req_cleanup(struct crypto_async_request *req)
{
	struct skcipher_request *skreq = skcipher_request_cast(req);

	mv_cesa_skcipher_cleanup(skreq);
}
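/*
 * On completion the updated IV is handed back to the caller: in DMA mode
 * it sits in the context of the last operation descriptor of the chain,
 * in standard mode it is read back from the engine SRAM.
 */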
static void
mv_cesa_skcipher_complete(struct crypto_async_request *req)
{
	struct skcipher_request *skreq = skcipher_request_cast(req);
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);
	struct mv_cesa_engine *engine = creq->base.engine;
	unsigned int ivsize;

	atomic_sub(skreq->cryptlen, &engine->load);
	ivsize = crypto_skcipher_ivsize(crypto_skcipher_reqtfm(skreq));

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ) {
		struct mv_cesa_req *basereq;

		basereq = &creq->base;
		memcpy(skreq->iv, basereq->chain.last->op->ctx.skcipher.iv,
		       ivsize);
	} else {
		memcpy_fromio(skreq->iv,
			      engine->sram + CESA_SA_CRYPT_IV_SRAM_OFFSET,
			      ivsize);
	}
}
static const struct mv_cesa_req_ops mv_cesa_skcipher_req_ops = {
	.step = mv_cesa_skcipher_step,
	.process = mv_cesa_skcipher_process,
	.cleanup = mv_cesa_skcipher_req_cleanup,
	.complete = mv_cesa_skcipher_complete,
};
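/*
 * The whole transformation context (expanded keys included) is cleared
 * on exit so no key material lingers in freed memory.
 */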
static void mv_cesa_skcipher_cra_exit(struct crypto_tfm *tfm)
{
	void *ctx = crypto_tfm_ctx(tfm);

	memzero_explicit(ctx, tfm->__crt_alg->cra_ctxsize);
}
static int mv_cesa_skcipher_cra_init(struct crypto_tfm *tfm)
{
	struct mv_cesa_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->ops = &mv_cesa_skcipher_req_ops;

	crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
				    sizeof(struct mv_cesa_skcipher_req));

	return 0;
}
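/*
 * aes_expandkey() fills ctx->aes.key_dec with the software "equivalent
 * inverse cipher" schedule, but the engine derives the decryption
 * schedule itself from the tail of the encryption schedule. The fixup
 * loop below therefore overwrites key_dec[4..] so that key_dec holds
 * the final round key (already placed there by aes_expandkey()),
 * followed by the schedule words that precede it — the layout the
 * hardware appears to expect for 192- and 256-bit keys.
 */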
static int mv_cesa_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
			      unsigned int len)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
	struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(tfm);
	int remaining;
	int offset;
	int ret;
	int i;

	ret = aes_expandkey(&ctx->aes, key, len);
	if (ret)
		return ret;

	remaining = (ctx->aes.key_length - 16) / 4;
	offset = ctx->aes.key_length + 24 - remaining;
	for (i = 0; i < remaining; i++)
		ctx->aes.key_dec[4 + i] = ctx->aes.key_enc[offset + i];

	return 0;
}
static int mv_cesa_des_setkey(struct crypto_skcipher *cipher, const u8 *key,
			      unsigned int len)
{
	struct mv_cesa_des_ctx *ctx = crypto_skcipher_ctx(cipher);
	int err;

	err = verify_skcipher_des_key(cipher, key);
	if (err)
		return err;

	memcpy(ctx->key, key, DES_KEY_SIZE);

	return 0;
}
static int mv_cesa_des3_ede_setkey(struct crypto_skcipher *cipher,
				   const u8 *key, unsigned int len)
{
	struct mv_cesa_des3_ctx *ctx = crypto_skcipher_ctx(cipher);
	int err;

	err = verify_skcipher_des3_key(cipher, key);
	if (err)
		return err;

	memcpy(ctx->key, key, DES3_EDE_KEY_SIZE);

	return 0;
}
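/*
 * DMA mode builds a TDMA descriptor chain that processes the request in
 * SRAM-sized chunks without CPU intervention: for each chunk, copy-in
 * transfers, a dummy descriptor that launches the engine, and copy-out
 * transfers are appended, followed by a final result operation that
 * copies the updated operation context (IV included) back from SRAM.
 */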
static int mv_cesa_skcipher_dma_req_init(struct skcipher_request *req,
					 const struct mv_cesa_op_ctx *op_templ)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	struct mv_cesa_req *basereq = &creq->base;
	struct mv_cesa_skcipher_dma_iter iter;
	bool skip_ctx = false;
	int ret;

	basereq->chain.first = NULL;
	basereq->chain.last = NULL;

	if (req->src != req->dst) {
		ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
				 DMA_TO_DEVICE);
		if (!ret)
			return -ENOMEM;

		ret = dma_map_sg(cesa_dev->dev, req->dst, creq->dst_nents,
				 DMA_FROM_DEVICE);
		if (!ret) {
			ret = -ENOMEM;
			goto err_unmap_src;
		}
	} else {
		ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
				 DMA_BIDIRECTIONAL);
		if (!ret)
			return -ENOMEM;
	}

	mv_cesa_tdma_desc_iter_init(&basereq->chain);
	mv_cesa_skcipher_req_iter_init(&iter, req);

	do {
		struct mv_cesa_op_ctx *op;

		op = mv_cesa_dma_add_op(&basereq->chain, op_templ, skip_ctx,
					flags);
		if (IS_ERR(op)) {
			ret = PTR_ERR(op);
			goto err_free_tdma;
		}
		skip_ctx = true;

		mv_cesa_set_crypt_op_len(op, iter.base.op_len);

		/* Add input transfers */
		ret = mv_cesa_dma_add_op_transfers(&basereq->chain, &iter.base,
						   &iter.src, flags);
		if (ret)
			goto err_free_tdma;

		/* Add dummy desc to launch the crypto operation */
		ret = mv_cesa_dma_add_dummy_launch(&basereq->chain, flags);
		if (ret)
			goto err_free_tdma;

		/* Add output transfers */
		ret = mv_cesa_dma_add_op_transfers(&basereq->chain, &iter.base,
						   &iter.dst, flags);
		if (ret)
			goto err_free_tdma;

	} while (mv_cesa_skcipher_req_iter_next_op(&iter));

	/* Add output data for IV */
	ret = mv_cesa_dma_add_result_op(&basereq->chain,
					CESA_SA_CFG_SRAM_OFFSET,
					CESA_SA_DATA_SRAM_OFFSET,
					CESA_TDMA_SRC_IN_SRAM, flags);
	if (ret)
		goto err_free_tdma;

	basereq->chain.last->flags |= CESA_TDMA_END_OF_REQ;

	return 0;

err_free_tdma:
	mv_cesa_dma_cleanup(basereq);
	if (req->dst != req->src)
		dma_unmap_sg(cesa_dev->dev, req->dst, creq->dst_nents,
			     DMA_FROM_DEVICE);

err_unmap_src:
	dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
		     req->dst != req->src ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL);

	return ret;
}
static inline int
mv_cesa_skcipher_std_req_init(struct skcipher_request *req,
			      const struct mv_cesa_op_ctx *op_templ)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	struct mv_cesa_skcipher_std_req *sreq = &creq->std;
	struct mv_cesa_req *basereq = &creq->base;

	sreq->op = *op_templ;
	sreq->skip_ctx = false;
	basereq->chain.first = NULL;
	basereq->chain.last = NULL;

	return 0;
}
static int mv_cesa_skcipher_req_init(struct skcipher_request *req,
				     struct mv_cesa_op_ctx *tmpl)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	unsigned int blksize = crypto_skcipher_blocksize(tfm);
	int ret;

	if (!IS_ALIGNED(req->cryptlen, blksize))
		return -EINVAL;

	creq->src_nents = sg_nents_for_len(req->src, req->cryptlen);
	if (creq->src_nents < 0) {
		dev_err(cesa_dev->dev, "Invalid number of src SG");
		return creq->src_nents;
	}
	creq->dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
	if (creq->dst_nents < 0) {
		dev_err(cesa_dev->dev, "Invalid number of dst SG");
		return creq->dst_nents;
	}

	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_OP_CRYPT_ONLY,
			      CESA_SA_DESC_CFG_OP_MSK);

	if (cesa_dev->caps->has_tdma)
		ret = mv_cesa_skcipher_dma_req_init(req, tmpl);
	else
		ret = mv_cesa_skcipher_std_req_init(req, tmpl);

	return ret;
}
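/*
 * Request lifecycle: initialise the per-request state, pick an engine
 * based on its current load, prepare the request for that engine and
 * queue it; clean up immediately when the request will not actually be
 * processed.
 */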
static int mv_cesa_skcipher_queue_req(struct skcipher_request *req,
				      struct mv_cesa_op_ctx *tmpl)
{
	int ret;
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	struct mv_cesa_engine *engine;

	ret = mv_cesa_skcipher_req_init(req, tmpl);
	if (ret)
		return ret;

	engine = mv_cesa_select_engine(req->cryptlen);
	mv_cesa_skcipher_prepare(&req->base, engine);

	ret = mv_cesa_queue_req(&req->base, &creq->base);

	if (mv_cesa_req_needs_cleanup(&req->base, ret))
		mv_cesa_skcipher_cleanup(req);

	return ret;
}
static int mv_cesa_des_op(struct skcipher_request *req,
			  struct mv_cesa_op_ctx *tmpl)
{
	struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(req->base.tfm);

	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTM_DES,
			      CESA_SA_DESC_CFG_CRYPTM_MSK);

	memcpy(tmpl->ctx.skcipher.key, ctx->key, DES_KEY_SIZE);

	return mv_cesa_skcipher_queue_req(req, tmpl);
}
static int mv_cesa_ecb_des_encrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_des_op(req, &tmpl);
}

static int mv_cesa_ecb_des_decrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_des_op(req, &tmpl);
}
struct skcipher_alg mv_cesa_ecb_des_alg = {
	.setkey = mv_cesa_des_setkey,
	.encrypt = mv_cesa_ecb_des_encrypt,
	.decrypt = mv_cesa_ecb_des_decrypt,
	.min_keysize = DES_KEY_SIZE,
	.max_keysize = DES_KEY_SIZE,
	.base = {
		.cra_name = "ecb(des)",
		.cra_driver_name = "mv-ecb-des",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC |
			     CRYPTO_ALG_ALLOCATES_MEMORY,
		.cra_blocksize = DES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mv_cesa_des_ctx),
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
		.cra_init = mv_cesa_skcipher_cra_init,
		.cra_exit = mv_cesa_skcipher_cra_exit,
	},
};
static int mv_cesa_cbc_des_op(struct skcipher_request *req,
			      struct mv_cesa_op_ctx *tmpl)
{
	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC,
			      CESA_SA_DESC_CFG_CRYPTCM_MSK);

	memcpy(tmpl->ctx.skcipher.iv, req->iv, DES_BLOCK_SIZE);

	return mv_cesa_des_op(req, tmpl);
}

static int mv_cesa_cbc_des_encrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_cbc_des_op(req, &tmpl);
}

static int mv_cesa_cbc_des_decrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_cbc_des_op(req, &tmpl);
}
struct skcipher_alg mv_cesa_cbc_des_alg = {
	.setkey = mv_cesa_des_setkey,
	.encrypt = mv_cesa_cbc_des_encrypt,
	.decrypt = mv_cesa_cbc_des_decrypt,
	.min_keysize = DES_KEY_SIZE,
	.max_keysize = DES_KEY_SIZE,
	.ivsize = DES_BLOCK_SIZE,
	.base = {
		.cra_name = "cbc(des)",
		.cra_driver_name = "mv-cbc-des",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC |
			     CRYPTO_ALG_ALLOCATES_MEMORY,
		.cra_blocksize = DES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mv_cesa_des_ctx),
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
		.cra_init = mv_cesa_skcipher_cra_init,
		.cra_exit = mv_cesa_skcipher_cra_exit,
	},
};
static int mv_cesa_des3_op(struct skcipher_request *req,
			   struct mv_cesa_op_ctx *tmpl)
{
	struct mv_cesa_des3_ctx *ctx = crypto_tfm_ctx(req->base.tfm);

	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTM_3DES,
			      CESA_SA_DESC_CFG_CRYPTM_MSK);

	memcpy(tmpl->ctx.skcipher.key, ctx->key, DES3_EDE_KEY_SIZE);

	return mv_cesa_skcipher_queue_req(req, tmpl);
}

static int mv_cesa_ecb_des3_ede_encrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_3DES_EDE |
			   CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_des3_op(req, &tmpl);
}

static int mv_cesa_ecb_des3_ede_decrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_3DES_EDE |
			   CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_des3_op(req, &tmpl);
}
struct skcipher_alg mv_cesa_ecb_des3_ede_alg = {
	.setkey = mv_cesa_des3_ede_setkey,
	.encrypt = mv_cesa_ecb_des3_ede_encrypt,
	.decrypt = mv_cesa_ecb_des3_ede_decrypt,
	.min_keysize = DES3_EDE_KEY_SIZE,
	.max_keysize = DES3_EDE_KEY_SIZE,
	.ivsize = DES3_EDE_BLOCK_SIZE,
	.base = {
		.cra_name = "ecb(des3_ede)",
		.cra_driver_name = "mv-ecb-des3-ede",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC |
			     CRYPTO_ALG_ALLOCATES_MEMORY,
		.cra_blocksize = DES3_EDE_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mv_cesa_des3_ctx),
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
		.cra_init = mv_cesa_skcipher_cra_init,
		.cra_exit = mv_cesa_skcipher_cra_exit,
	},
};
static int mv_cesa_cbc_des3_op(struct skcipher_request *req,
			       struct mv_cesa_op_ctx *tmpl)
{
	memcpy(tmpl->ctx.skcipher.iv, req->iv, DES3_EDE_BLOCK_SIZE);

	return mv_cesa_des3_op(req, tmpl);
}

static int mv_cesa_cbc_des3_ede_encrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_CBC |
			   CESA_SA_DESC_CFG_3DES_EDE |
			   CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_cbc_des3_op(req, &tmpl);
}

static int mv_cesa_cbc_des3_ede_decrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_CBC |
			   CESA_SA_DESC_CFG_3DES_EDE |
			   CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_cbc_des3_op(req, &tmpl);
}
struct skcipher_alg mv_cesa_cbc_des3_ede_alg = {
	.setkey = mv_cesa_des3_ede_setkey,
	.encrypt = mv_cesa_cbc_des3_ede_encrypt,
	.decrypt = mv_cesa_cbc_des3_ede_decrypt,
	.min_keysize = DES3_EDE_KEY_SIZE,
	.max_keysize = DES3_EDE_KEY_SIZE,
	.ivsize = DES3_EDE_BLOCK_SIZE,
	.base = {
		.cra_name = "cbc(des3_ede)",
		.cra_driver_name = "mv-cbc-des3-ede",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC |
			     CRYPTO_ALG_ALLOCATES_MEMORY,
		.cra_blocksize = DES3_EDE_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mv_cesa_des3_ctx),
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
		.cra_init = mv_cesa_skcipher_cra_init,
		.cra_exit = mv_cesa_skcipher_cra_exit,
	},
};
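/*
 * Unlike DES/3DES, the AES configuration depends on the key: the key
 * words are written little-endian into the operation context, the
 * decryption schedule is selected for DIR_DEC, and the AES_LEN field
 * encodes the 192/256-bit variants.
 */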
static int mv_cesa_aes_op(struct skcipher_request *req,
			  struct mv_cesa_op_ctx *tmpl)
{
	struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	int i;
	u32 *key;
	u32 cfg;

	cfg = CESA_SA_DESC_CFG_CRYPTM_AES;

	if (mv_cesa_get_op_cfg(tmpl) & CESA_SA_DESC_CFG_DIR_DEC)
		key = ctx->aes.key_dec;
	else
		key = ctx->aes.key_enc;

	for (i = 0; i < ctx->aes.key_length / sizeof(u32); i++)
		tmpl->ctx.skcipher.key[i] = cpu_to_le32(key[i]);

	if (ctx->aes.key_length == 24)
		cfg |= CESA_SA_DESC_CFG_AES_LEN_192;
	else if (ctx->aes.key_length == 32)
		cfg |= CESA_SA_DESC_CFG_AES_LEN_256;

	mv_cesa_update_op_cfg(tmpl, cfg,
			      CESA_SA_DESC_CFG_CRYPTM_MSK |
			      CESA_SA_DESC_CFG_AES_LEN_MSK);

	return mv_cesa_skcipher_queue_req(req, tmpl);
}
static int mv_cesa_ecb_aes_encrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_aes_op(req, &tmpl);
}

static int mv_cesa_ecb_aes_decrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_aes_op(req, &tmpl);
}
struct skcipher_alg mv_cesa_ecb_aes_alg = {
	.setkey = mv_cesa_aes_setkey,
	.encrypt = mv_cesa_ecb_aes_encrypt,
	.decrypt = mv_cesa_ecb_aes_decrypt,
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.base = {
		.cra_name = "ecb(aes)",
		.cra_driver_name = "mv-ecb-aes",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC |
			     CRYPTO_ALG_ALLOCATES_MEMORY,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mv_cesa_aes_ctx),
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
		.cra_init = mv_cesa_skcipher_cra_init,
		.cra_exit = mv_cesa_skcipher_cra_exit,
	},
};
static int mv_cesa_cbc_aes_op(struct skcipher_request *req,
			      struct mv_cesa_op_ctx *tmpl)
{
	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC,
			      CESA_SA_DESC_CFG_CRYPTCM_MSK);
	memcpy(tmpl->ctx.skcipher.iv, req->iv, AES_BLOCK_SIZE);

	return mv_cesa_aes_op(req, tmpl);
}

static int mv_cesa_cbc_aes_encrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_cbc_aes_op(req, &tmpl);
}

static int mv_cesa_cbc_aes_decrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_cbc_aes_op(req, &tmpl);
}
struct skcipher_alg mv_cesa_cbc_aes_alg = {
	.setkey = mv_cesa_aes_setkey,
	.encrypt = mv_cesa_cbc_aes_encrypt,
	.decrypt = mv_cesa_cbc_aes_decrypt,
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.ivsize = AES_BLOCK_SIZE,
	.base = {
		.cra_name = "cbc(aes)",
		.cra_driver_name = "mv-cbc-aes",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC |
			     CRYPTO_ALG_ALLOCATES_MEMORY,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mv_cesa_aes_ctx),
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
		.cra_init = mv_cesa_skcipher_cra_init,
		.cra_exit = mv_cesa_skcipher_cra_exit,
	},
};