// SPDX-License-Identifier: GPL-2.0-only
/*
 * Cipher algorithms supported by the CESA: DES, 3DES and AES.
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 * Author: Arnaud Ebalard <arno@natisbad.org>
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 */

#include <crypto/aes.h>
#include <crypto/internal/des.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>

#include "cesa.h"

struct mv_cesa_des_ctx {
        struct mv_cesa_ctx base;
        u8 key[DES_KEY_SIZE];
};

struct mv_cesa_des3_ctx {
        struct mv_cesa_ctx base;
        u8 key[DES3_EDE_KEY_SIZE];
};

struct mv_cesa_aes_ctx {
        struct mv_cesa_ctx base;
        struct crypto_aes_ctx aes;
};

struct mv_cesa_skcipher_dma_iter {
        struct mv_cesa_dma_iter base;
        struct mv_cesa_sg_dma_iter src;
        struct mv_cesa_sg_dma_iter dst;
};

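/*
 * DMA-path helpers: a request is processed in chunks no larger than the
 * engine SRAM payload area, so the iterator below walks the src and dst
 * scatterlists one SRAM-sized operation at a time.
 */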
static inline void
mv_cesa_skcipher_req_iter_init(struct mv_cesa_skcipher_dma_iter *iter,
                               struct skcipher_request *req)
{
        mv_cesa_req_dma_iter_init(&iter->base, req->cryptlen);
        mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE);
        mv_cesa_sg_dma_iter_init(&iter->dst, req->dst, DMA_FROM_DEVICE);
}

static inline bool
mv_cesa_skcipher_req_iter_next_op(struct mv_cesa_skcipher_dma_iter *iter)
{
        iter->src.op_offset = 0;
        iter->dst.op_offset = 0;

        return mv_cesa_req_dma_iter_next_op(&iter->base);
}

static inline void
mv_cesa_skcipher_dma_cleanup(struct skcipher_request *req)
{
        struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);

        if (req->dst != req->src) {
                dma_unmap_sg(cesa_dev->dev, req->dst, creq->dst_nents,
                             DMA_FROM_DEVICE);
                dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
                             DMA_TO_DEVICE);
        } else {
                dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
                             DMA_BIDIRECTIONAL);
        }
        mv_cesa_dma_cleanup(&creq->base);
}

static inline void mv_cesa_skcipher_cleanup(struct skcipher_request *req)
{
        struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);

        if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
                mv_cesa_skcipher_dma_cleanup(req);
}

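/*
 * Standard (CPU-driven) path: copy the op descriptor and up to
 * CESA_SA_SRAM_PAYLOAD_SIZE bytes of input into the engine SRAM, then
 * start accelerator 0. When the SRAM is reached through a genalloc pool
 * (engine->pool) a plain memcpy() is used, otherwise memcpy_toio().
 */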
static void mv_cesa_skcipher_std_step(struct skcipher_request *req)
{
        struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
        struct mv_cesa_skcipher_std_req *sreq = &creq->std;
        struct mv_cesa_engine *engine = creq->base.engine;
        size_t len = min_t(size_t, req->cryptlen - sreq->offset,
                           CESA_SA_SRAM_PAYLOAD_SIZE);

        mv_cesa_adjust_op(engine, &sreq->op);
        if (engine->pool)
                memcpy(engine->sram_pool, &sreq->op, sizeof(sreq->op));
        else
                memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op));

        len = mv_cesa_sg_copy_to_sram(engine, req->src, creq->src_nents,
                                      CESA_SA_DATA_SRAM_OFFSET, len,
                                      sreq->offset);

        sreq->size = len;
        mv_cesa_set_crypt_op_len(&sreq->op, len);

        /* FIXME: only update enc_len field */
        if (!sreq->skip_ctx) {
                if (engine->pool)
                        memcpy(engine->sram_pool, &sreq->op, sizeof(sreq->op));
                else
                        memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op));
                sreq->skip_ctx = true;
        } else if (engine->pool)
                memcpy(engine->sram_pool, &sreq->op, sizeof(sreq->op.desc));
        else
                memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op.desc));

        mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
        writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
        WARN_ON(readl(engine->regs + CESA_SA_CMD) &
                CESA_SA_CMD_EN_CESA_SA_ACCL0);
        writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}

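/*
 * Completion of one chunk on the standard path: copy the result back out
 * of SRAM and keep returning -EINPROGRESS until the whole cryptlen has
 * been consumed.
 */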
static int mv_cesa_skcipher_std_process(struct skcipher_request *req,
                                        u32 status)
{
        struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
        struct mv_cesa_skcipher_std_req *sreq = &creq->std;
        struct mv_cesa_engine *engine = creq->base.engine;
        size_t len;

        len = mv_cesa_sg_copy_from_sram(engine, req->dst, creq->dst_nents,
                                        CESA_SA_DATA_SRAM_OFFSET, sreq->size,
                                        sreq->offset);

        sreq->offset += len;
        if (sreq->offset < req->cryptlen)
                return -EINPROGRESS;

        return 0;
}

static int mv_cesa_skcipher_process(struct crypto_async_request *req,
                                    u32 status)
{
        struct skcipher_request *skreq = skcipher_request_cast(req);
        struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);
        struct mv_cesa_req *basereq = &creq->base;

        if (mv_cesa_req_get_type(basereq) == CESA_STD_REQ)
                return mv_cesa_skcipher_std_process(skreq, status);

        return mv_cesa_dma_process(basereq, status);
}

static void mv_cesa_skcipher_step(struct crypto_async_request *req)
{
        struct skcipher_request *skreq = skcipher_request_cast(req);
        struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);

        if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
                mv_cesa_dma_step(&creq->base);
        else
                mv_cesa_skcipher_std_step(skreq);
}

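/*
 * Prepare hooks: once an engine has been picked, the DMA path fixes up
 * its TDMA chain for that engine's SRAM, while the standard path simply
 * resets its progress counters.
 */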
static inline void
mv_cesa_skcipher_dma_prepare(struct skcipher_request *req)
{
        struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
        struct mv_cesa_req *basereq = &creq->base;

        mv_cesa_dma_prepare(basereq, basereq->engine);
}

static inline void
mv_cesa_skcipher_std_prepare(struct skcipher_request *req)
{
        struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
        struct mv_cesa_skcipher_std_req *sreq = &creq->std;

        sreq->size = 0;
        sreq->offset = 0;
}

static inline void mv_cesa_skcipher_prepare(struct crypto_async_request *req,
                                            struct mv_cesa_engine *engine)
{
        struct skcipher_request *skreq = skcipher_request_cast(req);
        struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);

        creq->base.engine = engine;

        if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
                mv_cesa_skcipher_dma_prepare(skreq);
        else
                mv_cesa_skcipher_std_prepare(skreq);
}

static inline void
mv_cesa_skcipher_req_cleanup(struct crypto_async_request *req)
{
        struct skcipher_request *skreq = skcipher_request_cast(req);

        mv_cesa_skcipher_cleanup(skreq);
}

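/*
 * Completion handler: credit the engine load counter and copy the output
 * IV back into the request so that chained CBC requests can continue from
 * it. Where the IV lives depends on which processing path was used.
 */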
static void
mv_cesa_skcipher_complete(struct crypto_async_request *req)
{
        struct skcipher_request *skreq = skcipher_request_cast(req);
        struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);
        struct mv_cesa_engine *engine = creq->base.engine;
        unsigned int ivsize;

        atomic_sub(skreq->cryptlen, &engine->load);
        ivsize = crypto_skcipher_ivsize(crypto_skcipher_reqtfm(skreq));

        if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ) {
                struct mv_cesa_req *basereq;

                basereq = &creq->base;
                memcpy(skreq->iv, basereq->chain.last->op->ctx.skcipher.iv,
                       ivsize);
        } else if (engine->pool)
                memcpy(skreq->iv,
                       engine->sram_pool + CESA_SA_CRYPT_IV_SRAM_OFFSET,
                       ivsize);
        else
                memcpy_fromio(skreq->iv,
                              engine->sram + CESA_SA_CRYPT_IV_SRAM_OFFSET,
                              ivsize);
}

static const struct mv_cesa_req_ops mv_cesa_skcipher_req_ops = {
        .step = mv_cesa_skcipher_step,
        .process = mv_cesa_skcipher_process,
        .cleanup = mv_cesa_skcipher_req_cleanup,
        .complete = mv_cesa_skcipher_complete,
};

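/*
 * tfm lifetime: cra_init wires up the request ops and the per-request
 * context size; cra_exit wipes the whole transform context, including
 * any key material, with memzero_explicit().
 */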
static void mv_cesa_skcipher_cra_exit(struct crypto_tfm *tfm)
{
        void *ctx = crypto_tfm_ctx(tfm);

        memzero_explicit(ctx, tfm->__crt_alg->cra_ctxsize);
}

static int mv_cesa_skcipher_cra_init(struct crypto_tfm *tfm)
{
        struct mv_cesa_ctx *ctx = crypto_tfm_ctx(tfm);

        ctx->ops = &mv_cesa_skcipher_req_ops;

        crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
                                    sizeof(struct mv_cesa_skcipher_req));

        return 0;
}

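/*
 * The engine appears to run AES decryption from the tail of the expanded
 * key: aes_expandkey() already places the last round key in key_dec[0..3],
 * but for AES-192/256 the preceding schedule words are needed as well, so
 * they are copied over from key_enc.
 */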
static int mv_cesa_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
                              unsigned int len)
{
        struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
        struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(tfm);
        int remaining;
        int offset;
        int ret;
        int i;

        ret = aes_expandkey(&ctx->aes, key, len);
        if (ret)
                return ret;

        remaining = (ctx->aes.key_length - 16) / 4;
        offset = ctx->aes.key_length + 24 - remaining;
        for (i = 0; i < remaining; i++)
                ctx->aes.key_dec[4 + i] = ctx->aes.key_enc[offset + i];

        return 0;
}

static int mv_cesa_des_setkey(struct crypto_skcipher *cipher, const u8 *key,
                              unsigned int len)
{
        struct mv_cesa_des_ctx *ctx = crypto_skcipher_ctx(cipher);
        int err;

        err = verify_skcipher_des_key(cipher, key);
        if (err)
                return err;

        memcpy(ctx->key, key, DES_KEY_SIZE);

        return 0;
}

static int mv_cesa_des3_ede_setkey(struct crypto_skcipher *cipher,
                                   const u8 *key, unsigned int len)
{
        struct mv_cesa_des3_ctx *ctx = crypto_skcipher_ctx(cipher);
        int err;

        err = verify_skcipher_des3_key(cipher, key);
        if (err)
                return err;

        memcpy(ctx->key, key, DES3_EDE_KEY_SIZE);

        return 0;
}

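/*
 * DMA-path setup: map the scatterlists, then build one TDMA sequence per
 * SRAM-sized chunk: the op descriptor (context skipped after the first
 * chunk) and input data are copied in, a dummy descriptor launches the
 * crypto operation, and a transfer fetches the output. A final result
 * descriptor pulls the updated IV out of SRAM.
 */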
static int mv_cesa_skcipher_dma_req_init(struct skcipher_request *req,
                                         const struct mv_cesa_op_ctx *op_templ)
{
        struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
        gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                      GFP_KERNEL : GFP_ATOMIC;
        struct mv_cesa_req *basereq = &creq->base;
        struct mv_cesa_skcipher_dma_iter iter;
        bool skip_ctx = false;
        int ret;

        basereq->chain.first = NULL;
        basereq->chain.last = NULL;

        if (req->src != req->dst) {
                ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
                                 DMA_TO_DEVICE);
                if (!ret)
                        return -ENOMEM;

                ret = dma_map_sg(cesa_dev->dev, req->dst, creq->dst_nents,
                                 DMA_FROM_DEVICE);
                if (!ret) {
                        ret = -ENOMEM;
                        goto err_unmap_src;
                }
        } else {
                ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
                                 DMA_BIDIRECTIONAL);
                if (!ret)
                        return -ENOMEM;
        }

        mv_cesa_tdma_desc_iter_init(&basereq->chain);
        mv_cesa_skcipher_req_iter_init(&iter, req);

        do {
                struct mv_cesa_op_ctx *op;

                op = mv_cesa_dma_add_op(&basereq->chain, op_templ, skip_ctx,
                                        flags);
                if (IS_ERR(op)) {
                        ret = PTR_ERR(op);
                        goto err_free_tdma;
                }
                skip_ctx = true;

                mv_cesa_set_crypt_op_len(op, iter.base.op_len);

                /* Add input transfers */
                ret = mv_cesa_dma_add_op_transfers(&basereq->chain, &iter.base,
                                                   &iter.src, flags);
                if (ret)
                        goto err_free_tdma;

                /* Add dummy desc to launch the crypto operation */
                ret = mv_cesa_dma_add_dummy_launch(&basereq->chain, flags);
                if (ret)
                        goto err_free_tdma;

                /* Add output transfers */
                ret = mv_cesa_dma_add_op_transfers(&basereq->chain, &iter.base,
                                                   &iter.dst, flags);
                if (ret)
                        goto err_free_tdma;

        } while (mv_cesa_skcipher_req_iter_next_op(&iter));

        /* Add output data for IV */
        ret = mv_cesa_dma_add_result_op(&basereq->chain,
                                        CESA_SA_CFG_SRAM_OFFSET,
                                        CESA_SA_DATA_SRAM_OFFSET,
                                        CESA_TDMA_SRC_IN_SRAM, flags);

        if (ret)
                goto err_free_tdma;

        basereq->chain.last->flags |= CESA_TDMA_END_OF_REQ;

        return 0;

err_free_tdma:
        mv_cesa_dma_cleanup(basereq);
        if (req->dst != req->src)
                dma_unmap_sg(cesa_dev->dev, req->dst, creq->dst_nents,
                             DMA_FROM_DEVICE);

err_unmap_src:
        dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
                     req->dst != req->src ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL);

        return ret;
}

static inline int
mv_cesa_skcipher_std_req_init(struct skcipher_request *req,
                              const struct mv_cesa_op_ctx *op_templ)
{
        struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
        struct mv_cesa_skcipher_std_req *sreq = &creq->std;
        struct mv_cesa_req *basereq = &creq->base;

        sreq->op = *op_templ;
        sreq->skip_ctx = false;
        basereq->chain.first = NULL;
        basereq->chain.last = NULL;

        return 0;
}

static int mv_cesa_skcipher_req_init(struct skcipher_request *req,
                                     struct mv_cesa_op_ctx *tmpl)
{
        struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        unsigned int blksize = crypto_skcipher_blocksize(tfm);
        int ret;

        if (!IS_ALIGNED(req->cryptlen, blksize))
                return -EINVAL;

        creq->src_nents = sg_nents_for_len(req->src, req->cryptlen);
        if (creq->src_nents < 0) {
                dev_err(cesa_dev->dev, "Invalid number of src SG");
                return creq->src_nents;
        }
        creq->dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
        if (creq->dst_nents < 0) {
                dev_err(cesa_dev->dev, "Invalid number of dst SG");
                return creq->dst_nents;
        }

        mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_OP_CRYPT_ONLY,
                              CESA_SA_DESC_CFG_OP_MSK);

        if (cesa_dev->caps->has_tdma)
                ret = mv_cesa_skcipher_dma_req_init(req, tmpl);
        else
                ret = mv_cesa_skcipher_std_req_init(req, tmpl);

        return ret;
}

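/*
 * Common entry point for all modes: initialise the request, select an
 * engine based on its current load, prepare the request for it and queue
 * it. Cleanup only happens here when the request will not complete
 * asynchronously.
 */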
static int mv_cesa_skcipher_queue_req(struct skcipher_request *req,
                                      struct mv_cesa_op_ctx *tmpl)
{
        int ret;
        struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
        struct mv_cesa_engine *engine;

        ret = mv_cesa_skcipher_req_init(req, tmpl);
        if (ret)
                return ret;

        engine = mv_cesa_select_engine(req->cryptlen);
        mv_cesa_skcipher_prepare(&req->base, engine);

        ret = mv_cesa_queue_req(&req->base, &creq->base);

        if (mv_cesa_req_needs_cleanup(&req->base, ret))
                mv_cesa_skcipher_cleanup(req);

        return ret;
}

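/*
 * The per-algorithm wrappers below fill an on-stack mv_cesa_op_ctx
 * template (cipher, mode, direction, key/IV) and hand it to
 * mv_cesa_skcipher_queue_req().
 */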
static int mv_cesa_des_op(struct skcipher_request *req,
                          struct mv_cesa_op_ctx *tmpl)
{
        struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(req->base.tfm);

        mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTM_DES,
                              CESA_SA_DESC_CFG_CRYPTM_MSK);

        memcpy(tmpl->ctx.skcipher.key, ctx->key, DES_KEY_SIZE);

        return mv_cesa_skcipher_queue_req(req, tmpl);
}

static int mv_cesa_ecb_des_encrypt(struct skcipher_request *req)
{
        struct mv_cesa_op_ctx tmpl = { };

        mv_cesa_set_op_cfg(&tmpl,
                           CESA_SA_DESC_CFG_CRYPTCM_ECB |
                           CESA_SA_DESC_CFG_DIR_ENC);

        return mv_cesa_des_op(req, &tmpl);
}

static int mv_cesa_ecb_des_decrypt(struct skcipher_request *req)
{
        struct mv_cesa_op_ctx tmpl = { };

        mv_cesa_set_op_cfg(&tmpl,
                           CESA_SA_DESC_CFG_CRYPTCM_ECB |
                           CESA_SA_DESC_CFG_DIR_DEC);

        return mv_cesa_des_op(req, &tmpl);
}

struct skcipher_alg mv_cesa_ecb_des_alg = {
        .setkey = mv_cesa_des_setkey,
        .encrypt = mv_cesa_ecb_des_encrypt,
        .decrypt = mv_cesa_ecb_des_decrypt,
        .min_keysize = DES_KEY_SIZE,
        .max_keysize = DES_KEY_SIZE,
        .base = {
                .cra_name = "ecb(des)",
                .cra_driver_name = "mv-ecb-des",
                .cra_priority = 300,
                .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC |
                             CRYPTO_ALG_ALLOCATES_MEMORY,
                .cra_blocksize = DES_BLOCK_SIZE,
                .cra_ctxsize = sizeof(struct mv_cesa_des_ctx),
                .cra_alignmask = 0,
                .cra_module = THIS_MODULE,
                .cra_init = mv_cesa_skcipher_cra_init,
                .cra_exit = mv_cesa_skcipher_cra_exit,
        },
};

static int mv_cesa_cbc_des_op(struct skcipher_request *req,
                              struct mv_cesa_op_ctx *tmpl)
{
        mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC,
                              CESA_SA_DESC_CFG_CRYPTCM_MSK);

        memcpy(tmpl->ctx.skcipher.iv, req->iv, DES_BLOCK_SIZE);

        return mv_cesa_des_op(req, tmpl);
}

static int mv_cesa_cbc_des_encrypt(struct skcipher_request *req)
{
        struct mv_cesa_op_ctx tmpl = { };

        mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_ENC);

        return mv_cesa_cbc_des_op(req, &tmpl);
}

static int mv_cesa_cbc_des_decrypt(struct skcipher_request *req)
{
        struct mv_cesa_op_ctx tmpl = { };

        mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_DEC);

        return mv_cesa_cbc_des_op(req, &tmpl);
}

struct skcipher_alg mv_cesa_cbc_des_alg = {
        .setkey = mv_cesa_des_setkey,
        .encrypt = mv_cesa_cbc_des_encrypt,
        .decrypt = mv_cesa_cbc_des_decrypt,
        .min_keysize = DES_KEY_SIZE,
        .max_keysize = DES_KEY_SIZE,
        .ivsize = DES_BLOCK_SIZE,
        .base = {
                .cra_name = "cbc(des)",
                .cra_driver_name = "mv-cbc-des",
                .cra_priority = 300,
                .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC |
                             CRYPTO_ALG_ALLOCATES_MEMORY,
                .cra_blocksize = DES_BLOCK_SIZE,
                .cra_ctxsize = sizeof(struct mv_cesa_des_ctx),
                .cra_alignmask = 0,
                .cra_module = THIS_MODULE,
                .cra_init = mv_cesa_skcipher_cra_init,
                .cra_exit = mv_cesa_skcipher_cra_exit,
        },
};

static int mv_cesa_des3_op(struct skcipher_request *req,
                           struct mv_cesa_op_ctx *tmpl)
{
        struct mv_cesa_des3_ctx *ctx = crypto_tfm_ctx(req->base.tfm);

        mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTM_3DES,
                              CESA_SA_DESC_CFG_CRYPTM_MSK);

        memcpy(tmpl->ctx.skcipher.key, ctx->key, DES3_EDE_KEY_SIZE);

        return mv_cesa_skcipher_queue_req(req, tmpl);
}

static int mv_cesa_ecb_des3_ede_encrypt(struct skcipher_request *req)
{
        struct mv_cesa_op_ctx tmpl = { };

        mv_cesa_set_op_cfg(&tmpl,
                           CESA_SA_DESC_CFG_CRYPTCM_ECB |
                           CESA_SA_DESC_CFG_3DES_EDE |
                           CESA_SA_DESC_CFG_DIR_ENC);

        return mv_cesa_des3_op(req, &tmpl);
}

static int mv_cesa_ecb_des3_ede_decrypt(struct skcipher_request *req)
{
        struct mv_cesa_op_ctx tmpl = { };

        mv_cesa_set_op_cfg(&tmpl,
                           CESA_SA_DESC_CFG_CRYPTCM_ECB |
                           CESA_SA_DESC_CFG_3DES_EDE |
                           CESA_SA_DESC_CFG_DIR_DEC);

        return mv_cesa_des3_op(req, &tmpl);
}

struct skcipher_alg mv_cesa_ecb_des3_ede_alg = {
        .setkey = mv_cesa_des3_ede_setkey,
        .encrypt = mv_cesa_ecb_des3_ede_encrypt,
        .decrypt = mv_cesa_ecb_des3_ede_decrypt,
        .min_keysize = DES3_EDE_KEY_SIZE,
        .max_keysize = DES3_EDE_KEY_SIZE,
        .base = {
                .cra_name = "ecb(des3_ede)",
                .cra_driver_name = "mv-ecb-des3-ede",
                .cra_priority = 300,
                .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC |
                             CRYPTO_ALG_ALLOCATES_MEMORY,
                .cra_blocksize = DES3_EDE_BLOCK_SIZE,
                .cra_ctxsize = sizeof(struct mv_cesa_des3_ctx),
                .cra_alignmask = 0,
                .cra_module = THIS_MODULE,
                .cra_init = mv_cesa_skcipher_cra_init,
                .cra_exit = mv_cesa_skcipher_cra_exit,
        },
};

static int mv_cesa_cbc_des3_op(struct skcipher_request *req,
                               struct mv_cesa_op_ctx *tmpl)
{
        memcpy(tmpl->ctx.skcipher.iv, req->iv, DES3_EDE_BLOCK_SIZE);

        return mv_cesa_des3_op(req, tmpl);
}

static int mv_cesa_cbc_des3_ede_encrypt(struct skcipher_request *req)
{
        struct mv_cesa_op_ctx tmpl = { };

        mv_cesa_set_op_cfg(&tmpl,
                           CESA_SA_DESC_CFG_CRYPTCM_CBC |
                           CESA_SA_DESC_CFG_3DES_EDE |
                           CESA_SA_DESC_CFG_DIR_ENC);

        return mv_cesa_cbc_des3_op(req, &tmpl);
}

static int mv_cesa_cbc_des3_ede_decrypt(struct skcipher_request *req)
{
        struct mv_cesa_op_ctx tmpl = { };

        mv_cesa_set_op_cfg(&tmpl,
                           CESA_SA_DESC_CFG_CRYPTCM_CBC |
                           CESA_SA_DESC_CFG_3DES_EDE |
                           CESA_SA_DESC_CFG_DIR_DEC);

        return mv_cesa_cbc_des3_op(req, &tmpl);
}

struct skcipher_alg mv_cesa_cbc_des3_ede_alg = {
        .setkey = mv_cesa_des3_ede_setkey,
        .encrypt = mv_cesa_cbc_des3_ede_encrypt,
        .decrypt = mv_cesa_cbc_des3_ede_decrypt,
        .min_keysize = DES3_EDE_KEY_SIZE,
        .max_keysize = DES3_EDE_KEY_SIZE,
        .ivsize = DES3_EDE_BLOCK_SIZE,
        .base = {
                .cra_name = "cbc(des3_ede)",
                .cra_driver_name = "mv-cbc-des3-ede",
                .cra_priority = 300,
                .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC |
                             CRYPTO_ALG_ALLOCATES_MEMORY,
                .cra_blocksize = DES3_EDE_BLOCK_SIZE,
                .cra_ctxsize = sizeof(struct mv_cesa_des3_ctx),
                .cra_alignmask = 0,
                .cra_module = THIS_MODULE,
                .cra_init = mv_cesa_skcipher_cra_init,
                .cra_exit = mv_cesa_skcipher_cra_exit,
        },
};

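/*
 * AES template setup: pick the encryption or decryption schedule,
 * convert it to the little-endian layout the engine expects, and encode
 * the key length (128/192/256) in the config word.
 */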
static int mv_cesa_aes_op(struct skcipher_request *req,
                          struct mv_cesa_op_ctx *tmpl)
{
        struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
        int i;
        u32 *key;
        u32 cfg;

        cfg = CESA_SA_DESC_CFG_CRYPTM_AES;

        if (mv_cesa_get_op_cfg(tmpl) & CESA_SA_DESC_CFG_DIR_DEC)
                key = ctx->aes.key_dec;
        else
                key = ctx->aes.key_enc;

        for (i = 0; i < ctx->aes.key_length / sizeof(u32); i++)
                tmpl->ctx.skcipher.key[i] = cpu_to_le32(key[i]);

        if (ctx->aes.key_length == 24)
                cfg |= CESA_SA_DESC_CFG_AES_LEN_192;
        else if (ctx->aes.key_length == 32)
                cfg |= CESA_SA_DESC_CFG_AES_LEN_256;

        mv_cesa_update_op_cfg(tmpl, cfg,
                              CESA_SA_DESC_CFG_CRYPTM_MSK |
                              CESA_SA_DESC_CFG_AES_LEN_MSK);

        return mv_cesa_skcipher_queue_req(req, tmpl);
}

static int mv_cesa_ecb_aes_encrypt(struct skcipher_request *req)
{
        struct mv_cesa_op_ctx tmpl = { };

        mv_cesa_set_op_cfg(&tmpl,
                           CESA_SA_DESC_CFG_CRYPTCM_ECB |
                           CESA_SA_DESC_CFG_DIR_ENC);

        return mv_cesa_aes_op(req, &tmpl);
}

static int mv_cesa_ecb_aes_decrypt(struct skcipher_request *req)
{
        struct mv_cesa_op_ctx tmpl = { };

        mv_cesa_set_op_cfg(&tmpl,
                           CESA_SA_DESC_CFG_CRYPTCM_ECB |
                           CESA_SA_DESC_CFG_DIR_DEC);

        return mv_cesa_aes_op(req, &tmpl);
}

struct skcipher_alg mv_cesa_ecb_aes_alg = {
        .setkey = mv_cesa_aes_setkey,
        .encrypt = mv_cesa_ecb_aes_encrypt,
        .decrypt = mv_cesa_ecb_aes_decrypt,
        .min_keysize = AES_MIN_KEY_SIZE,
        .max_keysize = AES_MAX_KEY_SIZE,
        .base = {
                .cra_name = "ecb(aes)",
                .cra_driver_name = "mv-ecb-aes",
                .cra_priority = 300,
                .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC |
                             CRYPTO_ALG_ALLOCATES_MEMORY,
                .cra_blocksize = AES_BLOCK_SIZE,
                .cra_ctxsize = sizeof(struct mv_cesa_aes_ctx),
                .cra_alignmask = 0,
                .cra_module = THIS_MODULE,
                .cra_init = mv_cesa_skcipher_cra_init,
                .cra_exit = mv_cesa_skcipher_cra_exit,
        },
};

static int mv_cesa_cbc_aes_op(struct skcipher_request *req,
                              struct mv_cesa_op_ctx *tmpl)
{
        mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC,
                              CESA_SA_DESC_CFG_CRYPTCM_MSK);
        memcpy(tmpl->ctx.skcipher.iv, req->iv, AES_BLOCK_SIZE);

        return mv_cesa_aes_op(req, tmpl);
}

static int mv_cesa_cbc_aes_encrypt(struct skcipher_request *req)
{
        struct mv_cesa_op_ctx tmpl = { };

        mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_ENC);

        return mv_cesa_cbc_aes_op(req, &tmpl);
}

static int mv_cesa_cbc_aes_decrypt(struct skcipher_request *req)
{
        struct mv_cesa_op_ctx tmpl = { };

        mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_DEC);

        return mv_cesa_cbc_aes_op(req, &tmpl);
}

struct skcipher_alg mv_cesa_cbc_aes_alg = {
        .setkey = mv_cesa_aes_setkey,
        .encrypt = mv_cesa_cbc_aes_encrypt,
        .decrypt = mv_cesa_cbc_aes_decrypt,
        .min_keysize = AES_MIN_KEY_SIZE,
        .max_keysize = AES_MAX_KEY_SIZE,
        .ivsize = AES_BLOCK_SIZE,
        .base = {
                .cra_name = "cbc(aes)",
                .cra_driver_name = "mv-cbc-aes",
                .cra_priority = 300,
                .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC |
                             CRYPTO_ALG_ALLOCATES_MEMORY,
                .cra_blocksize = AES_BLOCK_SIZE,
                .cra_ctxsize = sizeof(struct mv_cesa_aes_ctx),
                .cra_alignmask = 0,
                .cra_module = THIS_MODULE,
                .cra_init = mv_cesa_skcipher_cra_init,
                .cra_exit = mv_cesa_skcipher_cra_exit,
        },
};