/*
 * Support for Marvell's crypto engine which can be found on some Orion5X
 * boards.
 *
 * Author: Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 */
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <linux/crypto.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kthread.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include "mv_cesa.h"

/*
 *  /---------------------------------------\
 *  |                                       | request complete
 * IDLE -> new request -> BUSY -> done -> DEQUEUE
 *                          |               | more scatter entries
 *                          \_______________/
 */

/**
 * struct req_progress - used for every crypt request
 * @src_sg_it:		sg iterator for src
 * @dst_sg_it:		sg iterator for dst
 * @sg_src_left:	bytes left in src to process (scatter list)
 * @src_start:		offset to add to src start position (scatter list)
 * @crypt_len:		length of current crypt process
 * @sg_dst_left:	bytes left in dst to process in this scatter list
 * @dst_start:		offset to add to dst start position (scatter list)
 * @total_req_bytes:	total number of bytes processed (request).
 *
 * sg helpers are used to iterate over the scatterlist. Since the size of the
 * SRAM may be less than the scatter size, this struct is used to keep
 * track of progress within the current scatterlist.
 */
struct req_progress {
	struct sg_mapping_iter src_sg_it;
	struct sg_mapping_iter dst_sg_it;

	int sg_src_left;
	int src_start;
	int crypt_len;
	int sg_dst_left;
	int dst_start;
	int total_req_bytes;
};

struct crypto_priv {
	void __iomem *reg;
	void __iomem *sram;
	int irq;
	struct task_struct *queue_th;

	/* the lock protects queue and eng_st */
	spinlock_t lock;
	struct crypto_queue queue;
	enum engine_status eng_st;
	struct ablkcipher_request *cur_req;
	struct req_progress p;
	int max_req_size;
	int sram_size;
};

static struct crypto_priv *cpg;

struct mv_ctx {
	u8 aes_enc_key[AES_KEY_LEN];
	u32 aes_dec_key[8];
	int key_len;
	u32 need_calc_aes_dkey;
};

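/*
 * Derive the AES decryption key from the expanded encryption key schedule.
 * Only recomputed when need_calc_aes_dkey was set by a preceding setkey.
 */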
static void compute_aes_dec_key(struct mv_ctx *ctx)
{
	struct crypto_aes_ctx gen_aes_key;
	int key_pos;

	if (!ctx->need_calc_aes_dkey)
		return;

	crypto_aes_expand_key(&gen_aes_key, ctx->aes_enc_key, ctx->key_len);

	key_pos = ctx->key_len + 24;
	memcpy(ctx->aes_dec_key, &gen_aes_key.key_enc[key_pos], 4 * 4);
	switch (ctx->key_len) {
	case AES_KEYSIZE_256:
		key_pos -= 2;
		/* fall through */
	case AES_KEYSIZE_192:
		key_pos -= 2;
		memcpy(&ctx->aes_dec_key[4], &gen_aes_key.key_enc[key_pos],
				4 * 4);
		break;
	}
	ctx->need_calc_aes_dkey = 0;
}

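/*
 * Validate the key length, cache the encryption key and mark the decryption
 * key as stale so it gets recomputed before the next decrypt operation.
 */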
static int mv_setkey_aes(struct crypto_ablkcipher *cipher, const u8 *key,
		unsigned int len)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct mv_ctx *ctx = crypto_tfm_ctx(tfm);

	switch (len) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_192:
	case AES_KEYSIZE_256:
		break;
	default:
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	ctx->key_len = len;
	ctx->need_calc_aes_dkey = 1;

	memcpy(ctx->aes_enc_key, key, AES_KEY_LEN);
	return 0;
}

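/*
 * Copy the next chunk of the source scatterlist into the engine's SRAM input
 * buffer. A chunk is at most max_req_size bytes.
 */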
static void setup_data_in(struct ablkcipher_request *req)
{
	int ret;
	void *buf;

	if (!cpg->p.sg_src_left) {
		ret = sg_miter_next(&cpg->p.src_sg_it);
		BUG_ON(!ret);
		cpg->p.sg_src_left = cpg->p.src_sg_it.length;
		cpg->p.src_start = 0;
	}

	cpg->p.crypt_len = min(cpg->p.sg_src_left, cpg->max_req_size);

	buf = cpg->p.src_sg_it.addr;
	buf += cpg->p.src_start;

	memcpy(cpg->sram + SRAM_DATA_IN_START, buf, cpg->p.crypt_len);

	cpg->p.sg_src_left -= cpg->p.crypt_len;
	cpg->p.src_start += cpg->p.crypt_len;
}

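/*
 * Program the security accelerator for the current chunk: cipher mode,
 * direction, key (and IV for CBC) are placed in SRAM, then the engine is
 * started. Completion is signalled by an interrupt.
 */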
static void mv_process_current_q(int first_block)
{
	struct ablkcipher_request *req = cpg->cur_req;
	struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
	struct sec_accel_config op;

	switch (req_ctx->op) {
	case COP_AES_ECB:
		op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_ECB;
		break;
	case COP_AES_CBC:
		op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_CBC;
		op.enc_iv = ENC_IV_POINT(SRAM_DATA_IV) |
			ENC_IV_BUF_POINT(SRAM_DATA_IV_BUF);
		if (first_block)
			memcpy(cpg->sram + SRAM_DATA_IV, req->info, 16);
		break;
	}
	if (req_ctx->decrypt) {
		op.config |= CFG_DIR_DEC;
		memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_dec_key,
				AES_KEY_LEN);
	} else {
		op.config |= CFG_DIR_ENC;
		memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_enc_key,
				AES_KEY_LEN);
	}

	switch (ctx->key_len) {
	case AES_KEYSIZE_128:
		op.config |= CFG_AES_LEN_128;
		break;
	case AES_KEYSIZE_192:
		op.config |= CFG_AES_LEN_192;
		break;
	case AES_KEYSIZE_256:
		op.config |= CFG_AES_LEN_256;
		break;
	}
	op.enc_p = ENC_P_SRC(SRAM_DATA_IN_START) |
		ENC_P_DST(SRAM_DATA_OUT_START);
	op.enc_key_p = SRAM_DATA_KEY_P;

	setup_data_in(req);
	op.enc_len = cpg->p.crypt_len;
	memcpy(cpg->sram + SRAM_CONFIG, &op,
			sizeof(struct sec_accel_config));

	writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);
	/* GO */
	writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);

	/*
	 * XXX: add timer if the interrupt does not occur for some mystery
	 * reason
	 */
}

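/* For CBC, hand the updated IV in SRAM back to the caller's request. */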
static void mv_crypto_algo_completion(void)
{
	struct ablkcipher_request *req = cpg->cur_req;
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	if (req_ctx->op != COP_AES_CBC)
		return;

	memcpy(req->info, cpg->sram + SRAM_DATA_IV_BUF, 16);
}

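/*
 * Copy the processed chunk from SRAM into the destination scatterlist, then
 * either start the next chunk or complete the request.
 */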
static void dequeue_complete_req(void)
{
	struct ablkcipher_request *req = cpg->cur_req;
	void *buf;
	int ret;

	cpg->p.total_req_bytes += cpg->p.crypt_len;
	do {
		int dst_copy;

		if (!cpg->p.sg_dst_left) {
			ret = sg_miter_next(&cpg->p.dst_sg_it);
			BUG_ON(!ret);
			cpg->p.sg_dst_left = cpg->p.dst_sg_it.length;
			cpg->p.dst_start = 0;
		}

		buf = cpg->p.dst_sg_it.addr;
		buf += cpg->p.dst_start;

		dst_copy = min(cpg->p.crypt_len, cpg->p.sg_dst_left);

		memcpy(buf, cpg->sram + SRAM_DATA_OUT_START, dst_copy);

		cpg->p.sg_dst_left -= dst_copy;
		cpg->p.crypt_len -= dst_copy;
		cpg->p.dst_start += dst_copy;
	} while (cpg->p.crypt_len > 0);

	BUG_ON(cpg->eng_st != ENGINE_W_DEQUEUE);
	if (cpg->p.total_req_bytes < req->nbytes) {
		/* process next scatter list entry */
		cpg->eng_st = ENGINE_BUSY;
		mv_process_current_q(0);
	} else {
		sg_miter_stop(&cpg->p.src_sg_it);
		sg_miter_stop(&cpg->p.dst_sg_it);
		mv_crypto_algo_completion();
		cpg->eng_st = ENGINE_IDLE;
		req->base.complete(&req->base, 0);
	}
}

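/* Number of scatterlist entries needed to cover total_bytes. */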
static int count_sgs(struct scatterlist *sl, unsigned int total_bytes)
{
	int i = 0;

	do {
		total_bytes -= sl[i].length;
		i++;
	} while (total_bytes > 0);

	return i;
}

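/*
 * Set up the src/dst scatterlist iterators for a new request and process its
 * first chunk.
 */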
static void mv_enqueue_new_req(struct ablkcipher_request *req)
{
	int num_sgs;

	cpg->cur_req = req;
	memset(&cpg->p, 0, sizeof(struct req_progress));

	num_sgs = count_sgs(req->src, req->nbytes);
	sg_miter_start(&cpg->p.src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);

	num_sgs = count_sgs(req->dst, req->nbytes);
	sg_miter_start(&cpg->p.dst_sg_it, req->dst, num_sgs, SG_MITER_TO_SG);
	mv_process_current_q(1);
}

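/*
 * Worker thread driving the state machine: it dequeues finished chunks and,
 * when the engine is idle, feeds it the next queued request.
 */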
static int queue_manag(void *data)
{
	cpg->eng_st = ENGINE_IDLE;
	do {
		struct ablkcipher_request *req;
		struct crypto_async_request *async_req = NULL;
		struct crypto_async_request *backlog = NULL;

		__set_current_state(TASK_INTERRUPTIBLE);

		if (cpg->eng_st == ENGINE_W_DEQUEUE)
			dequeue_complete_req();

		spin_lock_irq(&cpg->lock);
		if (cpg->eng_st == ENGINE_IDLE) {
			backlog = crypto_get_backlog(&cpg->queue);
			async_req = crypto_dequeue_request(&cpg->queue);
			if (async_req) {
				BUG_ON(cpg->eng_st != ENGINE_IDLE);
				cpg->eng_st = ENGINE_BUSY;
			}
		}
		spin_unlock_irq(&cpg->lock);

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		if (async_req) {
			req = container_of(async_req,
					struct ablkcipher_request, base);
			mv_enqueue_new_req(req);
		}

		schedule();
	} while (!kthread_should_stop());
	return 0;
}

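/* Enqueue a request under the lock and wake the worker thread. */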
static int mv_handle_req(struct ablkcipher_request *req)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cpg->lock, flags);
	ret = ablkcipher_enqueue_request(&cpg->queue, req);
	spin_unlock_irqrestore(&cpg->lock, flags);
	wake_up_process(cpg->queue_th);
	return ret;
}

static int mv_enc_aes_ecb(struct ablkcipher_request *req)
{
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	req_ctx->op = COP_AES_ECB;
	req_ctx->decrypt = 0;

	return mv_handle_req(req);
}

static int mv_dec_aes_ecb(struct ablkcipher_request *req)
{
	struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	req_ctx->op = COP_AES_ECB;
	req_ctx->decrypt = 1;

	compute_aes_dec_key(ctx);
	return mv_handle_req(req);
}

static int mv_enc_aes_cbc(struct ablkcipher_request *req)
{
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	req_ctx->op = COP_AES_CBC;
	req_ctx->decrypt = 0;

	return mv_handle_req(req);
}

static int mv_dec_aes_cbc(struct ablkcipher_request *req)
{
	struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	req_ctx->op = COP_AES_CBC;
	req_ctx->decrypt = 1;

	compute_aes_dec_key(ctx);
	return mv_handle_req(req);
}

static int mv_cra_init(struct crypto_tfm *tfm)
{
	tfm->crt_ablkcipher.reqsize = sizeof(struct mv_req_ctx);
	return 0;
}

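/*
 * Interrupt handler: acknowledge the accelerator-done interrupt and let the
 * worker thread dequeue the finished chunk.
 */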
static irqreturn_t crypto_int(int irq, void *priv)
{
	u32 val;

	val = readl(cpg->reg + SEC_ACCEL_INT_STATUS);
	if (!(val & SEC_INT_ACCEL0_DONE))
		return IRQ_NONE;

	val &= ~SEC_INT_ACCEL0_DONE;
	writel(val, cpg->reg + FPGA_INT_STATUS);
	writel(val, cpg->reg + SEC_ACCEL_INT_STATUS);
	BUG_ON(cpg->eng_st != ENGINE_BUSY);
	cpg->eng_st = ENGINE_W_DEQUEUE;
	wake_up_process(cpg->queue_th);
	return IRQ_HANDLED;
}

struct crypto_alg mv_aes_alg_ecb = {
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "mv-ecb-aes",
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_ctxsize		= sizeof(struct mv_ctx),
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= mv_cra_init,
	.cra_u			= {
		.ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= mv_setkey_aes,
			.encrypt	= mv_enc_aes_ecb,
			.decrypt	= mv_dec_aes_ecb,
		},
	},
};

struct crypto_alg mv_aes_alg_cbc = {
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "mv-cbc-aes",
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct mv_ctx),
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= mv_cra_init,
	.cra_u			= {
		.ablkcipher = {
			.ivsize		= AES_BLOCK_SIZE,
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= mv_setkey_aes,
			.encrypt	= mv_enc_aes_cbc,
			.decrypt	= mv_dec_aes_cbc,
		},
	},
};

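/*
 * Probe: map the register and SRAM resources, start the worker thread,
 * install the interrupt handler and register the AES algorithms.
 */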
static int mv_probe(struct platform_device *pdev)
{
	struct crypto_priv *cp;
	struct resource *res;
	int irq;
	int ret;

	if (cpg) {
		printk(KERN_ERR "Second crypto dev?\n");
		return -EEXIST;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");

	cp = kzalloc(sizeof(*cp), GFP_KERNEL);

	spin_lock_init(&cp->lock);
	crypto_init_queue(&cp->queue, 50);
	cp->reg = ioremap(res->start, res->end - res->start + 1);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram");

	cp->sram_size = res->end - res->start + 1;
	cp->max_req_size = cp->sram_size - SRAM_CFG_SPACE;
	cp->sram = ioremap(res->start, cp->sram_size);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0 || irq == NO_IRQ) {
		ret = -ENXIO;
	}

	platform_set_drvdata(pdev, cp);
	cpg = cp;

	cp->queue_th = kthread_run(queue_manag, cp, "mv_crypto");
	if (IS_ERR(cp->queue_th)) {
		ret = PTR_ERR(cp->queue_th);
	}

	ret = request_irq(irq, crypto_int, IRQF_DISABLED, dev_name(&pdev->dev),
			cp);

	writel(SEC_INT_ACCEL0_DONE, cpg->reg + SEC_ACCEL_INT_MASK);
	writel(SEC_CFG_STOP_DIG_ERR, cpg->reg + SEC_ACCEL_CFG);

	ret = crypto_register_alg(&mv_aes_alg_ecb);

	ret = crypto_register_alg(&mv_aes_alg_cbc);
	if (ret)
		crypto_unregister_alg(&mv_aes_alg_ecb);

	kthread_stop(cp->queue_th);

	platform_set_drvdata(pdev, NULL);
	return ret;
}

static int mv_remove(struct platform_device *pdev)
{
	struct crypto_priv *cp = platform_get_drvdata(pdev);

	crypto_unregister_alg(&mv_aes_alg_ecb);
	crypto_unregister_alg(&mv_aes_alg_cbc);
	kthread_stop(cp->queue_th);
	free_irq(cp->irq, cp);
	memset(cp->sram, 0, cp->sram_size);

static struct platform_driver marvell_crypto = {
	.owner		= THIS_MODULE,
};

MODULE_ALIAS("platform:mv_crypto");

static int __init mv_crypto_init(void)
{
	return platform_driver_register(&marvell_crypto);
}
module_init(mv_crypto_init);

static void __exit mv_crypto_exit(void)
{
	platform_driver_unregister(&marvell_crypto);
}
module_exit(mv_crypto_exit);

MODULE_AUTHOR("Sebastian Andrzej Siewior <sebastian@breakpoint.cc>");
MODULE_DESCRIPTION("Support for Marvell's cryptographic engine");
MODULE_LICENSE("GPL");