/*
 * Support for Marvell's Cryptographic Engine and Security Accelerator (CESA)
 * that can be found on the following platforms: Orion, Kirkwood, Armada. This
 * driver supports the TDMA engine on platforms on which it is available.
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 * Author: Arnaud Ebalard <arno@natisbad.org>
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/genalloc.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kthread.h>
#include <linux/mbus.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>

#include "cesa.h"

/* Limit of the crypto queue before reaching the backlog */
#define CESA_CRYPTO_DEFAULT_MAX_QLEN 128

struct mv_cesa_dev *cesa_dev;
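
/*
 * Pop the next request off the engine queue. Must be called with the
 * engine lock held, as the _locked suffix indicates; any backlogged
 * request is returned through @backlog so its owner can be notified.
 */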
struct crypto_async_request *
mv_cesa_dequeue_req_locked(struct mv_cesa_engine *engine,
			   struct crypto_async_request **backlog)
{
	struct crypto_async_request *req;

	*backlog = crypto_get_backlog(&engine->queue);
	req = crypto_dequeue_request(&engine->queue);

	if (!req)
		return NULL;

	return req;
}
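
/*
 * If the engine is idle, pick the next pending request off the queue,
 * notify the backlog owner (if any) and launch the first step of the
 * request on the hardware.
 */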
static void mv_cesa_rearm_engine(struct mv_cesa_engine *engine)
{
	struct crypto_async_request *req = NULL, *backlog = NULL;
	struct mv_cesa_ctx *ctx;

	spin_lock_bh(&engine->lock);
	if (!engine->req) {
		req = mv_cesa_dequeue_req_locked(engine, &backlog);
		engine->req = req;
	}
	spin_unlock_bh(&engine->lock);

	if (!req)
		return;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	ctx = crypto_tfm_ctx(req->tfm);
	ctx->ops->step(req);
}
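
/*
 * Standard (non-TDMA) completion path: let the transform's ->process()
 * hook inspect the engine status, then either finalize the request or
 * launch its next step when more processing is needed.
 */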
static int mv_cesa_std_process(struct mv_cesa_engine *engine, u32 status)
{
	struct crypto_async_request *req;
	struct mv_cesa_ctx *ctx;
	int res;

	req = engine->req;
	ctx = crypto_tfm_ctx(req->tfm);
	res = ctx->ops->process(req, status);

	if (res == 0) {
		ctx->ops->complete(req);
		mv_cesa_engine_enqueue_complete_request(engine, req);
	} else if (res == -EINPROGRESS) {
		ctx->ops->step(req);
	}

	return res;
}
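
/*
 * Route interrupt handling to the TDMA path when a descriptor chain is
 * queued on the engine, and to the standard path otherwise.
 */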
static int mv_cesa_int_process(struct mv_cesa_engine *engine, u32 status)
{
	if (engine->chain.first && engine->chain.last)
		return mv_cesa_tdma_process(engine, status);

	return mv_cesa_std_process(engine, status);
}
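
/*
 * Clean up a request and invoke its completion callback with the final
 * status code.
 */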
static inline void
mv_cesa_complete_req(struct mv_cesa_ctx *ctx, struct crypto_async_request *req,
		     int res)
{
	ctx->ops->cleanup(req);
	local_bh_disable();
	req->complete(req, res);
	local_bh_enable();
}
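
/*
 * Interrupt handler: acknowledge and clear the engine interrupt, process
 * the request that was being executed, then rearm the engine and drain
 * the completion queue.
 */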
static irqreturn_t mv_cesa_int(int irq, void *priv)
{
	struct mv_cesa_engine *engine = priv;
	struct crypto_async_request *req;
	struct mv_cesa_ctx *ctx;
	u32 status, mask;
	irqreturn_t ret = IRQ_NONE;

	while (true) {
		int res;

		mask = mv_cesa_get_int_mask(engine);
		status = readl(engine->regs + CESA_SA_INT_STATUS);

		if (!(status & mask))
			break;

		/*
		 * TODO: avoid clearing the FPGA_INT_STATUS if this is not
		 * relevant on some platforms.
		 */
		writel(~status, engine->regs + CESA_SA_FPGA_INT_STATUS);
		writel(~status, engine->regs + CESA_SA_INT_STATUS);

		/* Process fetched requests */
		res = mv_cesa_int_process(engine, status & mask);
		ret = IRQ_HANDLED;

		spin_lock_bh(&engine->lock);
		req = engine->req;
		if (res != -EINPROGRESS)
			engine->req = NULL;
		spin_unlock_bh(&engine->lock);

		ctx = crypto_tfm_ctx(req->tfm);

		if (res && res != -EINPROGRESS)
			mv_cesa_complete_req(ctx, req, res);

		/* Launch the next pending request */
		mv_cesa_rearm_engine(engine);

		/* Iterate over the complete queue */
		while (true) {
			req = mv_cesa_engine_dequeue_complete_request(engine);
			if (!req)
				break;

			ctx = crypto_tfm_ctx(req->tfm);
			mv_cesa_complete_req(ctx, req, 0);
		}
	}

	return ret;
}
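
/*
 * Enqueue a request on an engine and, for DMA requests, chain its TDMA
 * descriptors behind those already queued, then make sure the engine is
 * running.
 */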
int mv_cesa_queue_req(struct crypto_async_request *req,
		      struct mv_cesa_req *creq)
{
	int ret;
	struct mv_cesa_engine *engine = creq->engine;

	spin_lock_bh(&engine->lock);
	ret = crypto_enqueue_request(&engine->queue, req);
	if ((mv_cesa_req_get_type(creq) == CESA_DMA_REQ) &&
	    (ret == -EINPROGRESS || ret == -EBUSY))
		mv_cesa_tdma_chain(engine, creq);
	spin_unlock_bh(&engine->lock);

	if (ret != -EINPROGRESS)
		return ret;

	mv_cesa_rearm_engine(engine);

	return -EINPROGRESS;
}
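
/*
 * Register all cipher and hash algorithms advertised by the device
 * capabilities, unwinding every successful registration on error.
 */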
static int mv_cesa_add_algs(struct mv_cesa_dev *cesa)
{
	int ret;
	int i, j;

	for (i = 0; i < cesa->caps->ncipher_algs; i++) {
		ret = crypto_register_skcipher(cesa->caps->cipher_algs[i]);
		if (ret)
			goto err_unregister_crypto;
	}

	for (i = 0; i < cesa->caps->nahash_algs; i++) {
		ret = crypto_register_ahash(cesa->caps->ahash_algs[i]);
		if (ret)
			goto err_unregister_ahash;
	}

	return 0;

err_unregister_ahash:
	for (j = 0; j < i; j++)
		crypto_unregister_ahash(cesa->caps->ahash_algs[j]);
	i = cesa->caps->ncipher_algs;

err_unregister_crypto:
	for (j = 0; j < i; j++)
		crypto_unregister_skcipher(cesa->caps->cipher_algs[j]);

	return ret;
}

static void mv_cesa_remove_algs(struct mv_cesa_dev *cesa)
{
	int i;

	for (i = 0; i < cesa->caps->nahash_algs; i++)
		crypto_unregister_ahash(cesa->caps->ahash_algs[i]);

	for (i = 0; i < cesa->caps->ncipher_algs; i++)
		crypto_unregister_skcipher(cesa->caps->cipher_algs[i]);
}

static struct skcipher_alg *orion_cipher_algs[] = {
	&mv_cesa_ecb_des_alg,
	&mv_cesa_cbc_des_alg,
	&mv_cesa_ecb_des3_ede_alg,
	&mv_cesa_cbc_des3_ede_alg,
	&mv_cesa_ecb_aes_alg,
	&mv_cesa_cbc_aes_alg,
};

static struct ahash_alg *orion_ahash_algs[] = {
	&mv_md5_alg,
	&mv_sha1_alg,
	&mv_ahmac_md5_alg,
	&mv_ahmac_sha1_alg,
};

static struct skcipher_alg *armada_370_cipher_algs[] = {
	&mv_cesa_ecb_des_alg,
	&mv_cesa_cbc_des_alg,
	&mv_cesa_ecb_des3_ede_alg,
	&mv_cesa_cbc_des3_ede_alg,
	&mv_cesa_ecb_aes_alg,
	&mv_cesa_cbc_aes_alg,
};

static struct ahash_alg *armada_370_ahash_algs[] = {
	&mv_md5_alg,
	&mv_sha1_alg,
	&mv_sha256_alg,
	&mv_ahmac_md5_alg,
	&mv_ahmac_sha1_alg,
	&mv_ahmac_sha256_alg,
};

static const struct mv_cesa_caps orion_caps = {
	.nengines = 1,
	.cipher_algs = orion_cipher_algs,
	.ncipher_algs = ARRAY_SIZE(orion_cipher_algs),
	.ahash_algs = orion_ahash_algs,
	.nahash_algs = ARRAY_SIZE(orion_ahash_algs),
	.has_tdma = false,
};

static const struct mv_cesa_caps kirkwood_caps = {
	.nengines = 1,
	.cipher_algs = orion_cipher_algs,
	.ncipher_algs = ARRAY_SIZE(orion_cipher_algs),
	.ahash_algs = orion_ahash_algs,
	.nahash_algs = ARRAY_SIZE(orion_ahash_algs),
	.has_tdma = true,
};

static const struct mv_cesa_caps armada_370_caps = {
	.nengines = 1,
	.cipher_algs = armada_370_cipher_algs,
	.ncipher_algs = ARRAY_SIZE(armada_370_cipher_algs),
	.ahash_algs = armada_370_ahash_algs,
	.nahash_algs = ARRAY_SIZE(armada_370_ahash_algs),
	.has_tdma = true,
};

static const struct mv_cesa_caps armada_xp_caps = {
	.nengines = 2,
	.cipher_algs = armada_370_cipher_algs,
	.ncipher_algs = ARRAY_SIZE(armada_370_cipher_algs),
	.ahash_algs = armada_370_ahash_algs,
	.nahash_algs = ARRAY_SIZE(armada_370_ahash_algs),
	.has_tdma = true,
};

static const struct of_device_id mv_cesa_of_match_table[] = {
	{ .compatible = "marvell,orion-crypto", .data = &orion_caps },
	{ .compatible = "marvell,kirkwood-crypto", .data = &kirkwood_caps },
	{ .compatible = "marvell,dove-crypto", .data = &kirkwood_caps },
	{ .compatible = "marvell,armada-370-crypto", .data = &armada_370_caps },
	{ .compatible = "marvell,armada-xp-crypto", .data = &armada_xp_caps },
	{ .compatible = "marvell,armada-375-crypto", .data = &armada_xp_caps },
	{ .compatible = "marvell,armada-38x-crypto", .data = &armada_xp_caps },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, mv_cesa_of_match_table);
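
/*
 * Program the TDMA address decoding windows so that the engine's DMA
 * accesses match the SoC's DRAM chip-select layout.
 */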
static void
mv_cesa_conf_mbus_windows(struct mv_cesa_engine *engine,
			  const struct mbus_dram_target_info *dram)
{
	void __iomem *iobase = engine->regs;
	int i;

	for (i = 0; i < 4; i++) {
		writel(0, iobase + CESA_TDMA_WINDOW_CTRL(i));
		writel(0, iobase + CESA_TDMA_WINDOW_BASE(i));
	}

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		writel(((cs->size - 1) & 0xffff0000) |
		       (cs->mbus_attr << 8) |
		       (dram->mbus_dram_target_id << 4) | 1,
		       iobase + CESA_TDMA_WINDOW_CTRL(i));
		writel(cs->base, iobase + CESA_TDMA_WINDOW_BASE(i));
	}
}
*cesa
)
342 struct device
*dev
= cesa
->dev
;
343 struct mv_cesa_dev_dma
*dma
;
345 if (!cesa
->caps
->has_tdma
)
348 dma
= devm_kzalloc(dev
, sizeof(*dma
), GFP_KERNEL
);
352 dma
->tdma_desc_pool
= dmam_pool_create("tdma_desc", dev
,
353 sizeof(struct mv_cesa_tdma_desc
),
355 if (!dma
->tdma_desc_pool
)
358 dma
->op_pool
= dmam_pool_create("cesa_op", dev
,
359 sizeof(struct mv_cesa_op_ctx
), 16, 0);
363 dma
->cache_pool
= dmam_pool_create("cesa_cache", dev
,
364 CESA_MAX_HASH_BLOCK_SIZE
, 1, 0);
365 if (!dma
->cache_pool
)
368 dma
->padding_pool
= dmam_pool_create("cesa_padding", dev
, 72, 1, 0);
369 if (!dma
->padding_pool
)
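
/*
 * Obtain the SRAM used to stage crypto operations: preferably from the
 * "marvell,crypto-srams" genalloc pool, otherwise falling back to a
 * plain memory resource that is then mapped for DMA.
 */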
static int mv_cesa_get_sram(struct platform_device *pdev, int idx)
{
	struct mv_cesa_dev *cesa = platform_get_drvdata(pdev);
	struct mv_cesa_engine *engine = &cesa->engines[idx];
	const char *res_name = "sram";
	struct resource *res;

	engine->pool = of_gen_pool_get(cesa->dev->of_node,
				       "marvell,crypto-srams", idx);
	if (engine->pool) {
		engine->sram = gen_pool_dma_alloc(engine->pool,
						  cesa->sram_size,
						  &engine->sram_dma);
		if (engine->sram)
			return 0;

		engine->pool = NULL;
		return -ENOMEM;
	}

	if (cesa->caps->nengines > 1) {
		if (!idx)
			res_name = "sram0";
		else
			res_name = "sram1";
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
					   res_name);
	if (!res || resource_size(res) < cesa->sram_size)
		return -EINVAL;

	engine->sram = devm_ioremap_resource(cesa->dev, res);
	if (IS_ERR(engine->sram))
		return PTR_ERR(engine->sram);

	engine->sram_dma = dma_map_resource(cesa->dev, res->start,
					    cesa->sram_size,
					    DMA_BIDIRECTIONAL, 0);
	if (dma_mapping_error(cesa->dev, engine->sram_dma))
		return -ENOMEM;

	return 0;
}

static void mv_cesa_put_sram(struct platform_device *pdev, int idx)
{
	struct mv_cesa_dev *cesa = platform_get_drvdata(pdev);
	struct mv_cesa_engine *engine = &cesa->engines[idx];

	if (engine->pool)
		gen_pool_free(engine->pool, (unsigned long)engine->sram,
			      cesa->sram_size);
	else
		dma_unmap_resource(cesa->dev, engine->sram_dma,
				   cesa->sram_size, DMA_BIDIRECTIONAL, 0);
}
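
/*
 * Probe: match the per-SoC capabilities, map registers and SRAM, set up
 * clocks, mbus windows and interrupts for each engine, then register
 * the supported algorithms.
 */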
static int mv_cesa_probe(struct platform_device *pdev)
{
	const struct mv_cesa_caps *caps = &orion_caps;
	const struct mbus_dram_target_info *dram;
	const struct of_device_id *match;
	struct device *dev = &pdev->dev;
	struct mv_cesa_dev *cesa;
	struct mv_cesa_engine *engines;
	struct resource *res;
	int irq, ret, i;
	u32 sram_size;

	if (cesa_dev) {
		dev_err(&pdev->dev, "Only one CESA device authorized\n");
		return -EEXIST;
	}

	if (dev->of_node) {
		match = of_match_node(mv_cesa_of_match_table, dev->of_node);
		if (!match || !match->data)
			return -ENOTSUPP;

		caps = match->data;
	}

	cesa = devm_kzalloc(dev, sizeof(*cesa), GFP_KERNEL);
	if (!cesa)
		return -ENOMEM;

	cesa->caps = caps;
	cesa->dev = dev;

	sram_size = CESA_SA_DEFAULT_SRAM_SIZE;
	of_property_read_u32(cesa->dev->of_node, "marvell,crypto-sram-size",
			     &sram_size);
	if (sram_size < CESA_SA_MIN_SRAM_SIZE)
		sram_size = CESA_SA_MIN_SRAM_SIZE;

	cesa->sram_size = sram_size;
	cesa->engines = devm_kcalloc(dev, caps->nengines, sizeof(*engines),
				     GFP_KERNEL);
	if (!cesa->engines)
		return -ENOMEM;

	spin_lock_init(&cesa->lock);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
	cesa->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(cesa->regs))
		return PTR_ERR(cesa->regs);

	ret = mv_cesa_dev_dma_init(cesa);
	if (ret)
		return ret;

	dram = mv_mbus_dram_info_nooverlap();

	platform_set_drvdata(pdev, cesa);

	for (i = 0; i < caps->nengines; i++) {
		struct mv_cesa_engine *engine = &cesa->engines[i];
		char res_name[7];

		engine->id = i;
		spin_lock_init(&engine->lock);

		ret = mv_cesa_get_sram(pdev, i);
		if (ret)
			goto err_cleanup;

		irq = platform_get_irq(pdev, i);
		if (irq < 0) {
			ret = irq;
			goto err_cleanup;
		}

		/*
		 * Not all platforms can gate the CESA clocks: do not complain
		 * if the clock does not exist.
		 */
		snprintf(res_name, sizeof(res_name), "cesa%d", i);
		engine->clk = devm_clk_get(dev, res_name);
		if (IS_ERR(engine->clk)) {
			engine->clk = devm_clk_get(dev, NULL);
			if (IS_ERR(engine->clk))
				engine->clk = NULL;
		}

		snprintf(res_name, sizeof(res_name), "cesaz%d", i);
		engine->zclk = devm_clk_get(dev, res_name);
		if (IS_ERR(engine->zclk))
			engine->zclk = NULL;

		ret = clk_prepare_enable(engine->clk);
		if (ret)
			goto err_cleanup;

		ret = clk_prepare_enable(engine->zclk);
		if (ret)
			goto err_cleanup;

		engine->regs = cesa->regs + CESA_ENGINE_OFF(i);

		if (dram && cesa->caps->has_tdma)
			mv_cesa_conf_mbus_windows(engine, dram);

		writel(0, engine->regs + CESA_SA_INT_STATUS);
		writel(CESA_SA_CFG_STOP_DIG_ERR,
		       engine->regs + CESA_SA_CFG);
		writel(engine->sram_dma & CESA_SA_SRAM_MSK,
		       engine->regs + CESA_SA_DESC_P0);

		ret = devm_request_threaded_irq(dev, irq, NULL, mv_cesa_int,
						IRQF_ONESHOT,
						dev_name(&pdev->dev),
						engine);
		if (ret)
			goto err_cleanup;

		crypto_init_queue(&engine->queue, CESA_CRYPTO_DEFAULT_MAX_QLEN);
		atomic_set(&engine->load, 0);
		INIT_LIST_HEAD(&engine->complete_queue);
	}

	cesa_dev = cesa;

	ret = mv_cesa_add_algs(cesa);
	if (ret) {
		cesa_dev = NULL;
		goto err_cleanup;
	}

	dev_info(dev, "CESA device successfully registered\n");

	return 0;

err_cleanup:
	for (i = 0; i < caps->nengines; i++) {
		clk_disable_unprepare(cesa->engines[i].zclk);
		clk_disable_unprepare(cesa->engines[i].clk);
		mv_cesa_put_sram(pdev, i);
	}

	return ret;
}

static int mv_cesa_remove(struct platform_device *pdev)
{
	struct mv_cesa_dev *cesa = platform_get_drvdata(pdev);
	int i;

	mv_cesa_remove_algs(cesa);

	for (i = 0; i < cesa->caps->nengines; i++) {
		clk_disable_unprepare(cesa->engines[i].zclk);
		clk_disable_unprepare(cesa->engines[i].clk);
		mv_cesa_put_sram(pdev, i);
	}

	return 0;
}

static const struct platform_device_id mv_cesa_plat_id_table[] = {
	{ .name = "mv_crypto" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(platform, mv_cesa_plat_id_table);

static struct platform_driver marvell_cesa = {
	.probe = mv_cesa_probe,
	.remove = mv_cesa_remove,
	.id_table = mv_cesa_plat_id_table,
	.driver = {
		.name = "marvell-cesa",
		.of_match_table = mv_cesa_of_match_table,
	},
};
module_platform_driver(marvell_cesa);

MODULE_ALIAS("platform:mv_crypto");
MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>");
MODULE_AUTHOR("Arnaud Ebalard <arno@natisbad.org>");
MODULE_DESCRIPTION("Support for Marvell's cryptographic engine");
MODULE_LICENSE("GPL v2");