// SPDX-License-Identifier: GPL-2.0-only
/*
 * Support for Marvell's Cryptographic Engine and Security Accelerator (CESA)
 * that can be found on the following platforms: Orion, Kirkwood, Armada. This
 * driver supports the TDMA engine on platforms on which it is available.
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 * Author: Arnaud Ebalard <arno@natisbad.org>
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 */
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/genalloc.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kthread.h>
#include <linux/mbus.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>

#include "cesa.h"
/* Limit of the crypto queue before reaching the backlog */
#define CESA_CRYPTO_DEFAULT_MAX_QLEN 128
struct mv_cesa_dev *cesa_dev;
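
/*
 * Pop the next request from the engine's crypto queue. The caller must hold
 * engine->lock; the backlogged request being promoted, if any, is returned
 * through @backlog so it can be notified once the lock is released.
 */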
struct crypto_async_request *
mv_cesa_dequeue_req_locked(struct mv_cesa_engine *engine,
                           struct crypto_async_request **backlog)
{
        struct crypto_async_request *req;

        *backlog = crypto_get_backlog(&engine->queue);
        req = crypto_dequeue_request(&engine->queue);

        return req;
}
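
/*
 * If the engine is idle, pick the next pending request, signal its backlog
 * owner that processing has started (-EINPROGRESS) and launch the first
 * processing step.
 */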
static void mv_cesa_rearm_engine(struct mv_cesa_engine *engine)
{
        struct crypto_async_request *req = NULL, *backlog = NULL;
        struct mv_cesa_ctx *ctx;

        spin_lock_bh(&engine->lock);
        if (!engine->req) {
                req = mv_cesa_dequeue_req_locked(engine, &backlog);
                engine->req = req;
        }
        spin_unlock_bh(&engine->lock);

        if (!req)
                return;

        if (backlog)
                backlog->complete(backlog, -EINPROGRESS);

        ctx = crypto_tfm_ctx(req->tfm);
        ctx->ops->step(req);
}
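
/*
 * Handle completion of the current request in standard (non-TDMA) mode: the
 * per-transform ops decide whether the request is done (0), needs another
 * processing step (-EINPROGRESS) or failed.
 */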
static int mv_cesa_std_process(struct mv_cesa_engine *engine, u32 status)
{
        struct crypto_async_request *req;
        struct mv_cesa_ctx *ctx;
        int res;

        req = engine->req;
        ctx = crypto_tfm_ctx(req->tfm);
        res = ctx->ops->process(req, status);

        if (res == 0) {
                ctx->ops->complete(req);
                mv_cesa_engine_enqueue_complete_request(engine, req);
        } else if (res == -EINPROGRESS) {
                ctx->ops->step(req);
        }

        return res;
}
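
/* Dispatch interrupt handling to the TDMA path or the standard path. */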
static int mv_cesa_int_process(struct mv_cesa_engine *engine, u32 status)
{
        if (engine->chain.first && engine->chain.last)
                return mv_cesa_tdma_process(engine, status);

        return mv_cesa_std_process(engine, status);
}
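
/*
 * Clean up a finished request and invoke its completion callback with
 * bottom halves disabled.
 */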
static inline void
mv_cesa_complete_req(struct mv_cesa_ctx *ctx, struct crypto_async_request *req,
                     int res)
{
        ctx->ops->cleanup(req);
        local_bh_disable();
        req->complete(req, res);
        local_bh_enable();
}
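
/*
 * Threaded interrupt handler: acknowledge the engine interrupts, process the
 * current request, launch the next pending one and complete everything queued
 * on the completion list.
 */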
static irqreturn_t mv_cesa_int(int irq, void *priv)
{
        struct mv_cesa_engine *engine = priv;
        struct crypto_async_request *req;
        struct mv_cesa_ctx *ctx;
        u32 status, mask;
        irqreturn_t ret = IRQ_NONE;

        while (true) {
                int res;

                mask = mv_cesa_get_int_mask(engine);
                status = readl(engine->regs + CESA_SA_INT_STATUS);

                if (!(status & mask))
                        break;

                /*
                 * TODO: avoid clearing the FPGA_INT_STATUS if this is not
                 * relevant on some platforms.
                 */
                writel(~status, engine->regs + CESA_SA_FPGA_INT_STATUS);
                writel(~status, engine->regs + CESA_SA_INT_STATUS);

                /* Process fetched requests */
                res = mv_cesa_int_process(engine, status & mask);
                ret = IRQ_HANDLED;

                spin_lock_bh(&engine->lock);
                req = engine->req;
                if (res != -EINPROGRESS)
                        engine->req = NULL;
                spin_unlock_bh(&engine->lock);

                ctx = crypto_tfm_ctx(req->tfm);

                if (res && res != -EINPROGRESS)
                        mv_cesa_complete_req(ctx, req, res);

                /* Launch the next pending request */
                mv_cesa_rearm_engine(engine);

                /* Iterate over the complete queue */
                while (true) {
                        req = mv_cesa_engine_dequeue_complete_request(engine);
                        if (!req)
                                break;

                        ctx = crypto_tfm_ctx(req->tfm);
                        mv_cesa_complete_req(ctx, req, 0);
                }
        }

        return ret;
}
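
/*
 * Enqueue a request on the engine it was assigned to. DMA-backed requests are
 * chained to the engine's TDMA chain right away; the engine is then rearmed
 * in case it was idle.
 */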
int mv_cesa_queue_req(struct crypto_async_request *req,
                      struct mv_cesa_req *creq)
{
        int ret;
        struct mv_cesa_engine *engine = creq->engine;

        spin_lock_bh(&engine->lock);
        ret = crypto_enqueue_request(&engine->queue, req);
        if ((mv_cesa_req_get_type(creq) == CESA_DMA_REQ) &&
            (ret == -EINPROGRESS || ret == -EBUSY))
                mv_cesa_tdma_chain(engine, creq);
        spin_unlock_bh(&engine->lock);

        if (ret != -EINPROGRESS)
                return ret;

        mv_cesa_rearm_engine(engine);

        return -EINPROGRESS;
}
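
/*
 * Register the skcipher and ahash algorithms advertised by the detected
 * capabilities, unwinding any partial registration on failure.
 */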
static int mv_cesa_add_algs(struct mv_cesa_dev *cesa)
{
        int ret;
        int i, j;

        for (i = 0; i < cesa->caps->ncipher_algs; i++) {
                ret = crypto_register_skcipher(cesa->caps->cipher_algs[i]);
                if (ret)
                        goto err_unregister_crypto;
        }

        for (i = 0; i < cesa->caps->nahash_algs; i++) {
                ret = crypto_register_ahash(cesa->caps->ahash_algs[i]);
                if (ret)
                        goto err_unregister_ahash;
        }

        return 0;

err_unregister_ahash:
        for (j = 0; j < i; j++)
                crypto_unregister_ahash(cesa->caps->ahash_algs[j]);
        i = cesa->caps->ncipher_algs;

err_unregister_crypto:
        for (j = 0; j < i; j++)
                crypto_unregister_skcipher(cesa->caps->cipher_algs[j]);

        return ret;
}
static void mv_cesa_remove_algs(struct mv_cesa_dev *cesa)
{
        int i;

        for (i = 0; i < cesa->caps->nahash_algs; i++)
                crypto_unregister_ahash(cesa->caps->ahash_algs[i]);

        for (i = 0; i < cesa->caps->ncipher_algs; i++)
                crypto_unregister_skcipher(cesa->caps->cipher_algs[i]);
}
static struct skcipher_alg *orion_cipher_algs[] = {
        &mv_cesa_ecb_des_alg,
        &mv_cesa_cbc_des_alg,
        &mv_cesa_ecb_des3_ede_alg,
        &mv_cesa_cbc_des3_ede_alg,
        &mv_cesa_ecb_aes_alg,
        &mv_cesa_cbc_aes_alg,
};
static struct ahash_alg *orion_ahash_algs[] = {
        &mv_md5_alg,
        &mv_sha1_alg,
        &mv_ahmac_md5_alg,
        &mv_ahmac_sha1_alg,
};
static struct skcipher_alg *armada_370_cipher_algs[] = {
        &mv_cesa_ecb_des_alg,
        &mv_cesa_cbc_des_alg,
        &mv_cesa_ecb_des3_ede_alg,
        &mv_cesa_cbc_des3_ede_alg,
        &mv_cesa_ecb_aes_alg,
        &mv_cesa_cbc_aes_alg,
};
static struct ahash_alg *armada_370_ahash_algs[] = {
        &mv_md5_alg,
        &mv_sha1_alg,
        &mv_sha256_alg,
        &mv_ahmac_md5_alg,
        &mv_ahmac_sha1_alg,
        &mv_ahmac_sha256_alg,
};
static const struct mv_cesa_caps orion_caps = {
        .nengines = 1,
        .cipher_algs = orion_cipher_algs,
        .ncipher_algs = ARRAY_SIZE(orion_cipher_algs),
        .ahash_algs = orion_ahash_algs,
        .nahash_algs = ARRAY_SIZE(orion_ahash_algs),
        .has_tdma = false,
};
static const struct mv_cesa_caps kirkwood_caps = {
        .nengines = 1,
        .cipher_algs = orion_cipher_algs,
        .ncipher_algs = ARRAY_SIZE(orion_cipher_algs),
        .ahash_algs = orion_ahash_algs,
        .nahash_algs = ARRAY_SIZE(orion_ahash_algs),
        .has_tdma = true,
};
static const struct mv_cesa_caps armada_370_caps = {
        .nengines = 1,
        .cipher_algs = armada_370_cipher_algs,
        .ncipher_algs = ARRAY_SIZE(armada_370_cipher_algs),
        .ahash_algs = armada_370_ahash_algs,
        .nahash_algs = ARRAY_SIZE(armada_370_ahash_algs),
        .has_tdma = true,
};
static const struct mv_cesa_caps armada_xp_caps = {
        .nengines = 2,
        .cipher_algs = armada_370_cipher_algs,
        .ncipher_algs = ARRAY_SIZE(armada_370_cipher_algs),
        .ahash_algs = armada_370_ahash_algs,
        .nahash_algs = ARRAY_SIZE(armada_370_ahash_algs),
        .has_tdma = true,
};
static const struct of_device_id mv_cesa_of_match_table[] = {
        { .compatible = "marvell,orion-crypto", .data = &orion_caps },
        { .compatible = "marvell,kirkwood-crypto", .data = &kirkwood_caps },
        { .compatible = "marvell,dove-crypto", .data = &kirkwood_caps },
        { .compatible = "marvell,armada-370-crypto", .data = &armada_370_caps },
        { .compatible = "marvell,armada-xp-crypto", .data = &armada_xp_caps },
        { .compatible = "marvell,armada-375-crypto", .data = &armada_xp_caps },
        { .compatible = "marvell,armada-38x-crypto", .data = &armada_xp_caps },
        { /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, mv_cesa_of_match_table);
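
/*
 * Program the TDMA address decoding windows so the engine can master the
 * DRAM chip selects described by the mbus configuration.
 */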
static void
mv_cesa_conf_mbus_windows(struct mv_cesa_engine *engine,
                          const struct mbus_dram_target_info *dram)
{
        void __iomem *iobase = engine->regs;
        int i;

        for (i = 0; i < 4; i++) {
                writel(0, iobase + CESA_TDMA_WINDOW_CTRL(i));
                writel(0, iobase + CESA_TDMA_WINDOW_BASE(i));
        }

        for (i = 0; i < dram->num_cs; i++) {
                const struct mbus_dram_window *cs = dram->cs + i;

                writel(((cs->size - 1) & 0xffff0000) |
                       (cs->mbus_attr << 8) |
                       (dram->mbus_dram_target_id << 4) | 1,
                       iobase + CESA_TDMA_WINDOW_CTRL(i));
                writel(cs->base, iobase + CESA_TDMA_WINDOW_BASE(i));
        }
}
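
/*
 * Allocate the managed DMA pools (TDMA descriptors, operation contexts,
 * cache and padding buffers) used by the TDMA path. Nothing to do on
 * engines without TDMA support.
 */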
static int mv_cesa_dev_dma_init(struct mv_cesa_dev *cesa)
{
        struct device *dev = cesa->dev;
        struct mv_cesa_dev_dma *dma;

        if (!cesa->caps->has_tdma)
                return 0;

        dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
        if (!dma)
                return -ENOMEM;

        dma->tdma_desc_pool = dmam_pool_create("tdma_desc", dev,
                                               sizeof(struct mv_cesa_tdma_desc),
                                               16, 0);
        if (!dma->tdma_desc_pool)
                return -ENOMEM;

        dma->op_pool = dmam_pool_create("cesa_op", dev,
                                        sizeof(struct mv_cesa_op_ctx), 16, 0);
        if (!dma->op_pool)
                return -ENOMEM;

        dma->cache_pool = dmam_pool_create("cesa_cache", dev,
                                           CESA_MAX_HASH_BLOCK_SIZE, 1, 0);
        if (!dma->cache_pool)
                return -ENOMEM;

        dma->padding_pool = dmam_pool_create("cesa_padding", dev, 72, 1, 0);
        if (!dma->padding_pool)
                return -ENOMEM;

        cesa->dma = dma;

        return 0;
}
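
/*
 * Attach the engine to its local SRAM, preferably through the
 * "marvell,crypto-srams" genalloc pool, otherwise by mapping the "sram"
 * (or "sram0"/"sram1") memory resource directly.
 */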
static int mv_cesa_get_sram(struct platform_device *pdev, int idx)
{
        struct mv_cesa_dev *cesa = platform_get_drvdata(pdev);
        struct mv_cesa_engine *engine = &cesa->engines[idx];
        const char *res_name = "sram";
        struct resource *res;

        engine->pool = of_gen_pool_get(cesa->dev->of_node,
                                       "marvell,crypto-srams", idx);
        if (engine->pool) {
                engine->sram = gen_pool_dma_alloc(engine->pool,
                                                  cesa->sram_size,
                                                  &engine->sram_dma);
                if (engine->sram)
                        return 0;

                engine->pool = NULL;
        }

        if (cesa->caps->nengines > 1) {
                if (!idx)
                        res_name = "sram0";
                else
                        res_name = "sram1";
        }

        res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
                                           res_name);
        if (!res || resource_size(res) < cesa->sram_size)
                return -EINVAL;

        engine->sram = devm_ioremap_resource(cesa->dev, res);
        if (IS_ERR(engine->sram))
                return PTR_ERR(engine->sram);

        engine->sram_dma = dma_map_resource(cesa->dev, res->start,
                                            cesa->sram_size,
                                            DMA_BIDIRECTIONAL, 0);
        if (dma_mapping_error(cesa->dev, engine->sram_dma))
                return -ENOMEM;

        return 0;
}
static void mv_cesa_put_sram(struct platform_device *pdev, int idx)
{
        struct mv_cesa_dev *cesa = platform_get_drvdata(pdev);
        struct mv_cesa_engine *engine = &cesa->engines[idx];

        if (engine->pool)
                gen_pool_free(engine->pool, (unsigned long)engine->sram,
                              cesa->sram_size);
        else
                dma_unmap_resource(cesa->dev, engine->sram_dma,
                                   cesa->sram_size, DMA_BIDIRECTIONAL, 0);
}
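
/*
 * Probe: map the registers, set up the DMA pools, then initialize each
 * engine (SRAM, clocks, IRQ, accelerator registers) before registering the
 * supported algorithms with the crypto API.
 */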
static int mv_cesa_probe(struct platform_device *pdev)
{
        const struct mv_cesa_caps *caps = &orion_caps;
        const struct mbus_dram_target_info *dram;
        const struct of_device_id *match;
        struct device *dev = &pdev->dev;
        struct mv_cesa_dev *cesa;
        struct mv_cesa_engine *engines;
        int irq, ret, i, cpu;
        u32 sram_size;

        if (cesa_dev) {
                dev_err(&pdev->dev, "Only one CESA device authorized\n");
                return -EEXIST;
        }

        if (dev->of_node) {
                match = of_match_node(mv_cesa_of_match_table, dev->of_node);
                if (!match || !match->data)
                        return -ENOTSUPP;

                caps = match->data;
        }

        cesa = devm_kzalloc(dev, sizeof(*cesa), GFP_KERNEL);
        if (!cesa)
                return -ENOMEM;

        cesa->caps = caps;
        cesa->dev = dev;

        sram_size = CESA_SA_DEFAULT_SRAM_SIZE;
        of_property_read_u32(cesa->dev->of_node, "marvell,crypto-sram-size",
                             &sram_size);
        if (sram_size < CESA_SA_MIN_SRAM_SIZE)
                sram_size = CESA_SA_MIN_SRAM_SIZE;

        cesa->sram_size = sram_size;
        cesa->engines = devm_kcalloc(dev, caps->nengines, sizeof(*engines),
                                     GFP_KERNEL);
        if (!cesa->engines)
                return -ENOMEM;

        spin_lock_init(&cesa->lock);

        cesa->regs = devm_platform_ioremap_resource_byname(pdev, "regs");
        if (IS_ERR(cesa->regs))
                return PTR_ERR(cesa->regs);

        ret = mv_cesa_dev_dma_init(cesa);
        if (ret)
                return ret;

        dram = mv_mbus_dram_info_nooverlap();

        platform_set_drvdata(pdev, cesa);

        for (i = 0; i < caps->nengines; i++) {
                struct mv_cesa_engine *engine = &cesa->engines[i];
                char res_name[7];

                engine->id = i;
                spin_lock_init(&engine->lock);

                ret = mv_cesa_get_sram(pdev, i);
                if (ret)
                        goto err_cleanup;

                irq = platform_get_irq(pdev, i);
                if (irq < 0) {
                        ret = irq;
                        goto err_cleanup;
                }

                engine->irq = irq;

                /*
                 * Not all platforms can gate the CESA clocks: do not complain
                 * if the clock does not exist.
                 */
                snprintf(res_name, sizeof(res_name), "cesa%d", i);
                engine->clk = devm_clk_get(dev, res_name);
                if (IS_ERR(engine->clk)) {
                        engine->clk = devm_clk_get(dev, NULL);
                        if (IS_ERR(engine->clk))
                                engine->clk = NULL;
                }

                snprintf(res_name, sizeof(res_name), "cesaz%d", i);
                engine->zclk = devm_clk_get(dev, res_name);
                if (IS_ERR(engine->zclk))
                        engine->zclk = NULL;

                ret = clk_prepare_enable(engine->clk);
                if (ret)
                        goto err_cleanup;

                ret = clk_prepare_enable(engine->zclk);
                if (ret)
                        goto err_cleanup;

                engine->regs = cesa->regs + CESA_ENGINE_OFF(i);

                if (dram && cesa->caps->has_tdma)
                        mv_cesa_conf_mbus_windows(engine, dram);

                writel(0, engine->regs + CESA_SA_INT_STATUS);
                writel(CESA_SA_CFG_STOP_DIG_ERR,
                       engine->regs + CESA_SA_CFG);
                writel(engine->sram_dma & CESA_SA_SRAM_MSK,
                       engine->regs + CESA_SA_DESC_P0);

                ret = devm_request_threaded_irq(dev, irq, NULL, mv_cesa_int,
                                                IRQF_ONESHOT,
                                                dev_name(&pdev->dev),
                                                engine);
                if (ret)
                        goto err_cleanup;

                /* Set affinity */
                cpu = cpumask_local_spread(engine->id, NUMA_NO_NODE);
                irq_set_affinity_hint(irq, get_cpu_mask(cpu));

                crypto_init_queue(&engine->queue, CESA_CRYPTO_DEFAULT_MAX_QLEN);
                atomic_set(&engine->load, 0);
                INIT_LIST_HEAD(&engine->complete_queue);
        }

        cesa_dev = cesa;

        ret = mv_cesa_add_algs(cesa);
        if (ret) {
                cesa_dev = NULL;
                goto err_cleanup;
        }

        dev_info(dev, "CESA device successfully registered\n");

        return 0;

err_cleanup:
        for (i = 0; i < caps->nengines; i++) {
                clk_disable_unprepare(cesa->engines[i].zclk);
                clk_disable_unprepare(cesa->engines[i].clk);
                mv_cesa_put_sram(pdev, i);
                if (cesa->engines[i].irq > 0)
                        irq_set_affinity_hint(cesa->engines[i].irq, NULL);
        }

        return ret;
}
static int mv_cesa_remove(struct platform_device *pdev)
{
        struct mv_cesa_dev *cesa = platform_get_drvdata(pdev);
        int i;

        mv_cesa_remove_algs(cesa);

        for (i = 0; i < cesa->caps->nengines; i++) {
                clk_disable_unprepare(cesa->engines[i].zclk);
                clk_disable_unprepare(cesa->engines[i].clk);
                mv_cesa_put_sram(pdev, i);
                irq_set_affinity_hint(cesa->engines[i].irq, NULL);
        }

        return 0;
}
static const struct platform_device_id mv_cesa_plat_id_table[] = {
        { .name = "mv_crypto" },
        { /* sentinel */ },
};
MODULE_DEVICE_TABLE(platform, mv_cesa_plat_id_table);
static struct platform_driver marvell_cesa = {
        .probe = mv_cesa_probe,
        .remove = mv_cesa_remove,
        .id_table = mv_cesa_plat_id_table,
        .driver = {
                .name = "marvell-cesa",
                .of_match_table = mv_cesa_of_match_table,
        },
};
module_platform_driver(marvell_cesa);
MODULE_ALIAS("platform:mv_crypto");
MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>");
MODULE_AUTHOR("Arnaud Ebalard <arno@natisbad.org>");
MODULE_DESCRIPTION("Support for Marvell's cryptographic engine");
MODULE_LICENSE("GPL v2");