/*
 * Support for Marvell's Cryptographic Engine and Security Accelerator (CESA)
 * that can be found on the following platforms: Orion, Kirkwood, Armada. This
 * driver supports the TDMA engine on platforms on which it is available.
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 * Author: Arnaud Ebalard <arno@natisbad.org>
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include <linux/delay.h>
#include <linux/genalloc.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kthread.h>
#include <linux/mbus.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>

#include "cesa.h"

/* Limit of the crypto queue before reaching the backlog */
#define CESA_CRYPTO_DEFAULT_MAX_QLEN 128

static int allhwsupport = !IS_ENABLED(CONFIG_CRYPTO_DEV_MV_CESA);
module_param_named(allhwsupport, allhwsupport, int, 0444);
MODULE_PARM_DESC(allhwsupport, "Enable support for all hardware (even if it overlaps with the mv_cesa driver)");

struct mv_cesa_dev *cesa_dev;
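
/*
 * Pop the next pending request off the engine queue, reporting any
 * backlogged request through @backlog. Must be called with engine->lock
 * held.
 */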
struct crypto_async_request *
mv_cesa_dequeue_req_locked(struct mv_cesa_engine *engine,
			   struct crypto_async_request **backlog)
{
	struct crypto_async_request *req;

	*backlog = crypto_get_backlog(&engine->queue);
	req = crypto_dequeue_request(&engine->queue);

	if (!req)
		return NULL;

	return req;
}
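
/*
 * If the engine is idle, dequeue the next pending request, mark the engine
 * as busy and start processing it. The backlogged request, if any, is told
 * that it is now in progress.
 */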
static void mv_cesa_rearm_engine(struct mv_cesa_engine *engine)
{
	struct crypto_async_request *req = NULL, *backlog = NULL;
	struct mv_cesa_ctx *ctx;

	spin_lock_bh(&engine->lock);
	if (!engine->req) {
		req = mv_cesa_dequeue_req_locked(engine, &backlog);
		engine->req = req;
	}
	spin_unlock_bh(&engine->lock);

	if (!req)
		return;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	ctx = crypto_tfm_ctx(req->tfm);
	ctx->ops->step(req);
}
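
/*
 * Standard (non-TDMA) processing path: let the transform-specific
 * ->process() handler consume the interrupt status, then either complete
 * the request or step it again to process its next chunk.
 */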
static int mv_cesa_std_process(struct mv_cesa_engine *engine, u32 status)
{
	struct crypto_async_request *req;
	struct mv_cesa_ctx *ctx;
	int res;

	req = engine->req;
	ctx = crypto_tfm_ctx(req->tfm);
	res = ctx->ops->process(req, status);

	if (res == 0) {
		ctx->ops->complete(req);
		mv_cesa_engine_enqueue_complete_request(engine, req);
	} else if (res == -EINPROGRESS) {
		ctx->ops->step(req);
	}

	return res;
}
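
/*
 * Dispatch interrupt processing to the TDMA handler when a descriptor
 * chain is active, and to the standard handler otherwise.
 */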
static int mv_cesa_int_process(struct mv_cesa_engine *engine, u32 status)
{
	if (engine->chain.first && engine->chain.last)
		return mv_cesa_tdma_process(engine, status);

	return mv_cesa_std_process(engine, status);
}
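
/*
 * Run the transform-specific cleanup, then call the crypto API completion
 * callback with softirqs disabled.
 */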
static inline void
mv_cesa_complete_req(struct mv_cesa_ctx *ctx, struct crypto_async_request *req,
		     int res)
{
	ctx->ops->cleanup(req);
	local_bh_disable();
	req->complete(req, res);
	local_bh_enable();
}
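
/*
 * Interrupt handler: acknowledge the engine interrupts, process the current
 * request, complete it or step it again depending on the result, rearm the
 * engine with the next pending request, and drain the complete queue.
 */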
static irqreturn_t mv_cesa_int(int irq, void *priv)
{
	struct mv_cesa_engine *engine = priv;
	struct crypto_async_request *req;
	struct mv_cesa_ctx *ctx;
	u32 status, mask;
	irqreturn_t ret = IRQ_NONE;

	while (true) {
		int res;

		mask = mv_cesa_get_int_mask(engine);
		status = readl(engine->regs + CESA_SA_INT_STATUS);

		if (!(status & mask))
			break;

		/*
		 * TODO: avoid clearing the FPGA_INT_STATUS if this is not
		 * relevant on some platforms.
		 */
		writel(~status, engine->regs + CESA_SA_FPGA_INT_STATUS);
		writel(~status, engine->regs + CESA_SA_INT_STATUS);

		/* Process fetched requests */
		res = mv_cesa_int_process(engine, status & mask);
		ret = IRQ_HANDLED;

		spin_lock_bh(&engine->lock);
		req = engine->req;
		if (res != -EINPROGRESS)
			engine->req = NULL;
		spin_unlock_bh(&engine->lock);

		ctx = crypto_tfm_ctx(req->tfm);

		if (res && res != -EINPROGRESS)
			mv_cesa_complete_req(ctx, req, res);

		/* Launch the next pending request */
		mv_cesa_rearm_engine(engine);

		/* Iterate over the complete queue */
		while (true) {
			req = mv_cesa_engine_dequeue_complete_request(engine);
			if (!req)
				break;

			ctx = crypto_tfm_ctx(req->tfm);
			mv_cesa_complete_req(ctx, req, 0);
		}
	}

	return ret;
}
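
/*
 * Enqueue a request on the engine it has been assigned to. DMA-backed
 * requests are chained to the engine's TDMA descriptor list at enqueue
 * time, so the engine can process them back-to-back.
 */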
int mv_cesa_queue_req(struct crypto_async_request *req,
		      struct mv_cesa_req *creq)
{
	int ret;
	struct mv_cesa_engine *engine = creq->engine;

	spin_lock_bh(&engine->lock);
	ret = crypto_enqueue_request(&engine->queue, req);
	if ((mv_cesa_req_get_type(creq) == CESA_DMA_REQ) &&
	    (ret == -EINPROGRESS ||
	    (ret == -EBUSY && req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
		mv_cesa_tdma_chain(engine, creq);
	spin_unlock_bh(&engine->lock);

	if (ret != -EINPROGRESS)
		return ret;

	mv_cesa_rearm_engine(engine);

	return -EINPROGRESS;
}
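
/*
 * Register all cipher and hash algorithms supported by this CESA variant,
 * unwinding the registrations already done on failure.
 */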
static int mv_cesa_add_algs(struct mv_cesa_dev *cesa)
{
	int ret;
	int i, j;

	for (i = 0; i < cesa->caps->ncipher_algs; i++) {
		ret = crypto_register_alg(cesa->caps->cipher_algs[i]);
		if (ret)
			goto err_unregister_crypto;
	}

	for (i = 0; i < cesa->caps->nahash_algs; i++) {
		ret = crypto_register_ahash(cesa->caps->ahash_algs[i]);
		if (ret)
			goto err_unregister_ahash;
	}

	return 0;

err_unregister_ahash:
	for (j = 0; j < i; j++)
		crypto_unregister_ahash(cesa->caps->ahash_algs[j]);
	i = cesa->caps->ncipher_algs;

err_unregister_crypto:
	for (j = 0; j < i; j++)
		crypto_unregister_alg(cesa->caps->cipher_algs[j]);

	return ret;
}

static void mv_cesa_remove_algs(struct mv_cesa_dev *cesa)
{
	int i;

	for (i = 0; i < cesa->caps->nahash_algs; i++)
		crypto_unregister_ahash(cesa->caps->ahash_algs[i]);

	for (i = 0; i < cesa->caps->ncipher_algs; i++)
		crypto_unregister_alg(cesa->caps->cipher_algs[i]);
}

static struct crypto_alg *orion_cipher_algs[] = {
	&mv_cesa_ecb_des_alg,
	&mv_cesa_cbc_des_alg,
	&mv_cesa_ecb_des3_ede_alg,
	&mv_cesa_cbc_des3_ede_alg,
	&mv_cesa_ecb_aes_alg,
	&mv_cesa_cbc_aes_alg,
};

static struct ahash_alg *orion_ahash_algs[] = {
	&mv_md5_alg,
	&mv_sha1_alg,
	&mv_ahmac_md5_alg,
	&mv_ahmac_sha1_alg,
};

static struct crypto_alg *armada_370_cipher_algs[] = {
	&mv_cesa_ecb_des_alg,
	&mv_cesa_cbc_des_alg,
	&mv_cesa_ecb_des3_ede_alg,
	&mv_cesa_cbc_des3_ede_alg,
	&mv_cesa_ecb_aes_alg,
	&mv_cesa_cbc_aes_alg,
};

static struct ahash_alg *armada_370_ahash_algs[] = {
	&mv_md5_alg,
	&mv_sha1_alg,
	&mv_sha256_alg,
	&mv_ahmac_md5_alg,
	&mv_ahmac_sha1_alg,
	&mv_ahmac_sha256_alg,
};

static const struct mv_cesa_caps orion_caps = {
	.nengines = 1,
	.cipher_algs = orion_cipher_algs,
	.ncipher_algs = ARRAY_SIZE(orion_cipher_algs),
	.ahash_algs = orion_ahash_algs,
	.nahash_algs = ARRAY_SIZE(orion_ahash_algs),
	.has_tdma = false,
};

static const struct mv_cesa_caps kirkwood_caps = {
	.nengines = 1,
	.cipher_algs = orion_cipher_algs,
	.ncipher_algs = ARRAY_SIZE(orion_cipher_algs),
	.ahash_algs = orion_ahash_algs,
	.nahash_algs = ARRAY_SIZE(orion_ahash_algs),
	.has_tdma = true,
};

static const struct mv_cesa_caps armada_370_caps = {
	.nengines = 1,
	.cipher_algs = armada_370_cipher_algs,
	.ncipher_algs = ARRAY_SIZE(armada_370_cipher_algs),
	.ahash_algs = armada_370_ahash_algs,
	.nahash_algs = ARRAY_SIZE(armada_370_ahash_algs),
	.has_tdma = true,
};

static const struct mv_cesa_caps armada_xp_caps = {
	.nengines = 2,
	.cipher_algs = armada_370_cipher_algs,
	.ncipher_algs = ARRAY_SIZE(armada_370_cipher_algs),
	.ahash_algs = armada_370_ahash_algs,
	.nahash_algs = ARRAY_SIZE(armada_370_ahash_algs),
	.has_tdma = true,
};

static const struct of_device_id mv_cesa_of_match_table[] = {
	{ .compatible = "marvell,orion-crypto", .data = &orion_caps },
	{ .compatible = "marvell,kirkwood-crypto", .data = &kirkwood_caps },
	{ .compatible = "marvell,dove-crypto", .data = &kirkwood_caps },
	{ .compatible = "marvell,armada-370-crypto", .data = &armada_370_caps },
	{ .compatible = "marvell,armada-xp-crypto", .data = &armada_xp_caps },
	{ .compatible = "marvell,armada-375-crypto", .data = &armada_xp_caps },
	{ .compatible = "marvell,armada-38x-crypto", .data = &armada_xp_caps },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mv_cesa_of_match_table);
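
/*
 * Program the TDMA address decoding windows to match the MBus DRAM layout,
 * so that the DMA engine can reach every DRAM chip-select.
 */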
static void
mv_cesa_conf_mbus_windows(struct mv_cesa_engine *engine,
			  const struct mbus_dram_target_info *dram)
{
	void __iomem *iobase = engine->regs;
	int i;

	for (i = 0; i < 4; i++) {
		writel(0, iobase + CESA_TDMA_WINDOW_CTRL(i));
		writel(0, iobase + CESA_TDMA_WINDOW_BASE(i));
	}

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		writel(((cs->size - 1) & 0xffff0000) |
		       (cs->mbus_attr << 8) |
		       (dram->mbus_dram_target_id << 4) | 1,
		       iobase + CESA_TDMA_WINDOW_CTRL(i));
		writel(cs->base, iobase + CESA_TDMA_WINDOW_BASE(i));
	}
}
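
/*
 * Allocate the DMA pools used by the TDMA path: transfer descriptors,
 * operation contexts, hash cache buffers and hash padding buffers. There is
 * nothing to do on variants without a TDMA engine.
 */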
static int mv_cesa_dev_dma_init(struct mv_cesa_dev *cesa)
{
	struct device *dev = cesa->dev;
	struct mv_cesa_dev_dma *dma;

	if (!cesa->caps->has_tdma)
		return 0;

	dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
	if (!dma)
		return -ENOMEM;

	dma->tdma_desc_pool = dmam_pool_create("tdma_desc", dev,
					sizeof(struct mv_cesa_tdma_desc),
					16, 0);
	if (!dma->tdma_desc_pool)
		return -ENOMEM;

	dma->op_pool = dmam_pool_create("cesa_op", dev,
					sizeof(struct mv_cesa_op_ctx), 16, 0);
	if (!dma->op_pool)
		return -ENOMEM;

	dma->cache_pool = dmam_pool_create("cesa_cache", dev,
					   CESA_MAX_HASH_BLOCK_SIZE, 1, 0);
	if (!dma->cache_pool)
		return -ENOMEM;

	dma->padding_pool = dmam_pool_create("cesa_padding", dev, 72, 1, 0);
	if (!dma->padding_pool)
		return -ENOMEM;

	cesa->dma = dma;

	return 0;
}
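
/*
 * Reserve the local SRAM the engine stages its operations in: prefer the
 * "marvell,crypto-srams" gen_pool when the device tree provides one, and
 * fall back to mapping the SRAM MMIO resource directly.
 */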
static int mv_cesa_get_sram(struct platform_device *pdev, int idx)
{
	struct mv_cesa_dev *cesa = platform_get_drvdata(pdev);
	struct mv_cesa_engine *engine = &cesa->engines[idx];
	const char *res_name = "sram";
	struct resource *res;

	engine->pool = of_gen_pool_get(cesa->dev->of_node,
				       "marvell,crypto-srams", idx);
	if (engine->pool) {
		engine->sram = gen_pool_dma_alloc(engine->pool,
						  cesa->sram_size,
						  &engine->sram_dma);
		if (engine->sram)
			return 0;

		engine->pool = NULL;
	}

	if (cesa->caps->nengines > 1) {
		if (!idx)
			res_name = "sram0";
		else
			res_name = "sram1";
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
					   res_name);
	if (!res || resource_size(res) < cesa->sram_size)
		return -EINVAL;

	engine->sram = devm_ioremap_resource(cesa->dev, res);
	if (IS_ERR(engine->sram))
		return PTR_ERR(engine->sram);

	engine->sram_dma = phys_to_dma(cesa->dev,
				       (phys_addr_t)res->start);

	return 0;
}

static void mv_cesa_put_sram(struct platform_device *pdev, int idx)
{
	struct mv_cesa_dev *cesa = platform_get_drvdata(pdev);
	struct mv_cesa_engine *engine = &cesa->engines[idx];

	if (!engine->pool)
		return;

	gen_pool_free(engine->pool, (unsigned long)engine->sram,
		      cesa->sram_size);
}
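
/*
 * Probe: map the controller registers, set up the DMA pools, bring up each
 * engine (SRAM, clocks, IRQ, initial register state) and finally register
 * the supported algorithms.
 */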
static int mv_cesa_probe(struct platform_device *pdev)
{
	const struct mv_cesa_caps *caps = &orion_caps;
	const struct mbus_dram_target_info *dram;
	const struct of_device_id *match;
	struct device *dev = &pdev->dev;
	struct mv_cesa_dev *cesa;
	struct mv_cesa_engine *engines;
	struct resource *res;
	int irq, ret, i;
	u32 sram_size;

	if (cesa_dev) {
		dev_err(&pdev->dev, "Only one CESA device authorized\n");
		return -EEXIST;
	}

	if (dev->of_node) {
		match = of_match_node(mv_cesa_of_match_table, dev->of_node);
		if (!match || !match->data)
			return -ENOTSUPP;

		caps = match->data;
	}

	if ((caps == &orion_caps || caps == &kirkwood_caps) && !allhwsupport)
		return -ENOTSUPP;

	cesa = devm_kzalloc(dev, sizeof(*cesa), GFP_KERNEL);
	if (!cesa)
		return -ENOMEM;

	cesa->caps = caps;
	cesa->dev = dev;

	sram_size = CESA_SA_DEFAULT_SRAM_SIZE;
	of_property_read_u32(cesa->dev->of_node, "marvell,crypto-sram-size",
			     &sram_size);
	if (sram_size < CESA_SA_MIN_SRAM_SIZE)
		sram_size = CESA_SA_MIN_SRAM_SIZE;

	cesa->sram_size = sram_size;
	cesa->engines = devm_kzalloc(dev, caps->nengines * sizeof(*engines),
				     GFP_KERNEL);
	if (!cesa->engines)
		return -ENOMEM;

	spin_lock_init(&cesa->lock);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
	cesa->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(cesa->regs))
		return PTR_ERR(cesa->regs);

	ret = mv_cesa_dev_dma_init(cesa);
	if (ret)
		return ret;

	dram = mv_mbus_dram_info_nooverlap();

	platform_set_drvdata(pdev, cesa);

	for (i = 0; i < caps->nengines; i++) {
		struct mv_cesa_engine *engine = &cesa->engines[i];
		char res_name[7];

		engine->id = i;
		spin_lock_init(&engine->lock);

		ret = mv_cesa_get_sram(pdev, i);
		if (ret)
			goto err_cleanup;

		irq = platform_get_irq(pdev, i);
		if (irq < 0) {
			ret = irq;
			goto err_cleanup;
		}

		/*
		 * Not all platforms can gate the CESA clocks: do not complain
		 * if the clock does not exist.
		 */
		snprintf(res_name, sizeof(res_name), "cesa%d", i);
		engine->clk = devm_clk_get(dev, res_name);
		if (IS_ERR(engine->clk)) {
			engine->clk = devm_clk_get(dev, NULL);
			if (IS_ERR(engine->clk))
				engine->clk = NULL;
		}

		snprintf(res_name, sizeof(res_name), "cesaz%d", i);
		engine->zclk = devm_clk_get(dev, res_name);
		if (IS_ERR(engine->zclk))
			engine->zclk = NULL;

		ret = clk_prepare_enable(engine->clk);
		if (ret)
			goto err_cleanup;

		ret = clk_prepare_enable(engine->zclk);
		if (ret)
			goto err_cleanup;

		engine->regs = cesa->regs + CESA_ENGINE_OFF(i);

		if (dram && cesa->caps->has_tdma)
			mv_cesa_conf_mbus_windows(engine, dram);

		writel(0, engine->regs + CESA_SA_INT_STATUS);
		writel(CESA_SA_CFG_STOP_DIG_ERR,
		       engine->regs + CESA_SA_CFG);
		writel(engine->sram_dma & CESA_SA_SRAM_MSK,
		       engine->regs + CESA_SA_DESC_P0);

		ret = devm_request_threaded_irq(dev, irq, NULL, mv_cesa_int,
						IRQF_ONESHOT,
						dev_name(&pdev->dev),
						engine);
		if (ret)
			goto err_cleanup;

		crypto_init_queue(&engine->queue, CESA_CRYPTO_DEFAULT_MAX_QLEN);
		atomic_set(&engine->load, 0);
		INIT_LIST_HEAD(&engine->complete_queue);
	}

	cesa_dev = cesa;

	ret = mv_cesa_add_algs(cesa);
	if (ret) {
		cesa_dev = NULL;
		goto err_cleanup;
	}

	dev_info(dev, "CESA device successfully registered\n");

	return 0;

err_cleanup:
	for (i = 0; i < caps->nengines; i++) {
		clk_disable_unprepare(cesa->engines[i].zclk);
		clk_disable_unprepare(cesa->engines[i].clk);
		mv_cesa_put_sram(pdev, i);
	}

	return ret;
}

static int mv_cesa_remove(struct platform_device *pdev)
{
	struct mv_cesa_dev *cesa = platform_get_drvdata(pdev);
	int i;

	mv_cesa_remove_algs(cesa);

	for (i = 0; i < cesa->caps->nengines; i++) {
		clk_disable_unprepare(cesa->engines[i].zclk);
		clk_disable_unprepare(cesa->engines[i].clk);
		mv_cesa_put_sram(pdev, i);
	}

	return 0;
}

static struct platform_driver marvell_cesa = {
	.probe = mv_cesa_probe,
	.remove = mv_cesa_remove,
	.driver = {
		.name = "marvell-cesa",
		.of_match_table = mv_cesa_of_match_table,
	},
};
module_platform_driver(marvell_cesa);

MODULE_ALIAS("platform:mv_crypto");
MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>");
MODULE_AUTHOR("Arnaud Ebalard <arno@natisbad.org>");
MODULE_DESCRIPTION("Support for Marvell's cryptographic engine");
MODULE_LICENSE("GPL v2");