/*
 * Support for Marvell's Cryptographic Engine and Security Accelerator (CESA)
 * that can be found on the following platforms: Orion, Kirkwood, Armada.
 * This driver supports the TDMA engine on platforms on which it is available.
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 * Author: Arnaud Ebalard <arno@natisbad.org>
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */
#include <linux/delay.h>
#include <linux/genalloc.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kthread.h>
#include <linux/mbus.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>

#include "cesa.h"
/* Limit of the crypto queue before reaching the backlog */
#define CESA_CRYPTO_DEFAULT_MAX_QLEN 128
static int allhwsupport = !IS_ENABLED(CONFIG_CRYPTO_DEV_MV_CESA);
module_param_named(allhwsupport, allhwsupport, int, 0444);
MODULE_PARM_DESC(allhwsupport, "Enable support for all hardware (even if it overlaps with the mv_cesa driver)");

struct mv_cesa_dev *cesa_dev;
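
/*
 * Fetch the current backlog pointer and pop the next pending request off
 * the engine queue. Callers must hold the engine lock, hence the _locked
 * suffix.
 */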
struct crypto_async_request *
mv_cesa_dequeue_req_locked(struct mv_cesa_engine *engine,
			   struct crypto_async_request **backlog)
{
	struct crypto_async_request *req;

	*backlog = crypto_get_backlog(&engine->queue);
	req = crypto_dequeue_request(&engine->queue);

	return req;
}
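
/*
 * Feed the engine with the next queued request: dequeue a request under
 * the engine lock, tell the backlog owner (if any) that its request is now
 * in progress, then start the actual transfer through the per-transform
 * step() operation.
 */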
static void mv_cesa_rearm_engine(struct mv_cesa_engine *engine)
{
	struct crypto_async_request *req = NULL, *backlog = NULL;
	struct mv_cesa_ctx *ctx;

	spin_lock_bh(&engine->lock);
	if (!engine->req) {
		req = mv_cesa_dequeue_req_locked(engine, &backlog);
		engine->req = req;
	}
	spin_unlock_bh(&engine->lock);

	if (!req)
		return;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	ctx = crypto_tfm_ctx(req->tfm);
	ctx->ops->step(req);
}
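
/*
 * Standard (non-TDMA) completion path: let the transform-specific process()
 * hook inspect the engine status, then either complete the request or step
 * it again if more data remains to be processed.
 */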
static int mv_cesa_std_process(struct mv_cesa_engine *engine, u32 status)
{
	struct crypto_async_request *req;
	struct mv_cesa_ctx *ctx;
	int res;

	req = engine->req;
	ctx = crypto_tfm_ctx(req->tfm);
	res = ctx->ops->process(req, status);

	if (res == 0) {
		ctx->ops->complete(req);
		mv_cesa_engine_enqueue_complete_request(engine, req);
	} else if (res == -EINPROGRESS) {
		ctx->ops->step(req);
	}

	return res;
}
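
/*
 * Dispatch interrupt processing: requests chained through the TDMA engine
 * are handled by mv_cesa_tdma_process(), everything else goes through the
 * standard copy-based path.
 */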
static int mv_cesa_int_process(struct mv_cesa_engine *engine, u32 status)
{
	if (engine->chain.first && engine->chain.last)
		return mv_cesa_tdma_process(engine, status);

	return mv_cesa_std_process(engine, status);
}
static inline void
mv_cesa_complete_req(struct mv_cesa_ctx *ctx, struct crypto_async_request *req,
		     int res)
{
	ctx->ops->cleanup(req);
	local_bh_disable();
	req->complete(req, res);
	local_bh_enable();
}
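
/*
 * Interrupt handler: acknowledge the interrupt cause, process the request(s)
 * owned by the engine, complete what is finished, and rearm the engine with
 * the next pending request.
 */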
static irqreturn_t mv_cesa_int(int irq, void *priv)
{
	struct mv_cesa_engine *engine = priv;
	struct crypto_async_request *req;
	struct mv_cesa_ctx *ctx;
	u32 status, mask;
	irqreturn_t ret = IRQ_NONE;

	while (true) {
		int res;

		mask = mv_cesa_get_int_mask(engine);
		status = readl(engine->regs + CESA_SA_INT_STATUS);

		if (!(status & mask))
			break;

		/*
		 * TODO: avoid clearing the FPGA_INT_STATUS if this is not
		 * relevant on some platforms.
		 */
		writel(~status, engine->regs + CESA_SA_FPGA_INT_STATUS);
		writel(~status, engine->regs + CESA_SA_INT_STATUS);

		/* Process fetched requests */
		res = mv_cesa_int_process(engine, status & mask);
		ret = IRQ_HANDLED;

		spin_lock_bh(&engine->lock);
		req = engine->req;
		if (res != -EINPROGRESS)
			engine->req = NULL;
		spin_unlock_bh(&engine->lock);

		ctx = crypto_tfm_ctx(req->tfm);

		if (res && res != -EINPROGRESS)
			mv_cesa_complete_req(ctx, req, res);

		/* Launch the next pending request */
		mv_cesa_rearm_engine(engine);

		/* Iterate over the complete queue */
		while (true) {
			req = mv_cesa_engine_dequeue_complete_request(engine);
			if (!req)
				break;

			ctx = crypto_tfm_ctx(req->tfm);
			mv_cesa_complete_req(ctx, req, 0);
		}
	}

	return ret;
}
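
/*
 * Enqueue a request on an engine. For DMA requests, the TDMA chain is built
 * while the engine lock is held so that back-to-back requests can be chained
 * and processed by the hardware without software intervention.
 */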
int mv_cesa_queue_req(struct crypto_async_request *req,
		      struct mv_cesa_req *creq)
{
	int ret;
	struct mv_cesa_engine *engine = creq->engine;

	spin_lock_bh(&engine->lock);
	ret = crypto_enqueue_request(&engine->queue, req);
	if ((mv_cesa_req_get_type(creq) == CESA_DMA_REQ) &&
	    (ret == -EINPROGRESS ||
	     (ret == -EBUSY && req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
		mv_cesa_tdma_chain(engine, creq);
	spin_unlock_bh(&engine->lock);

	if (ret != -EINPROGRESS)
		return ret;

	mv_cesa_rearm_engine(engine);

	return -EINPROGRESS;
}
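
/*
 * Register all cipher and hash algorithms supported by the detected SoC,
 * unwinding any successful registrations on failure.
 */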
static int mv_cesa_add_algs(struct mv_cesa_dev *cesa)
{
	int ret;
	int i, j;

	for (i = 0; i < cesa->caps->ncipher_algs; i++) {
		ret = crypto_register_alg(cesa->caps->cipher_algs[i]);
		if (ret)
			goto err_unregister_crypto;
	}

	for (i = 0; i < cesa->caps->nahash_algs; i++) {
		ret = crypto_register_ahash(cesa->caps->ahash_algs[i]);
		if (ret)
			goto err_unregister_ahash;
	}

	return 0;

err_unregister_ahash:
	for (j = 0; j < i; j++)
		crypto_unregister_ahash(cesa->caps->ahash_algs[j]);
	i = cesa->caps->ncipher_algs;

err_unregister_crypto:
	for (j = 0; j < i; j++)
		crypto_unregister_alg(cesa->caps->cipher_algs[j]);

	return ret;
}
static void mv_cesa_remove_algs(struct mv_cesa_dev *cesa)
{
	int i;

	for (i = 0; i < cesa->caps->nahash_algs; i++)
		crypto_unregister_ahash(cesa->caps->ahash_algs[i]);

	for (i = 0; i < cesa->caps->ncipher_algs; i++)
		crypto_unregister_alg(cesa->caps->cipher_algs[i]);
}
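
/*
 * Per-SoC algorithm lists: Orion/Kirkwood expose DES/3DES/AES ciphers plus
 * MD5/SHA1 (and their HMAC variants), while the Armada SoCs additionally
 * support SHA256.
 */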
static struct crypto_alg *orion_cipher_algs[] = {
	&mv_cesa_ecb_des_alg,
	&mv_cesa_cbc_des_alg,
	&mv_cesa_ecb_des3_ede_alg,
	&mv_cesa_cbc_des3_ede_alg,
	&mv_cesa_ecb_aes_alg,
	&mv_cesa_cbc_aes_alg,
};

static struct ahash_alg *orion_ahash_algs[] = {
	&mv_md5_alg,
	&mv_sha1_alg,
	&mv_ahmac_md5_alg,
	&mv_ahmac_sha1_alg,
};

static struct crypto_alg *armada_370_cipher_algs[] = {
	&mv_cesa_ecb_des_alg,
	&mv_cesa_cbc_des_alg,
	&mv_cesa_ecb_des3_ede_alg,
	&mv_cesa_cbc_des3_ede_alg,
	&mv_cesa_ecb_aes_alg,
	&mv_cesa_cbc_aes_alg,
};

static struct ahash_alg *armada_370_ahash_algs[] = {
	&mv_md5_alg,
	&mv_sha1_alg,
	&mv_sha256_alg,
	&mv_ahmac_md5_alg,
	&mv_ahmac_sha1_alg,
	&mv_ahmac_sha256_alg,
};
static const struct mv_cesa_caps orion_caps = {
	.nengines = 1,
	.cipher_algs = orion_cipher_algs,
	.ncipher_algs = ARRAY_SIZE(orion_cipher_algs),
	.ahash_algs = orion_ahash_algs,
	.nahash_algs = ARRAY_SIZE(orion_ahash_algs),
	.has_tdma = false,
};

static const struct mv_cesa_caps kirkwood_caps = {
	.nengines = 1,
	.cipher_algs = orion_cipher_algs,
	.ncipher_algs = ARRAY_SIZE(orion_cipher_algs),
	.ahash_algs = orion_ahash_algs,
	.nahash_algs = ARRAY_SIZE(orion_ahash_algs),
	.has_tdma = true,
};

static const struct mv_cesa_caps armada_370_caps = {
	.nengines = 1,
	.cipher_algs = armada_370_cipher_algs,
	.ncipher_algs = ARRAY_SIZE(armada_370_cipher_algs),
	.ahash_algs = armada_370_ahash_algs,
	.nahash_algs = ARRAY_SIZE(armada_370_ahash_algs),
	.has_tdma = true,
};

static const struct mv_cesa_caps armada_xp_caps = {
	.nengines = 2,
	.cipher_algs = armada_370_cipher_algs,
	.ncipher_algs = ARRAY_SIZE(armada_370_cipher_algs),
	.ahash_algs = armada_370_ahash_algs,
	.nahash_algs = ARRAY_SIZE(armada_370_ahash_algs),
	.has_tdma = true,
};
static const struct of_device_id mv_cesa_of_match_table[] = {
	{ .compatible = "marvell,orion-crypto", .data = &orion_caps },
	{ .compatible = "marvell,kirkwood-crypto", .data = &kirkwood_caps },
	{ .compatible = "marvell,dove-crypto", .data = &kirkwood_caps },
	{ .compatible = "marvell,armada-370-crypto", .data = &armada_370_caps },
	{ .compatible = "marvell,armada-xp-crypto", .data = &armada_xp_caps },
	{ .compatible = "marvell,armada-375-crypto", .data = &armada_xp_caps },
	{ .compatible = "marvell,armada-38x-crypto", .data = &armada_xp_caps },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, mv_cesa_of_match_table);
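
/*
 * Program the TDMA address decoding windows: clear all four windows first,
 * then map one window per DRAM chip-select as described by the mbus DRAM
 * target info.
 */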
static void
mv_cesa_conf_mbus_windows(struct mv_cesa_engine *engine,
			  const struct mbus_dram_target_info *dram)
{
	void __iomem *iobase = engine->regs;
	int i;

	for (i = 0; i < 4; i++) {
		writel(0, iobase + CESA_TDMA_WINDOW_CTRL(i));
		writel(0, iobase + CESA_TDMA_WINDOW_BASE(i));
	}

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		writel(((cs->size - 1) & 0xffff0000) |
		       (cs->mbus_attr << 8) |
		       (dram->mbus_dram_target_id << 4) | 1,
		       iobase + CESA_TDMA_WINDOW_CTRL(i));
		writel(cs->base, iobase + CESA_TDMA_WINDOW_BASE(i));
	}
}
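
/*
 * Allocate the managed DMA pools used by the TDMA path: transfer
 * descriptors, operation contexts, the hash cache, padding and IVs.
 */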
static int mv_cesa_dev_dma_init(struct mv_cesa_dev *cesa)
{
	struct device *dev = cesa->dev;
	struct mv_cesa_dev_dma *dma;

	if (!cesa->caps->has_tdma)
		return 0;

	dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
	if (!dma)
		return -ENOMEM;

	dma->tdma_desc_pool = dmam_pool_create("tdma_desc", dev,
					sizeof(struct mv_cesa_tdma_desc),
					16, 0);
	if (!dma->tdma_desc_pool)
		return -ENOMEM;

	dma->op_pool = dmam_pool_create("cesa_op", dev,
					sizeof(struct mv_cesa_op_ctx), 16, 0);
	if (!dma->op_pool)
		return -ENOMEM;

	dma->cache_pool = dmam_pool_create("cesa_cache", dev,
					   CESA_MAX_HASH_BLOCK_SIZE, 1, 0);
	if (!dma->cache_pool)
		return -ENOMEM;

	dma->padding_pool = dmam_pool_create("cesa_padding", dev, 72, 1, 0);
	if (!dma->padding_pool)
		return -ENOMEM;

	dma->iv_pool = dmam_pool_create("cesa_iv", dev, 16, 1, 0);
	if (!dma->iv_pool)
		return -ENOMEM;

	cesa->dma = dma;

	return 0;
}
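
/*
 * Attach the engine to its local SRAM: preferably through the genalloc pool
 * referenced by the "marvell,crypto-srams" property, otherwise by mapping
 * the "sram" (or per-engine "sramN" on multi-engine SoCs) memory resource
 * directly.
 */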
static int mv_cesa_get_sram(struct platform_device *pdev, int idx)
{
	struct mv_cesa_dev *cesa = platform_get_drvdata(pdev);
	struct mv_cesa_engine *engine = &cesa->engines[idx];
	const char *res_name = "sram";
	struct resource *res;

	engine->pool = of_gen_pool_get(cesa->dev->of_node,
				       "marvell,crypto-srams", idx);
	if (engine->pool) {
		engine->sram = gen_pool_dma_alloc(engine->pool,
						  cesa->sram_size,
						  &engine->sram_dma);
		if (engine->sram)
			return 0;

		engine->pool = NULL;
	}

	if (cesa->caps->nengines > 1) {
		if (!idx)
			res_name = "sram0";
		else
			res_name = "sram1";
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
					   res_name);
	if (!res || resource_size(res) < cesa->sram_size)
		return -EINVAL;

	engine->sram = devm_ioremap_resource(cesa->dev, res);
	if (IS_ERR(engine->sram))
		return PTR_ERR(engine->sram);

	engine->sram_dma = phys_to_dma(cesa->dev,
				       (phys_addr_t)res->start);

	return 0;
}
static void mv_cesa_put_sram(struct platform_device *pdev, int idx)
{
	struct mv_cesa_dev *cesa = platform_get_drvdata(pdev);
	struct mv_cesa_engine *engine = &cesa->engines[idx];

	if (!engine->pool)
		return;

	gen_pool_free(engine->pool, (unsigned long)engine->sram,
		      cesa->sram_size);
}
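
/*
 * Probe: select the capabilities matching the compatible string, map the
 * registers, set up the DMA pools, then bring up each engine (SRAM, IRQ,
 * clocks, mbus windows) before registering the algorithms.
 */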
static int mv_cesa_probe(struct platform_device *pdev)
{
	const struct mv_cesa_caps *caps = &orion_caps;
	const struct mbus_dram_target_info *dram;
	const struct of_device_id *match;
	struct device *dev = &pdev->dev;
	struct mv_cesa_dev *cesa;
	struct mv_cesa_engine *engines;
	struct resource *res;
	int irq, ret, i;
	u32 sram_size;

	if (cesa_dev) {
		dev_err(&pdev->dev, "Only one CESA device authorized\n");
		return -EEXIST;
	}

	if (dev->of_node) {
		match = of_match_node(mv_cesa_of_match_table, dev->of_node);
		if (!match || !match->data)
			return -ENOTSUPP;

		caps = match->data;
	}

	if ((caps == &orion_caps || caps == &kirkwood_caps) && !allhwsupport)
		return -ENOTSUPP;

	cesa = devm_kzalloc(dev, sizeof(*cesa), GFP_KERNEL);
	if (!cesa)
		return -ENOMEM;

	cesa->caps = caps;
	cesa->dev = dev;

	sram_size = CESA_SA_DEFAULT_SRAM_SIZE;
	of_property_read_u32(cesa->dev->of_node, "marvell,crypto-sram-size",
			     &sram_size);
	if (sram_size < CESA_SA_MIN_SRAM_SIZE)
		sram_size = CESA_SA_MIN_SRAM_SIZE;

	cesa->sram_size = sram_size;
	cesa->engines = devm_kzalloc(dev, caps->nengines * sizeof(*engines),
				     GFP_KERNEL);
	if (!cesa->engines)
		return -ENOMEM;

	spin_lock_init(&cesa->lock);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
	cesa->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(cesa->regs))
		return PTR_ERR(cesa->regs);

	ret = mv_cesa_dev_dma_init(cesa);
	if (ret)
		return ret;

	dram = mv_mbus_dram_info_nooverlap();

	platform_set_drvdata(pdev, cesa);

	for (i = 0; i < caps->nengines; i++) {
		struct mv_cesa_engine *engine = &cesa->engines[i];
		char res_name[7];

		engine->id = i;
		spin_lock_init(&engine->lock);

		ret = mv_cesa_get_sram(pdev, i);
		if (ret)
			goto err_cleanup;

		irq = platform_get_irq(pdev, i);
		if (irq < 0) {
			ret = irq;
			goto err_cleanup;
		}

		/*
		 * Not all platforms can gate the CESA clocks: do not complain
		 * if the clock does not exist.
		 */
		snprintf(res_name, sizeof(res_name), "cesa%d", i);
		engine->clk = devm_clk_get(dev, res_name);
		if (IS_ERR(engine->clk)) {
			engine->clk = devm_clk_get(dev, NULL);
			if (IS_ERR(engine->clk))
				engine->clk = NULL;
		}

		snprintf(res_name, sizeof(res_name), "cesaz%d", i);
		engine->zclk = devm_clk_get(dev, res_name);
		if (IS_ERR(engine->zclk))
			engine->zclk = NULL;

		ret = clk_prepare_enable(engine->clk);
		if (ret)
			goto err_cleanup;

		ret = clk_prepare_enable(engine->zclk);
		if (ret)
			goto err_cleanup;

		engine->regs = cesa->regs + CESA_ENGINE_OFF(i);

		if (dram && cesa->caps->has_tdma)
			mv_cesa_conf_mbus_windows(engine, dram);

		writel(0, engine->regs + CESA_SA_INT_STATUS);
		writel(CESA_SA_CFG_STOP_DIG_ERR,
		       engine->regs + CESA_SA_CFG);
		writel(engine->sram_dma & CESA_SA_SRAM_MSK,
		       engine->regs + CESA_SA_DESC_P0);

		ret = devm_request_threaded_irq(dev, irq, NULL, mv_cesa_int,
						IRQF_ONESHOT,
						dev_name(&pdev->dev),
						engine);
		if (ret)
			goto err_cleanup;

		crypto_init_queue(&engine->queue, CESA_CRYPTO_DEFAULT_MAX_QLEN);
		atomic_set(&engine->load, 0);
		INIT_LIST_HEAD(&engine->complete_queue);
	}

	cesa_dev = cesa;

	ret = mv_cesa_add_algs(cesa);
	if (ret) {
		cesa_dev = NULL;
		goto err_cleanup;
	}

	dev_info(dev, "CESA device successfully registered\n");

	return 0;

err_cleanup:
	for (i = 0; i < caps->nengines; i++) {
		clk_disable_unprepare(cesa->engines[i].zclk);
		clk_disable_unprepare(cesa->engines[i].clk);
		mv_cesa_put_sram(pdev, i);
	}

	return ret;
}
static int mv_cesa_remove(struct platform_device *pdev)
{
	struct mv_cesa_dev *cesa = platform_get_drvdata(pdev);
	int i;

	mv_cesa_remove_algs(cesa);

	for (i = 0; i < cesa->caps->nengines; i++) {
		clk_disable_unprepare(cesa->engines[i].zclk);
		clk_disable_unprepare(cesa->engines[i].clk);
		mv_cesa_put_sram(pdev, i);
	}

	return 0;
}
static struct platform_driver marvell_cesa = {
	.probe = mv_cesa_probe,
	.remove = mv_cesa_remove,
	.driver = {
		.name = "marvell-cesa",
		.of_match_table = mv_cesa_of_match_table,
	},
};
module_platform_driver(marvell_cesa);

MODULE_ALIAS("platform:mv_crypto");
MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>");
MODULE_AUTHOR("Arnaud Ebalard <arno@natisbad.org>");
MODULE_DESCRIPTION("Support for Marvell's cryptographic engine");
MODULE_LICENSE("GPL v2");