/*
 * Support for Marvell's Cryptographic Engine and Security Accelerator (CESA)
 * that can be found on the following platforms: Orion, Kirkwood, Armada. This
 * driver supports the TDMA engine on platforms on which it is available.
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 * Author: Arnaud Ebalard <arno@natisbad.org>
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/genalloc.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kthread.h>
#include <linux/mbus.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/dma-direct.h> /* XXX: drivers shall never use this directly! */
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>

#include "cesa.h"
/* Limit of the crypto queue before reaching the backlog */
#define CESA_CRYPTO_DEFAULT_MAX_QLEN 128
struct mv_cesa_dev *cesa_dev;
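/*
 * Pop the next pending request off the engine's crypto queue, reporting
 * any backlogged request through @backlog. Caller must hold engine->lock.
 */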
struct crypto_async_request *
mv_cesa_dequeue_req_locked(struct mv_cesa_engine *engine,
                           struct crypto_async_request **backlog)
{
        struct crypto_async_request *req;

        *backlog = crypto_get_backlog(&engine->queue);
        req = crypto_dequeue_request(&engine->queue);

        if (!req)
                return NULL;

        return req;
}
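/*
 * Feed the engine with the next queued request: dequeue under the lock,
 * signal any backlogged request that it is now in progress, then kick the
 * request's first processing step.
 */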
static void mv_cesa_rearm_engine(struct mv_cesa_engine *engine)
{
        struct crypto_async_request *req = NULL, *backlog = NULL;
        struct mv_cesa_ctx *ctx;

        spin_lock_bh(&engine->lock);
        if (!engine->req) {
                req = mv_cesa_dequeue_req_locked(engine, &backlog);
                engine->req = req;
        }
        spin_unlock_bh(&engine->lock);

        if (!req)
                return;

        if (backlog)
                backlog->complete(backlog, -EINPROGRESS);

        ctx = crypto_tfm_ctx(req->tfm);
        ctx->ops->step(req);
}
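/*
 * Standard (non-TDMA) completion path: let the request's ops examine the
 * engine status, then either complete the request or step it again.
 */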
static int mv_cesa_std_process(struct mv_cesa_engine *engine, u32 status)
{
        struct crypto_async_request *req;
        struct mv_cesa_ctx *ctx;
        int res;

        req = engine->req;
        ctx = crypto_tfm_ctx(req->tfm);
        res = ctx->ops->process(req, status);

        if (res == 0) {
                ctx->ops->complete(req);
                mv_cesa_engine_enqueue_complete_request(engine, req);
        } else if (res == -EINPROGRESS) {
                ctx->ops->step(req);
        }

        return res;
}
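/* Dispatch to the TDMA handler when a descriptor chain is active. */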
static int mv_cesa_int_process(struct mv_cesa_engine *engine, u32 status)
{
        if (engine->chain.first && engine->chain.last)
                return mv_cesa_tdma_process(engine, status);

        return mv_cesa_std_process(engine, status);
}
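/* Clean up a finished request and invoke its completion callback. */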
static inline void
mv_cesa_complete_req(struct mv_cesa_ctx *ctx, struct crypto_async_request *req,
                     int res)
{
        ctx->ops->cleanup(req);
        local_bh_disable();
        req->complete(req, res);
        local_bh_enable();
}
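/*
 * Interrupt handler: acknowledge and clear the interrupt cause, process
 * the current request, complete or rearm it, then drain the engine's
 * complete queue.
 */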
static irqreturn_t mv_cesa_int(int irq, void *priv)
{
        struct mv_cesa_engine *engine = priv;
        struct crypto_async_request *req;
        struct mv_cesa_ctx *ctx;
        u32 status, mask;
        irqreturn_t ret = IRQ_NONE;

        while (true) {
                int res;

                mask = mv_cesa_get_int_mask(engine);
                status = readl(engine->regs + CESA_SA_INT_STATUS);

                if (!(status & mask))
                        break;

                /*
                 * TODO: avoid clearing the FPGA_INT_STATUS if this is not
                 * relevant on some platforms.
                 */
                writel(~status, engine->regs + CESA_SA_FPGA_INT_STATUS);
                writel(~status, engine->regs + CESA_SA_INT_STATUS);

                /* Process fetched requests */
                res = mv_cesa_int_process(engine, status & mask);
                ret = IRQ_HANDLED;

                spin_lock_bh(&engine->lock);
                req = engine->req;
                if (res != -EINPROGRESS)
                        engine->req = NULL;
                spin_unlock_bh(&engine->lock);

                ctx = crypto_tfm_ctx(req->tfm);

                if (res && res != -EINPROGRESS)
                        mv_cesa_complete_req(ctx, req, res);

                /* Launch the next pending request */
                mv_cesa_rearm_engine(engine);

                /* Iterate over the complete queue */
                while (true) {
                        req = mv_cesa_engine_dequeue_complete_request(engine);
                        if (!req)
                                break;

                        ctx = crypto_tfm_ctx(req->tfm);
                        mv_cesa_complete_req(ctx, req, 0);
                }
        }

        return ret;
}
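/*
 * Enqueue a request on its target engine; DMA-backed requests are also
 * appended to the engine's TDMA chain so they can be fetched back to back.
 */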
int mv_cesa_queue_req(struct crypto_async_request *req,
                      struct mv_cesa_req *creq)
{
        int ret;
        struct mv_cesa_engine *engine = creq->engine;

        spin_lock_bh(&engine->lock);
        ret = crypto_enqueue_request(&engine->queue, req);
        if ((mv_cesa_req_get_type(creq) == CESA_DMA_REQ) &&
            (ret == -EINPROGRESS || ret == -EBUSY))
                mv_cesa_tdma_chain(engine, creq);
        spin_unlock_bh(&engine->lock);

        if (ret != -EINPROGRESS)
                return ret;

        mv_cesa_rearm_engine(engine);

        return -EINPROGRESS;
}
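/*
 * Register all skcipher and ahash algorithms advertised by the caps,
 * unwinding the registrations already done if any of them fails.
 */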
static int mv_cesa_add_algs(struct mv_cesa_dev *cesa)
{
        int ret;
        int i, j;

        for (i = 0; i < cesa->caps->ncipher_algs; i++) {
                ret = crypto_register_skcipher(cesa->caps->cipher_algs[i]);
                if (ret)
                        goto err_unregister_crypto;
        }

        for (i = 0; i < cesa->caps->nahash_algs; i++) {
                ret = crypto_register_ahash(cesa->caps->ahash_algs[i]);
                if (ret)
                        goto err_unregister_ahash;
        }

        return 0;

err_unregister_ahash:
        for (j = 0; j < i; j++)
                crypto_unregister_ahash(cesa->caps->ahash_algs[j]);
        i = cesa->caps->ncipher_algs;

err_unregister_crypto:
        for (j = 0; j < i; j++)
                crypto_unregister_skcipher(cesa->caps->cipher_algs[j]);

        return ret;
}
static void mv_cesa_remove_algs(struct mv_cesa_dev *cesa)
{
        int i;

        for (i = 0; i < cesa->caps->nahash_algs; i++)
                crypto_unregister_ahash(cesa->caps->ahash_algs[i]);

        for (i = 0; i < cesa->caps->ncipher_algs; i++)
                crypto_unregister_skcipher(cesa->caps->cipher_algs[i]);
}
static struct skcipher_alg *orion_cipher_algs[] = {
        &mv_cesa_ecb_des_alg,
        &mv_cesa_cbc_des_alg,
        &mv_cesa_ecb_des3_ede_alg,
        &mv_cesa_cbc_des3_ede_alg,
        &mv_cesa_ecb_aes_alg,
        &mv_cesa_cbc_aes_alg,
};

static struct ahash_alg *orion_ahash_algs[] = {
        &mv_md5_alg,
        &mv_sha1_alg,
        &mv_ahmac_md5_alg,
        &mv_ahmac_sha1_alg,
};

static struct skcipher_alg *armada_370_cipher_algs[] = {
        &mv_cesa_ecb_des_alg,
        &mv_cesa_cbc_des_alg,
        &mv_cesa_ecb_des3_ede_alg,
        &mv_cesa_cbc_des3_ede_alg,
        &mv_cesa_ecb_aes_alg,
        &mv_cesa_cbc_aes_alg,
};

static struct ahash_alg *armada_370_ahash_algs[] = {
        &mv_md5_alg,
        &mv_sha1_alg,
        &mv_sha256_alg,
        &mv_ahmac_md5_alg,
        &mv_ahmac_sha1_alg,
        &mv_ahmac_sha256_alg,
};
static const struct mv_cesa_caps orion_caps = {
        .nengines = 1,
        .cipher_algs = orion_cipher_algs,
        .ncipher_algs = ARRAY_SIZE(orion_cipher_algs),
        .ahash_algs = orion_ahash_algs,
        .nahash_algs = ARRAY_SIZE(orion_ahash_algs),
        .has_tdma = false,
};

static const struct mv_cesa_caps kirkwood_caps = {
        .nengines = 1,
        .cipher_algs = orion_cipher_algs,
        .ncipher_algs = ARRAY_SIZE(orion_cipher_algs),
        .ahash_algs = orion_ahash_algs,
        .nahash_algs = ARRAY_SIZE(orion_ahash_algs),
        .has_tdma = true,
};

static const struct mv_cesa_caps armada_370_caps = {
        .nengines = 1,
        .cipher_algs = armada_370_cipher_algs,
        .ncipher_algs = ARRAY_SIZE(armada_370_cipher_algs),
        .ahash_algs = armada_370_ahash_algs,
        .nahash_algs = ARRAY_SIZE(armada_370_ahash_algs),
        .has_tdma = true,
};

static const struct mv_cesa_caps armada_xp_caps = {
        .nengines = 2,
        .cipher_algs = armada_370_cipher_algs,
        .ncipher_algs = ARRAY_SIZE(armada_370_cipher_algs),
        .ahash_algs = armada_370_ahash_algs,
        .nahash_algs = ARRAY_SIZE(armada_370_ahash_algs),
        .has_tdma = true,
};
static const struct of_device_id mv_cesa_of_match_table[] = {
        { .compatible = "marvell,orion-crypto", .data = &orion_caps },
        { .compatible = "marvell,kirkwood-crypto", .data = &kirkwood_caps },
        { .compatible = "marvell,dove-crypto", .data = &kirkwood_caps },
        { .compatible = "marvell,armada-370-crypto", .data = &armada_370_caps },
        { .compatible = "marvell,armada-xp-crypto", .data = &armada_xp_caps },
        { .compatible = "marvell,armada-375-crypto", .data = &armada_xp_caps },
        { .compatible = "marvell,armada-38x-crypto", .data = &armada_xp_caps },
        { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mv_cesa_of_match_table);
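/*
 * Program the TDMA address decoding windows so the engine can reach each
 * DRAM chip-select through the MBus.
 */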
static void
mv_cesa_conf_mbus_windows(struct mv_cesa_engine *engine,
                          const struct mbus_dram_target_info *dram)
{
        void __iomem *iobase = engine->regs;
        int i;

        for (i = 0; i < 4; i++) {
                writel(0, iobase + CESA_TDMA_WINDOW_CTRL(i));
                writel(0, iobase + CESA_TDMA_WINDOW_BASE(i));
        }

        for (i = 0; i < dram->num_cs; i++) {
                const struct mbus_dram_window *cs = dram->cs + i;

                writel(((cs->size - 1) & 0xffff0000) |
                       (cs->mbus_attr << 8) |
                       (dram->mbus_dram_target_id << 4) | 1,
                       iobase + CESA_TDMA_WINDOW_CTRL(i));
                writel(cs->base, iobase + CESA_TDMA_WINDOW_BASE(i));
        }
}
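/*
 * Create the managed DMA pools (TDMA descriptors, operation contexts,
 * hash cache and padding buffers) used by the TDMA-capable engines.
 */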
static int mv_cesa_dev_dma_init(struct mv_cesa_dev *cesa)
{
        struct device *dev = cesa->dev;
        struct mv_cesa_dev_dma *dma;

        if (!cesa->caps->has_tdma)
                return 0;

        dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
        if (!dma)
                return -ENOMEM;

        dma->tdma_desc_pool = dmam_pool_create("tdma_desc", dev,
                                               sizeof(struct mv_cesa_tdma_desc),
                                               16, 0);
        if (!dma->tdma_desc_pool)
                return -ENOMEM;

        dma->op_pool = dmam_pool_create("cesa_op", dev,
                                        sizeof(struct mv_cesa_op_ctx), 16, 0);
        if (!dma->op_pool)
                return -ENOMEM;

        dma->cache_pool = dmam_pool_create("cesa_cache", dev,
                                           CESA_MAX_HASH_BLOCK_SIZE, 1, 0);
        if (!dma->cache_pool)
                return -ENOMEM;

        dma->padding_pool = dmam_pool_create("cesa_padding", dev, 72, 1, 0);
        if (!dma->padding_pool)
                return -ENOMEM;

        cesa->dma = dma;

        return 0;
}
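/*
 * Obtain the per-engine SRAM: prefer a "marvell,crypto-srams" genalloc
 * pool, falling back to a memory-mapped "sram"/"sramN" resource.
 */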
static int mv_cesa_get_sram(struct platform_device *pdev, int idx)
{
        struct mv_cesa_dev *cesa = platform_get_drvdata(pdev);
        struct mv_cesa_engine *engine = &cesa->engines[idx];
        const char *res_name = "sram";
        struct resource *res;

        engine->pool = of_gen_pool_get(cesa->dev->of_node,
                                       "marvell,crypto-srams", idx);
        if (engine->pool) {
                engine->sram = gen_pool_dma_alloc(engine->pool,
                                                  cesa->sram_size,
                                                  &engine->sram_dma);
                if (engine->sram)
                        return 0;

                engine->pool = NULL;
                return -ENOMEM;
        }

        if (cesa->caps->nengines > 1) {
                if (!idx)
                        res_name = "sram0";
                else
                        res_name = "sram1";
        }

        res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
                                           res_name);
        if (!res || resource_size(res) < cesa->sram_size)
                return -EINVAL;

        engine->sram = devm_ioremap_resource(cesa->dev, res);
        if (IS_ERR(engine->sram))
                return PTR_ERR(engine->sram);

        engine->sram_dma = dma_map_resource(cesa->dev, res->start,
                                            cesa->sram_size,
                                            DMA_BIDIRECTIONAL, 0);
        if (dma_mapping_error(cesa->dev, engine->sram_dma))
                return -ENOMEM;

        return 0;
}
static void mv_cesa_put_sram(struct platform_device *pdev, int idx)
{
        struct mv_cesa_dev *cesa = platform_get_drvdata(pdev);
        struct mv_cesa_engine *engine = &cesa->engines[idx];

        if (engine->pool)
                gen_pool_free(engine->pool, (unsigned long)engine->sram,
                              cesa->sram_size);
        else
                dma_unmap_resource(cesa->dev, engine->sram_dma,
                                   cesa->sram_size, DMA_BIDIRECTIONAL, 0);
}
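/*
 * Probe: resolve the platform capabilities, map the registers, then bring
 * up each engine (SRAM, clocks, MBus windows, IRQ) before registering the
 * crypto algorithms.
 */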
static int mv_cesa_probe(struct platform_device *pdev)
{
        const struct mv_cesa_caps *caps = &orion_caps;
        const struct mbus_dram_target_info *dram;
        const struct of_device_id *match;
        struct device *dev = &pdev->dev;
        struct mv_cesa_dev *cesa;
        struct mv_cesa_engine *engines;
        struct resource *res;
        int irq, ret, i;
        u32 sram_size;

        if (cesa_dev) {
                dev_err(&pdev->dev, "Only one CESA device authorized\n");
                return -EEXIST;
        }

        if (dev->of_node) {
                match = of_match_node(mv_cesa_of_match_table, dev->of_node);
                if (!match || !match->data)
                        return -ENOTSUPP;

                caps = match->data;
        }

        cesa = devm_kzalloc(dev, sizeof(*cesa), GFP_KERNEL);
        if (!cesa)
                return -ENOMEM;

        cesa->caps = caps;
        cesa->dev = dev;

        sram_size = CESA_SA_DEFAULT_SRAM_SIZE;
        of_property_read_u32(cesa->dev->of_node, "marvell,crypto-sram-size",
                             &sram_size);
        if (sram_size < CESA_SA_MIN_SRAM_SIZE)
                sram_size = CESA_SA_MIN_SRAM_SIZE;

        cesa->sram_size = sram_size;
        cesa->engines = devm_kzalloc(dev, caps->nengines * sizeof(*engines),
                                     GFP_KERNEL);
        if (!cesa->engines)
                return -ENOMEM;

        spin_lock_init(&cesa->lock);

        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
        cesa->regs = devm_ioremap_resource(dev, res);
        if (IS_ERR(cesa->regs))
                return PTR_ERR(cesa->regs);

        ret = mv_cesa_dev_dma_init(cesa);
        if (ret)
                return ret;

        dram = mv_mbus_dram_info_nooverlap();

        platform_set_drvdata(pdev, cesa);

        for (i = 0; i < caps->nengines; i++) {
                struct mv_cesa_engine *engine = &cesa->engines[i];
                char res_name[7];

                engine->id = i;
                spin_lock_init(&engine->lock);

                ret = mv_cesa_get_sram(pdev, i);
                if (ret)
                        goto err_cleanup;

                irq = platform_get_irq(pdev, i);
                if (irq < 0) {
                        ret = irq;
                        goto err_cleanup;
                }

                /*
                 * Not all platforms can gate the CESA clocks: do not complain
                 * if the clock does not exist.
                 */
                snprintf(res_name, sizeof(res_name), "cesa%d", i);
                engine->clk = devm_clk_get(dev, res_name);
                if (IS_ERR(engine->clk)) {
                        engine->clk = devm_clk_get(dev, NULL);
                        if (IS_ERR(engine->clk))
                                engine->clk = NULL;
                }

                snprintf(res_name, sizeof(res_name), "cesaz%d", i);
                engine->zclk = devm_clk_get(dev, res_name);
                if (IS_ERR(engine->zclk))
                        engine->zclk = NULL;

                ret = clk_prepare_enable(engine->clk);
                if (ret)
                        goto err_cleanup;

                ret = clk_prepare_enable(engine->zclk);
                if (ret)
                        goto err_cleanup;

                engine->regs = cesa->regs + CESA_ENGINE_OFF(i);

                if (dram && cesa->caps->has_tdma)
                        mv_cesa_conf_mbus_windows(engine, dram);

                writel(0, engine->regs + CESA_SA_INT_STATUS);
                writel(CESA_SA_CFG_STOP_DIG_ERR,
                       engine->regs + CESA_SA_CFG);
                writel(engine->sram_dma & CESA_SA_SRAM_MSK,
                       engine->regs + CESA_SA_DESC_P0);

                ret = devm_request_threaded_irq(dev, irq, NULL, mv_cesa_int,
                                                IRQF_ONESHOT,
                                                dev_name(&pdev->dev),
                                                engine);
                if (ret)
                        goto err_cleanup;

                crypto_init_queue(&engine->queue, CESA_CRYPTO_DEFAULT_MAX_QLEN);
                atomic_set(&engine->load, 0);
                INIT_LIST_HEAD(&engine->complete_queue);
        }

        cesa_dev = cesa;

        ret = mv_cesa_add_algs(cesa);
        if (ret) {
                cesa_dev = NULL;
                goto err_cleanup;
        }

        dev_info(dev, "CESA device successfully registered\n");

        return 0;

err_cleanup:
        for (i = 0; i < caps->nengines; i++) {
                clk_disable_unprepare(cesa->engines[i].zclk);
                clk_disable_unprepare(cesa->engines[i].clk);
                mv_cesa_put_sram(pdev, i);
        }

        return ret;
}
static int mv_cesa_remove(struct platform_device *pdev)
{
        struct mv_cesa_dev *cesa = platform_get_drvdata(pdev);
        int i;

        mv_cesa_remove_algs(cesa);

        for (i = 0; i < cesa->caps->nengines; i++) {
                clk_disable_unprepare(cesa->engines[i].zclk);
                clk_disable_unprepare(cesa->engines[i].clk);
                mv_cesa_put_sram(pdev, i);
        }

        return 0;
}
static const struct platform_device_id mv_cesa_plat_id_table[] = {
        { .name = "mv_crypto" },
        { /* sentinel */ },
};
MODULE_DEVICE_TABLE(platform, mv_cesa_plat_id_table);
static struct platform_driver marvell_cesa = {
        .probe = mv_cesa_probe,
        .remove = mv_cesa_remove,
        .id_table = mv_cesa_plat_id_table,
        .driver = {
                .name = "marvell-cesa",
                .of_match_table = mv_cesa_of_match_table,
        },
};
module_platform_driver(marvell_cesa);
MODULE_ALIAS("platform:mv_crypto");
MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>");
MODULE_AUTHOR("Arnaud Ebalard <arno@natisbad.org>");
MODULE_DESCRIPTION("Support for Marvell's cryptographic engine");
MODULE_LICENSE("GPL v2");