// SPDX-License-Identifier: GPL-2.0
/*
 * sl3516-ce-core.c - hardware cryptographic offloader for Storlink SL3516 SoC
 *
 * Copyright (C) 2021 Corentin Labbe <clabbe@baylibre.com>
 *
 * Core file which registers crypto algorithms supported by the CryptoEngine
 */
10 #include <crypto/engine.h>
11 #include <crypto/internal/rng.h>
12 #include <crypto/internal/skcipher.h>
13 #include <linux/clk.h>
14 #include <linux/debugfs.h>
15 #include <linux/dev_printk.h>
16 #include <linux/dma-mapping.h>
17 #include <linux/err.h>
18 #include <linux/interrupt.h>
20 #include <linux/irq.h>
21 #include <linux/kernel.h>
22 #include <linux/module.h>
24 #include <linux/platform_device.h>
25 #include <linux/pm_runtime.h>
26 #include <linux/reset.h>
28 #include "sl3516-ce.h"
30 static int sl3516_ce_desc_init(struct sl3516_ce_dev
*ce
)
32 const size_t sz
= sizeof(struct descriptor
) * MAXDESC
;
35 ce
->tx
= dma_alloc_coherent(ce
->dev
, sz
, &ce
->dtx
, GFP_KERNEL
);
38 ce
->rx
= dma_alloc_coherent(ce
->dev
, sz
, &ce
->drx
, GFP_KERNEL
);
42 for (i
= 0; i
< MAXDESC
; i
++) {
43 ce
->tx
[i
].frame_ctrl
.bits
.own
= CE_CPU
;
44 ce
->tx
[i
].next_desc
.next_descriptor
= ce
->dtx
+ (i
+ 1) * sizeof(struct descriptor
);
46 ce
->tx
[MAXDESC
- 1].next_desc
.next_descriptor
= ce
->dtx
;
48 for (i
= 0; i
< MAXDESC
; i
++) {
49 ce
->rx
[i
].frame_ctrl
.bits
.own
= CE_CPU
;
50 ce
->rx
[i
].next_desc
.next_descriptor
= ce
->drx
+ (i
+ 1) * sizeof(struct descriptor
);
52 ce
->rx
[MAXDESC
- 1].next_desc
.next_descriptor
= ce
->drx
;
54 ce
->pctrl
= dma_alloc_coherent(ce
->dev
, sizeof(struct pkt_control_ecb
),
55 &ce
->dctrl
, GFP_KERNEL
);
61 dma_free_coherent(ce
->dev
, sz
, ce
->rx
, ce
->drx
);
63 dma_free_coherent(ce
->dev
, sz
, ce
->tx
, ce
->dtx
);
67 static void sl3516_ce_free_descs(struct sl3516_ce_dev
*ce
)
69 const size_t sz
= sizeof(struct descriptor
) * MAXDESC
;
71 dma_free_coherent(ce
->dev
, sz
, ce
->tx
, ce
->dtx
);
72 dma_free_coherent(ce
->dev
, sz
, ce
->rx
, ce
->drx
);
73 dma_free_coherent(ce
->dev
, sizeof(struct pkt_control_ecb
), ce
->pctrl
,
77 static void start_dma_tx(struct sl3516_ce_dev
*ce
)
81 v
= TXDMA_CTRL_START
| TXDMA_CTRL_CHAIN_MODE
| TXDMA_CTRL_CONTINUE
| \
82 TXDMA_CTRL_INT_FAIL
| TXDMA_CTRL_INT_PERR
| TXDMA_CTRL_BURST_UNK
;
84 writel(v
, ce
->base
+ IPSEC_TXDMA_CTRL
);
87 static void start_dma_rx(struct sl3516_ce_dev
*ce
)
91 v
= RXDMA_CTRL_START
| RXDMA_CTRL_CHAIN_MODE
| RXDMA_CTRL_CONTINUE
| \
92 RXDMA_CTRL_BURST_UNK
| RXDMA_CTRL_INT_FINISH
| \
93 RXDMA_CTRL_INT_FAIL
| RXDMA_CTRL_INT_PERR
| \
94 RXDMA_CTRL_INT_EOD
| RXDMA_CTRL_INT_EOF
;
96 writel(v
, ce
->base
+ IPSEC_RXDMA_CTRL
);
99 static struct descriptor
*get_desc_tx(struct sl3516_ce_dev
*ce
)
101 struct descriptor
*dd
;
103 dd
= &ce
->tx
[ce
->ctx
];
105 if (ce
->ctx
>= MAXDESC
)
110 static struct descriptor
*get_desc_rx(struct sl3516_ce_dev
*ce
)
112 struct descriptor
*rdd
;
114 rdd
= &ce
->rx
[ce
->crx
];
116 if (ce
->crx
>= MAXDESC
)
121 int sl3516_ce_run_task(struct sl3516_ce_dev
*ce
, struct sl3516_ce_cipher_req_ctx
*rctx
,
124 struct descriptor
*dd
, *rdd
= NULL
;
130 reinit_completion(&ce
->complete
);
133 for (i
= 0; i
< rctx
->nr_sgd
; i
++) {
134 dev_dbg(ce
->dev
, "%s handle DST SG %d/%d len=%d\n", __func__
,
135 i
, rctx
->nr_sgd
, rctx
->t_dst
[i
].len
);
136 rdd
= get_desc_rx(ce
);
137 rdd
->buf_adr
= rctx
->t_dst
[i
].addr
;
138 rdd
->frame_ctrl
.bits
.buffer_size
= rctx
->t_dst
[i
].len
;
139 rdd
->frame_ctrl
.bits
.own
= CE_DMA
;
141 rdd
->next_desc
.bits
.eofie
= 1;
143 for (i
= 0; i
< rctx
->nr_sgs
; i
++) {
144 dev_dbg(ce
->dev
, "%s handle SRC SG %d/%d len=%d\n", __func__
,
145 i
, rctx
->nr_sgs
, rctx
->t_src
[i
].len
);
146 rctx
->h
->algorithm_len
= rctx
->t_src
[i
].len
;
148 dd
= get_desc_tx(ce
);
149 dd
->frame_ctrl
.raw
= 0;
150 dd
->flag_status
.raw
= 0;
151 dd
->frame_ctrl
.bits
.buffer_size
= rctx
->pctrllen
;
152 dd
->buf_adr
= ce
->dctrl
;
153 dd
->flag_status
.tx_flag
.tqflag
= rctx
->tqflag
;
154 dd
->next_desc
.bits
.eofie
= 0;
155 dd
->next_desc
.bits
.dec
= 0;
156 dd
->next_desc
.bits
.sof_eof
= DESC_FIRST
| DESC_LAST
;
157 dd
->frame_ctrl
.bits
.own
= CE_DMA
;
159 dd
= get_desc_tx(ce
);
160 dd
->frame_ctrl
.raw
= 0;
161 dd
->flag_status
.raw
= 0;
162 dd
->frame_ctrl
.bits
.buffer_size
= rctx
->t_src
[i
].len
;
163 dd
->buf_adr
= rctx
->t_src
[i
].addr
;
164 dd
->flag_status
.tx_flag
.tqflag
= 0;
165 dd
->next_desc
.bits
.eofie
= 0;
166 dd
->next_desc
.bits
.dec
= 0;
167 dd
->next_desc
.bits
.sof_eof
= DESC_FIRST
| DESC_LAST
;
168 dd
->frame_ctrl
.bits
.own
= CE_DMA
;
172 wait_for_completion_interruptible_timeout(&ce
->complete
,
173 msecs_to_jiffies(5000));
174 if (ce
->status
== 0) {
175 dev_err(ce
->dev
, "DMA timeout for %s\n", name
);
178 v
= readl(ce
->base
+ IPSEC_STATUS_REG
);
180 dev_err(ce
->dev
, "IPSEC_STATUS_REG %x\n", v
);
187 static irqreturn_t
ce_irq_handler(int irq
, void *data
)
189 struct sl3516_ce_dev
*ce
= (struct sl3516_ce_dev
*)data
;
194 v
= readl(ce
->base
+ IPSEC_DMA_STATUS
);
195 writel(v
, ce
->base
+ IPSEC_DMA_STATUS
);
197 if (v
& DMA_STATUS_TS_DERR
)
198 dev_err(ce
->dev
, "AHB bus Error While Tx !!!\n");
199 if (v
& DMA_STATUS_TS_PERR
)
200 dev_err(ce
->dev
, "Tx Descriptor Protocol Error !!!\n");
201 if (v
& DMA_STATUS_RS_DERR
)
202 dev_err(ce
->dev
, "AHB bus Error While Rx !!!\n");
203 if (v
& DMA_STATUS_RS_PERR
)
204 dev_err(ce
->dev
, "Rx Descriptor Protocol Error !!!\n");
206 if (v
& DMA_STATUS_TS_EOFI
)
208 if (v
& DMA_STATUS_RS_EOFI
) {
210 complete(&ce
->complete
);
218 static struct sl3516_ce_alg_template ce_algs
[] = {
220 .type
= CRYPTO_ALG_TYPE_SKCIPHER
,
222 .alg
.skcipher
.base
= {
224 .cra_name
= "ecb(aes)",
225 .cra_driver_name
= "ecb-aes-sl3516",
227 .cra_blocksize
= AES_BLOCK_SIZE
,
228 .cra_flags
= CRYPTO_ALG_TYPE_SKCIPHER
|
229 CRYPTO_ALG_ASYNC
| CRYPTO_ALG_NEED_FALLBACK
,
230 .cra_ctxsize
= sizeof(struct sl3516_ce_cipher_tfm_ctx
),
231 .cra_module
= THIS_MODULE
,
232 .cra_alignmask
= 0xf,
233 .cra_init
= sl3516_ce_cipher_init
,
234 .cra_exit
= sl3516_ce_cipher_exit
,
236 .min_keysize
= AES_MIN_KEY_SIZE
,
237 .max_keysize
= AES_MAX_KEY_SIZE
,
238 .setkey
= sl3516_ce_aes_setkey
,
239 .encrypt
= sl3516_ce_skencrypt
,
240 .decrypt
= sl3516_ce_skdecrypt
,
243 .do_one_request
= sl3516_ce_handle_cipher_request
,
248 static int sl3516_ce_debugfs_show(struct seq_file
*seq
, void *v
)
250 struct sl3516_ce_dev
*ce
= seq
->private;
253 seq_printf(seq
, "HWRNG %lu %lu\n",
254 ce
->hwrng_stat_req
, ce
->hwrng_stat_bytes
);
255 seq_printf(seq
, "IRQ %lu\n", ce
->stat_irq
);
256 seq_printf(seq
, "IRQ TX %lu\n", ce
->stat_irq_tx
);
257 seq_printf(seq
, "IRQ RX %lu\n", ce
->stat_irq_rx
);
258 seq_printf(seq
, "nreq %lu\n", ce
->stat_req
);
259 seq_printf(seq
, "fallback SG count TX %lu\n", ce
->fallback_sg_count_tx
);
260 seq_printf(seq
, "fallback SG count RX %lu\n", ce
->fallback_sg_count_rx
);
261 seq_printf(seq
, "fallback modulo16 %lu\n", ce
->fallback_mod16
);
262 seq_printf(seq
, "fallback align16 %lu\n", ce
->fallback_align16
);
263 seq_printf(seq
, "fallback not same len %lu\n", ce
->fallback_not_same_len
);
265 for (i
= 0; i
< ARRAY_SIZE(ce_algs
); i
++) {
268 switch (ce_algs
[i
].type
) {
269 case CRYPTO_ALG_TYPE_SKCIPHER
:
270 seq_printf(seq
, "%s %s reqs=%lu fallback=%lu\n",
271 ce_algs
[i
].alg
.skcipher
.base
.base
.cra_driver_name
,
272 ce_algs
[i
].alg
.skcipher
.base
.base
.cra_name
,
273 ce_algs
[i
].stat_req
, ce_algs
[i
].stat_fb
);
280 DEFINE_SHOW_ATTRIBUTE(sl3516_ce_debugfs
);
282 static int sl3516_ce_register_algs(struct sl3516_ce_dev
*ce
)
287 for (i
= 0; i
< ARRAY_SIZE(ce_algs
); i
++) {
289 switch (ce_algs
[i
].type
) {
290 case CRYPTO_ALG_TYPE_SKCIPHER
:
291 dev_info(ce
->dev
, "DEBUG: Register %s\n",
292 ce_algs
[i
].alg
.skcipher
.base
.base
.cra_name
);
293 err
= crypto_engine_register_skcipher(&ce_algs
[i
].alg
.skcipher
);
295 dev_err(ce
->dev
, "Fail to register %s\n",
296 ce_algs
[i
].alg
.skcipher
.base
.base
.cra_name
);
297 ce_algs
[i
].ce
= NULL
;
302 ce_algs
[i
].ce
= NULL
;
303 dev_err(ce
->dev
, "ERROR: tried to register an unknown algo\n");
309 static void sl3516_ce_unregister_algs(struct sl3516_ce_dev
*ce
)
313 for (i
= 0; i
< ARRAY_SIZE(ce_algs
); i
++) {
316 switch (ce_algs
[i
].type
) {
317 case CRYPTO_ALG_TYPE_SKCIPHER
:
318 dev_info(ce
->dev
, "Unregister %d %s\n", i
,
319 ce_algs
[i
].alg
.skcipher
.base
.base
.cra_name
);
320 crypto_engine_unregister_skcipher(&ce_algs
[i
].alg
.skcipher
);
326 static void sl3516_ce_start(struct sl3516_ce_dev
*ce
)
330 writel(ce
->dtx
, ce
->base
+ IPSEC_TXDMA_CURR_DESC
);
331 writel(ce
->drx
, ce
->base
+ IPSEC_RXDMA_CURR_DESC
);
332 writel(0, ce
->base
+ IPSEC_DMA_STATUS
);
/*
 * Power management strategy: The device is suspended unless a TFM exists for
 * one of the algorithms proposed by this driver.
 */
339 static int sl3516_ce_pm_suspend(struct device
*dev
)
341 struct sl3516_ce_dev
*ce
= dev_get_drvdata(dev
);
343 reset_control_assert(ce
->reset
);
344 clk_disable_unprepare(ce
->clks
);
348 static int sl3516_ce_pm_resume(struct device
*dev
)
350 struct sl3516_ce_dev
*ce
= dev_get_drvdata(dev
);
353 err
= clk_prepare_enable(ce
->clks
);
355 dev_err(ce
->dev
, "Cannot prepare_enable\n");
358 err
= reset_control_deassert(ce
->reset
);
360 dev_err(ce
->dev
, "Cannot deassert reset control\n");
368 sl3516_ce_pm_suspend(dev
);
372 static const struct dev_pm_ops sl3516_ce_pm_ops
= {
373 SET_RUNTIME_PM_OPS(sl3516_ce_pm_suspend
, sl3516_ce_pm_resume
, NULL
)
376 static int sl3516_ce_pm_init(struct sl3516_ce_dev
*ce
)
380 pm_runtime_use_autosuspend(ce
->dev
);
381 pm_runtime_set_autosuspend_delay(ce
->dev
, 2000);
383 err
= pm_runtime_set_suspended(ce
->dev
);
386 pm_runtime_enable(ce
->dev
);
390 static void sl3516_ce_pm_exit(struct sl3516_ce_dev
*ce
)
392 pm_runtime_disable(ce
->dev
);
395 static int sl3516_ce_probe(struct platform_device
*pdev
)
397 struct sl3516_ce_dev
*ce
;
401 ce
= devm_kzalloc(&pdev
->dev
, sizeof(*ce
), GFP_KERNEL
);
405 ce
->dev
= &pdev
->dev
;
406 platform_set_drvdata(pdev
, ce
);
408 ce
->base
= devm_platform_ioremap_resource(pdev
, 0);
409 if (IS_ERR(ce
->base
))
410 return PTR_ERR(ce
->base
);
412 irq
= platform_get_irq(pdev
, 0);
416 err
= devm_request_irq(&pdev
->dev
, irq
, ce_irq_handler
, 0, "crypto", ce
);
418 dev_err(ce
->dev
, "Cannot request Crypto Engine IRQ (err=%d)\n", err
);
422 ce
->reset
= devm_reset_control_get(&pdev
->dev
, NULL
);
423 if (IS_ERR(ce
->reset
))
424 return dev_err_probe(&pdev
->dev
, PTR_ERR(ce
->reset
),
425 "No reset control found\n");
426 ce
->clks
= devm_clk_get(ce
->dev
, NULL
);
427 if (IS_ERR(ce
->clks
)) {
428 err
= PTR_ERR(ce
->clks
);
429 dev_err(ce
->dev
, "Cannot get clock err=%d\n", err
);
433 err
= sl3516_ce_desc_init(ce
);
437 err
= sl3516_ce_pm_init(ce
);
441 init_completion(&ce
->complete
);
443 ce
->engine
= crypto_engine_alloc_init(ce
->dev
, true);
445 dev_err(ce
->dev
, "Cannot allocate engine\n");
450 err
= crypto_engine_start(ce
->engine
);
452 dev_err(ce
->dev
, "Cannot start engine\n");
456 err
= sl3516_ce_register_algs(ce
);
460 err
= sl3516_ce_rng_register(ce
);
464 err
= pm_runtime_resume_and_get(ce
->dev
);
468 v
= readl(ce
->base
+ IPSEC_ID
);
469 dev_info(ce
->dev
, "SL3516 dev %lx rev %lx\n",
472 v
= readl(ce
->base
+ IPSEC_DMA_DEVICE_ID
);
473 dev_info(ce
->dev
, "SL3516 DMA dev %lx rev %lx\n",
477 pm_runtime_put_sync(ce
->dev
);
479 if (IS_ENABLED(CONFIG_CRYPTO_DEV_SL3516_DEBUG
)) {
480 struct dentry
*dbgfs_dir __maybe_unused
;
481 struct dentry
*dbgfs_stats __maybe_unused
;
483 /* Ignore error of debugfs */
484 dbgfs_dir
= debugfs_create_dir("sl3516", NULL
);
485 dbgfs_stats
= debugfs_create_file("stats", 0444,
487 &sl3516_ce_debugfs_fops
);
488 #ifdef CONFIG_CRYPTO_DEV_SL3516_DEBUG
489 ce
->dbgfs_dir
= dbgfs_dir
;
490 ce
->dbgfs_stats
= dbgfs_stats
;
496 sl3516_ce_rng_unregister(ce
);
498 sl3516_ce_unregister_algs(ce
);
500 crypto_engine_exit(ce
->engine
);
502 sl3516_ce_pm_exit(ce
);
504 sl3516_ce_free_descs(ce
);
508 static void sl3516_ce_remove(struct platform_device
*pdev
)
510 struct sl3516_ce_dev
*ce
= platform_get_drvdata(pdev
);
512 sl3516_ce_rng_unregister(ce
);
513 sl3516_ce_unregister_algs(ce
);
514 crypto_engine_exit(ce
->engine
);
515 sl3516_ce_pm_exit(ce
);
516 sl3516_ce_free_descs(ce
);
518 #ifdef CONFIG_CRYPTO_DEV_SL3516_DEBUG
519 debugfs_remove_recursive(ce
->dbgfs_dir
);
523 static const struct of_device_id sl3516_ce_crypto_of_match_table
[] = {
524 { .compatible
= "cortina,sl3516-crypto"},
527 MODULE_DEVICE_TABLE(of
, sl3516_ce_crypto_of_match_table
);
529 static struct platform_driver sl3516_ce_driver
= {
530 .probe
= sl3516_ce_probe
,
531 .remove
= sl3516_ce_remove
,
533 .name
= "sl3516-crypto",
534 .pm
= &sl3516_ce_pm_ops
,
535 .of_match_table
= sl3516_ce_crypto_of_match_table
,
539 module_platform_driver(sl3516_ce_driver
);
541 MODULE_DESCRIPTION("SL3516 cryptographic offloader");
542 MODULE_LICENSE("GPL");
543 MODULE_AUTHOR("Corentin Labbe <clabbe@baylibre.com>");