linux.git: drivers/crypto/aspeed/aspeed-acry.c
(blob 8d1c79aaca07d7c2ef5abcff55677b9c29d51bf0)
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3 * Copyright 2021 Aspeed Technology Inc.
4 */
5 #include <crypto/engine.h>
6 #include <crypto/internal/akcipher.h>
7 #include <crypto/internal/rsa.h>
8 #include <crypto/scatterwalk.h>
9 #include <linux/clk.h>
10 #include <linux/count_zeros.h>
11 #include <linux/dma-mapping.h>
12 #include <linux/err.h>
13 #include <linux/interrupt.h>
14 #include <linux/kernel.h>
15 #include <linux/mfd/syscon.h>
16 #include <linux/module.h>
17 #include <linux/of.h>
18 #include <linux/platform_device.h>
19 #include <linux/regmap.h>
20 #include <linux/slab.h>
21 #include <linux/string.h>
/*
 * Debug trace helper: always prints via dev_info() when the driver's debug
 * Kconfig option is set, otherwise compiles down to dev_dbg() (dynamic
 * debug controlled).  Prepends the calling function's name.
 */
#ifdef CONFIG_CRYPTO_DEV_ASPEED_DEBUG
#define ACRY_DBG(d, fmt, ...) \
	dev_info((d)->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
#else
#define ACRY_DBG(d, fmt, ...) \
	dev_dbg((d)->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
#endif
/*****************************
 *                           *
 * ACRY register definitions *
 *                           *
 * ***************************/
#define ASPEED_ACRY_TRIGGER		0x000	/* ACRY Engine Control: trigger */
#define ASPEED_ACRY_DMA_CMD		0x048	/* ACRY Engine Control: Command */
#define ASPEED_ACRY_DMA_SRC_BASE	0x04C	/* ACRY DRAM base address for DMA */
#define ASPEED_ACRY_DMA_LEN		0x050	/* ACRY Data Length of DMA */
#define ASPEED_ACRY_RSA_KEY_LEN		0x058	/* ACRY RSA Exp/Mod Key Length (Bits) */
#define ASPEED_ACRY_INT_MASK		0x3F8	/* ACRY Interrupt Mask */
#define ASPEED_ACRY_STATUS		0x3FC	/* ACRY Interrupt Status */

/* rsa trigger */
#define ACRY_CMD_RSA_TRIGGER		BIT(0)
#define ACRY_CMD_DMA_RSA_TRIGGER	BIT(1)

/* rsa dma cmd */
#define ACRY_CMD_DMA_SRAM_MODE_RSA	(0x3 << 4)
#define ACRY_CMD_DMEM_AHB		BIT(8)
#define ACRY_CMD_DMA_SRAM_AHB_ENGINE	0

/* rsa key len: exponent bit-length in [31:16], modulus bit-length in [15:0] */
#define RSA_E_BITS_LEN(x)		((x) << 16)
#define RSA_M_BITS_LEN(x)		(x)

/* acry isr */
#define ACRY_RSA_ISR			BIT(1)

#define ASPEED_ACRY_BUFF_SIZE		0x1800	/* DMA buffer size */
#define ASPEED_ACRY_SRAM_MAX_LEN	2048	/* ACRY SRAM maximum length (Bytes) */
#define ASPEED_ACRY_RSA_MAX_KEY_LEN	512	/* ACRY RSA maximum key length (Bytes) */

#define CRYPTO_FLAGS_BUSY		BIT(1)
#define BYTES_PER_DWORD			4

/*****************************
 *                           *
 * AHBC register definitions *
 *                           *
 * ***************************/
#define AHBC_REGION_PROT		0x240
#define REGION_ACRYM			BIT(23)

/* MMIO accessors for the ACRY register window mapped at acry->regs */
#define ast_acry_write(acry, val, offset) \
	writel((val), (acry)->regs + (offset))

#define ast_acry_read(acry, offset) \
	readl((acry)->regs + (offset))

struct aspeed_acry_dev;

/* Per-request callback type: triggers or resumes an engine operation. */
typedef int (*aspeed_acry_fn_t)(struct aspeed_acry_dev *);
/* Per-device state for one ACRY accelerator instance. */
struct aspeed_acry_dev {
	void __iomem			*regs;		/* ACRY MMIO register window */
	struct device			*dev;
	int				irq;
	struct clk			*clk;
	struct regmap			*ahbc;		/* AHBC syscon, used for SRAM region protection */

	struct akcipher_request		*req;		/* request currently in flight */
	struct tasklet_struct		done_task;	/* completion bottom half */
	aspeed_acry_fn_t		resume;		/* handler run by done_task after the IRQ */
	unsigned long			flags;		/* CRYPTO_FLAGS_BUSY */

	/* ACRY output SRAM buffer */
	void __iomem			*acry_sram;

	/* ACRY input DMA buffer (coherent, ASPEED_ACRY_BUFF_SIZE bytes) */
	void				*buf_addr;
	dma_addr_t			buf_dma_addr;

	struct crypto_engine		*crypt_engine_rsa;

	/*
	 * ACRY SRAM memory mapped: lookup tables translating linear indices
	 * into the engine's interleaved SRAM layout (built at probe time by
	 * aspeed_acry_sram_mapping()).
	 */
	int				exp_dw_mapping[ASPEED_ACRY_RSA_MAX_KEY_LEN];
	int				mod_dw_mapping[ASPEED_ACRY_RSA_MAX_KEY_LEN];
	int				data_byte_mapping[ASPEED_ACRY_SRAM_MAX_LEN];
};
/* Per-transform context: parsed RSA key plus driver-owned copies of n/e/d. */
struct aspeed_acry_ctx {
	struct aspeed_acry_dev		*acry_dev;

	struct rsa_key			key;	/* parsed key (pointers valid only during setkey) */
	int				enc;	/* 1 = encrypt (use e), 0 = decrypt (use d) */
	u8				*n;	/* kmemdup'd copies, freed with kfree_sensitive() */
	u8				*e;
	u8				*d;
	size_t				n_sz;
	size_t				e_sz;
	size_t				d_sz;

	aspeed_acry_fn_t		trigger;	/* operation started by the engine worker */

	struct crypto_akcipher		*fallback_tfm;	/* software RSA for oversized keys */
};
/* Pairs a registered akcipher algorithm with its backing device. */
struct aspeed_acry_alg {
	struct aspeed_acry_dev		*acry_dev;
	struct akcipher_engine_alg	akcipher;
};
/* Destination selector for aspeed_acry_rsa_ctx_copy(). */
enum aspeed_rsa_key_mode {
	ASPEED_RSA_EXP_MODE = 0,
	ASPEED_RSA_MOD_MODE,
	ASPEED_RSA_DATA_MODE,
};
/* Convert a generic async crypto request back to its akcipher container. */
static inline struct akcipher_request *
akcipher_request_cast(struct crypto_async_request *req)
{
	return container_of(req, struct akcipher_request, base);
}
146 static int aspeed_acry_do_fallback(struct akcipher_request *req)
148 struct crypto_akcipher *cipher = crypto_akcipher_reqtfm(req);
149 struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(cipher);
150 int err;
152 akcipher_request_set_tfm(req, ctx->fallback_tfm);
154 if (ctx->enc)
155 err = crypto_akcipher_encrypt(req);
156 else
157 err = crypto_akcipher_decrypt(req);
159 akcipher_request_set_tfm(req, cipher);
161 return err;
164 static bool aspeed_acry_need_fallback(struct akcipher_request *req)
166 struct crypto_akcipher *cipher = crypto_akcipher_reqtfm(req);
167 struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(cipher);
169 return ctx->key.n_sz > ASPEED_ACRY_RSA_MAX_KEY_LEN;
172 static int aspeed_acry_handle_queue(struct aspeed_acry_dev *acry_dev,
173 struct akcipher_request *req)
175 if (aspeed_acry_need_fallback(req)) {
176 ACRY_DBG(acry_dev, "SW fallback\n");
177 return aspeed_acry_do_fallback(req);
180 return crypto_transfer_akcipher_request_to_engine(acry_dev->crypt_engine_rsa, req);
183 static int aspeed_acry_do_request(struct crypto_engine *engine, void *areq)
185 struct akcipher_request *req = akcipher_request_cast(areq);
186 struct crypto_akcipher *cipher = crypto_akcipher_reqtfm(req);
187 struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(cipher);
188 struct aspeed_acry_dev *acry_dev = ctx->acry_dev;
190 acry_dev->req = req;
191 acry_dev->flags |= CRYPTO_FLAGS_BUSY;
193 return ctx->trigger(acry_dev);
196 static int aspeed_acry_complete(struct aspeed_acry_dev *acry_dev, int err)
198 struct akcipher_request *req = acry_dev->req;
200 acry_dev->flags &= ~CRYPTO_FLAGS_BUSY;
202 crypto_finalize_akcipher_request(acry_dev->crypt_engine_rsa, req, err);
204 return err;
/*
 * Copy Data to DMA buffer for engine used.
 *
 * Linearizes the source scatterlist, then writes it into the DMA buffer
 * byte-reversed through data_byte_mapping[] (the engine's SRAM layout wants
 * the least-significant byte first); the remainder of the mapped data area
 * is zero-padded.
 *
 * NOTE(review): the static scratch buffer assumes requests are serialized
 * (one in flight per engine) — confirm against the crypto engine setup.
 */
static void aspeed_acry_rsa_sg_copy_to_buffer(struct aspeed_acry_dev *acry_dev,
					      u8 *buf, struct scatterlist *src,
					      size_t nbytes)
{
	static u8 dram_buffer[ASPEED_ACRY_SRAM_MAX_LEN];
	int i = 0, j;
	int data_idx;

	ACRY_DBG(acry_dev, "\n");

	scatterwalk_map_and_copy(dram_buffer, src, 0, nbytes, 0);

	/* Reverse byte order while translating indices into SRAM layout. */
	for (j = nbytes - 1; j >= 0; j--) {
		data_idx = acry_dev->data_byte_mapping[i];
		buf[data_idx] = dram_buffer[j];
		i++;
	}

	/* Zero-pad the rest of the mapped data region. */
	for (; i < ASPEED_ACRY_SRAM_MAX_LEN; i++) {
		data_idx = acry_dev->data_byte_mapping[i];
		buf[data_idx] = 0;
	}
}
/*
 * Copy Exp/Mod to DMA buffer for engine used.
 *
 * Params:
 * - mode 0 : Exponential
 * - mode 1 : Modulus
 *
 * Example:
 * - DRAM memory layout:
 *	D[0], D[4], D[8], D[12]
 * - ACRY SRAM memory layout should reverse the order of source data:
 *	D[12], D[8], D[4], D[0]
 *
 * Strips leading zero bytes, packs the big-endian byte string into
 * little-endian 32-bit words (most-significant word first in the source,
 * written to descending mapped indices), and returns the key's effective
 * bit length.  Returns -ENOMEM if the key exceeds the engine maximum.
 */
static int aspeed_acry_rsa_ctx_copy(struct aspeed_acry_dev *acry_dev, void *buf,
				    const void *xbuf, size_t nbytes,
				    enum aspeed_rsa_key_mode mode)
{
	const u8 *src = xbuf;
	__le32 *dw_buf = buf;
	int nbits, ndw;
	int i, j, idx;
	u32 data = 0;

	ACRY_DBG(acry_dev, "nbytes:%zu, mode:%d\n", nbytes, mode);

	if (nbytes > ASPEED_ACRY_RSA_MAX_KEY_LEN)
		return -ENOMEM;

	/* Remove the leading zeros */
	while (nbytes > 0 && src[0] == 0) {
		src++;
		nbytes--;
	}

	/* Effective bit length: drop the leading zero bits of the top byte. */
	nbits = nbytes * 8;
	if (nbytes > 0)
		nbits -= count_leading_zeros(src[0]) - (BITS_PER_LONG - 8);

	/* double-word alignment */
	ndw = DIV_ROUND_UP(nbytes, BYTES_PER_DWORD);

	if (nbytes > 0) {
		/* Offset into the first (possibly partial) 4-byte group. */
		i = BYTES_PER_DWORD - nbytes % BYTES_PER_DWORD;
		i %= BYTES_PER_DWORD;

		for (j = ndw; j > 0; j--) {
			/* Accumulate up to 4 source bytes, MSB first. */
			for (; i < BYTES_PER_DWORD; i++) {
				data <<= 8;
				data |= *src++;
			}

			i = 0;

			if (mode == ASPEED_RSA_EXP_MODE)
				idx = acry_dev->exp_dw_mapping[j - 1];
			else /* mode == ASPEED_RSA_MOD_MODE */
				idx = acry_dev->mod_dw_mapping[j - 1];

			dw_buf[idx] = cpu_to_le32(data);
		}
	}

	return nbits;
}
/*
 * Completion handler: read the RSA result out of the engine SRAM, strip
 * leading zeros, reverse it back into big-endian order, copy it to the
 * request's destination scatterlist and finalize the request.
 */
static int aspeed_acry_rsa_transfer(struct aspeed_acry_dev *acry_dev)
{
	struct akcipher_request *req = acry_dev->req;
	u8 __iomem *sram_buffer = acry_dev->acry_sram;
	struct scatterlist *out_sg = req->dst;
	static u8 dram_buffer[ASPEED_ACRY_SRAM_MAX_LEN];
	int leading_zero = 1;
	int result_nbytes;
	int i = 0, j;
	int data_idx;

	/* Set Data Memory to AHB(CPU) Access Mode */
	ast_acry_write(acry_dev, ACRY_CMD_DMEM_AHB, ASPEED_ACRY_DMA_CMD);

	/* Disable ACRY SRAM protection */
	regmap_update_bits(acry_dev->ahbc, AHBC_REGION_PROT,
			   REGION_ACRYM, 0);

	result_nbytes = ASPEED_ACRY_SRAM_MAX_LEN;

	/* Walk from the most-significant mapped byte, skipping leading zeros. */
	for (j = ASPEED_ACRY_SRAM_MAX_LEN - 1; j >= 0; j--) {
		data_idx = acry_dev->data_byte_mapping[j];
		if (readb(sram_buffer + data_idx) == 0 && leading_zero) {
			result_nbytes--;
		} else {
			leading_zero = 0;
			dram_buffer[i] = readb(sram_buffer + data_idx);
			i++;
		}
	}

	ACRY_DBG(acry_dev, "result_nbytes:%d, req->dst_len:%d\n",
		 result_nbytes, req->dst_len);

	if (result_nbytes <= req->dst_len) {
		scatterwalk_map_and_copy(dram_buffer, out_sg, 0, result_nbytes,
					 1);
		req->dst_len = result_nbytes;
	} else {
		/*
		 * NOTE(review): the oversized-result branch still completes
		 * the request with status 0 below — confirm whether an error
		 * code should be propagated instead.
		 */
		dev_err(acry_dev->dev, "RSA engine error!\n");
	}

	/* Scrub the input DMA buffer (it held key material). */
	memzero_explicit(acry_dev->buf_addr, ASPEED_ACRY_BUFF_SIZE);

	return aspeed_acry_complete(acry_dev, 0);
}
347 static int aspeed_acry_rsa_trigger(struct aspeed_acry_dev *acry_dev)
349 struct akcipher_request *req = acry_dev->req;
350 struct crypto_akcipher *cipher = crypto_akcipher_reqtfm(req);
351 struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(cipher);
352 int ne, nm;
354 if (!ctx->n || !ctx->n_sz) {
355 dev_err(acry_dev->dev, "%s: key n is not set\n", __func__);
356 return -EINVAL;
359 memzero_explicit(acry_dev->buf_addr, ASPEED_ACRY_BUFF_SIZE);
361 /* Copy source data to DMA buffer */
362 aspeed_acry_rsa_sg_copy_to_buffer(acry_dev, acry_dev->buf_addr,
363 req->src, req->src_len);
365 nm = aspeed_acry_rsa_ctx_copy(acry_dev, acry_dev->buf_addr, ctx->n,
366 ctx->n_sz, ASPEED_RSA_MOD_MODE);
367 if (ctx->enc) {
368 if (!ctx->e || !ctx->e_sz) {
369 dev_err(acry_dev->dev, "%s: key e is not set\n",
370 __func__);
371 return -EINVAL;
373 /* Copy key e to DMA buffer */
374 ne = aspeed_acry_rsa_ctx_copy(acry_dev, acry_dev->buf_addr,
375 ctx->e, ctx->e_sz,
376 ASPEED_RSA_EXP_MODE);
377 } else {
378 if (!ctx->d || !ctx->d_sz) {
379 dev_err(acry_dev->dev, "%s: key d is not set\n",
380 __func__);
381 return -EINVAL;
383 /* Copy key d to DMA buffer */
384 ne = aspeed_acry_rsa_ctx_copy(acry_dev, acry_dev->buf_addr,
385 ctx->key.d, ctx->key.d_sz,
386 ASPEED_RSA_EXP_MODE);
389 ast_acry_write(acry_dev, acry_dev->buf_dma_addr,
390 ASPEED_ACRY_DMA_SRC_BASE);
391 ast_acry_write(acry_dev, (ne << 16) + nm,
392 ASPEED_ACRY_RSA_KEY_LEN);
393 ast_acry_write(acry_dev, ASPEED_ACRY_BUFF_SIZE,
394 ASPEED_ACRY_DMA_LEN);
396 acry_dev->resume = aspeed_acry_rsa_transfer;
398 /* Enable ACRY SRAM protection */
399 regmap_update_bits(acry_dev->ahbc, AHBC_REGION_PROT,
400 REGION_ACRYM, REGION_ACRYM);
402 ast_acry_write(acry_dev, ACRY_RSA_ISR, ASPEED_ACRY_INT_MASK);
403 ast_acry_write(acry_dev, ACRY_CMD_DMA_SRAM_MODE_RSA |
404 ACRY_CMD_DMA_SRAM_AHB_ENGINE, ASPEED_ACRY_DMA_CMD);
406 /* Trigger RSA engines */
407 ast_acry_write(acry_dev, ACRY_CMD_RSA_TRIGGER |
408 ACRY_CMD_DMA_RSA_TRIGGER, ASPEED_ACRY_TRIGGER);
410 return 0;
413 static int aspeed_acry_rsa_enc(struct akcipher_request *req)
415 struct crypto_akcipher *cipher = crypto_akcipher_reqtfm(req);
416 struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(cipher);
417 struct aspeed_acry_dev *acry_dev = ctx->acry_dev;
419 ctx->trigger = aspeed_acry_rsa_trigger;
420 ctx->enc = 1;
422 return aspeed_acry_handle_queue(acry_dev, req);
425 static int aspeed_acry_rsa_dec(struct akcipher_request *req)
427 struct crypto_akcipher *cipher = crypto_akcipher_reqtfm(req);
428 struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(cipher);
429 struct aspeed_acry_dev *acry_dev = ctx->acry_dev;
431 ctx->trigger = aspeed_acry_rsa_trigger;
432 ctx->enc = 0;
434 return aspeed_acry_handle_queue(acry_dev, req);
/* Duplicate raw key material; the copy is freed with kfree_sensitive(). */
static u8 *aspeed_rsa_key_copy(u8 *src, size_t len)
{
	return kmemdup(src, len, GFP_KERNEL);
}
442 static int aspeed_rsa_set_n(struct aspeed_acry_ctx *ctx, u8 *value,
443 size_t len)
445 ctx->n_sz = len;
446 ctx->n = aspeed_rsa_key_copy(value, len);
447 if (!ctx->n)
448 return -ENOMEM;
450 return 0;
453 static int aspeed_rsa_set_e(struct aspeed_acry_ctx *ctx, u8 *value,
454 size_t len)
456 ctx->e_sz = len;
457 ctx->e = aspeed_rsa_key_copy(value, len);
458 if (!ctx->e)
459 return -ENOMEM;
461 return 0;
464 static int aspeed_rsa_set_d(struct aspeed_acry_ctx *ctx, u8 *value,
465 size_t len)
467 ctx->d_sz = len;
468 ctx->d = aspeed_rsa_key_copy(value, len);
469 if (!ctx->d)
470 return -ENOMEM;
472 return 0;
475 static void aspeed_rsa_key_free(struct aspeed_acry_ctx *ctx)
477 kfree_sensitive(ctx->n);
478 kfree_sensitive(ctx->e);
479 kfree_sensitive(ctx->d);
480 ctx->n_sz = 0;
481 ctx->e_sz = 0;
482 ctx->d_sz = 0;
485 static int aspeed_acry_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
486 unsigned int keylen, int priv)
488 struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(tfm);
489 struct aspeed_acry_dev *acry_dev = ctx->acry_dev;
490 int ret;
492 if (priv)
493 ret = rsa_parse_priv_key(&ctx->key, key, keylen);
494 else
495 ret = rsa_parse_pub_key(&ctx->key, key, keylen);
497 if (ret) {
498 dev_err(acry_dev->dev, "rsa parse key failed, ret:0x%x\n",
499 ret);
500 return ret;
503 /* Aspeed engine supports up to 4096 bits,
504 * Use software fallback instead.
506 if (ctx->key.n_sz > ASPEED_ACRY_RSA_MAX_KEY_LEN)
507 return 0;
509 ret = aspeed_rsa_set_n(ctx, (u8 *)ctx->key.n, ctx->key.n_sz);
510 if (ret)
511 goto err;
513 ret = aspeed_rsa_set_e(ctx, (u8 *)ctx->key.e, ctx->key.e_sz);
514 if (ret)
515 goto err;
517 if (priv) {
518 ret = aspeed_rsa_set_d(ctx, (u8 *)ctx->key.d, ctx->key.d_sz);
519 if (ret)
520 goto err;
523 return 0;
525 err:
526 dev_err(acry_dev->dev, "rsa set key failed\n");
527 aspeed_rsa_key_free(ctx);
529 return ret;
532 static int aspeed_acry_rsa_set_pub_key(struct crypto_akcipher *tfm,
533 const void *key,
534 unsigned int keylen)
536 struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(tfm);
537 int ret;
539 ret = crypto_akcipher_set_pub_key(ctx->fallback_tfm, key, keylen);
540 if (ret)
541 return ret;
543 return aspeed_acry_rsa_setkey(tfm, key, keylen, 0);
546 static int aspeed_acry_rsa_set_priv_key(struct crypto_akcipher *tfm,
547 const void *key,
548 unsigned int keylen)
550 struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(tfm);
551 int ret;
553 ret = crypto_akcipher_set_priv_key(ctx->fallback_tfm, key, keylen);
554 if (ret)
555 return ret;
557 return aspeed_acry_rsa_setkey(tfm, key, keylen, 1);
560 static unsigned int aspeed_acry_rsa_max_size(struct crypto_akcipher *tfm)
562 struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(tfm);
564 if (ctx->key.n_sz > ASPEED_ACRY_RSA_MAX_KEY_LEN)
565 return crypto_akcipher_maxsize(ctx->fallback_tfm);
567 return ctx->n_sz;
570 static int aspeed_acry_rsa_init_tfm(struct crypto_akcipher *tfm)
572 struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(tfm);
573 struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
574 const char *name = crypto_tfm_alg_name(&tfm->base);
575 struct aspeed_acry_alg *acry_alg;
577 acry_alg = container_of(alg, struct aspeed_acry_alg, akcipher.base);
579 ctx->acry_dev = acry_alg->acry_dev;
581 ctx->fallback_tfm = crypto_alloc_akcipher(name, 0, CRYPTO_ALG_ASYNC |
582 CRYPTO_ALG_NEED_FALLBACK);
583 if (IS_ERR(ctx->fallback_tfm)) {
584 dev_err(ctx->acry_dev->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
585 name, PTR_ERR(ctx->fallback_tfm));
586 return PTR_ERR(ctx->fallback_tfm);
589 return 0;
592 static void aspeed_acry_rsa_exit_tfm(struct crypto_akcipher *tfm)
594 struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(tfm);
596 crypto_free_akcipher(ctx->fallback_tfm);
/* Algorithms registered with the crypto engine: hardware RSA with fallback. */
static struct aspeed_acry_alg aspeed_acry_akcipher_algs[] = {
	{
		.akcipher.base = {
			.encrypt = aspeed_acry_rsa_enc,
			.decrypt = aspeed_acry_rsa_dec,
			.set_pub_key = aspeed_acry_rsa_set_pub_key,
			.set_priv_key = aspeed_acry_rsa_set_priv_key,
			.max_size = aspeed_acry_rsa_max_size,
			.init = aspeed_acry_rsa_init_tfm,
			.exit = aspeed_acry_rsa_exit_tfm,
			.base = {
				.cra_name = "rsa",
				.cra_driver_name = "aspeed-rsa",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_TYPE_AKCIPHER |
					     CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY |
					     CRYPTO_ALG_NEED_FALLBACK,
				.cra_module = THIS_MODULE,
				.cra_ctxsize = sizeof(struct aspeed_acry_ctx),
			},
		},
		.akcipher.op = {
			.do_one_request = aspeed_acry_do_request,
		},
	},
};
627 static void aspeed_acry_register(struct aspeed_acry_dev *acry_dev)
629 int i, rc;
631 for (i = 0; i < ARRAY_SIZE(aspeed_acry_akcipher_algs); i++) {
632 aspeed_acry_akcipher_algs[i].acry_dev = acry_dev;
633 rc = crypto_engine_register_akcipher(&aspeed_acry_akcipher_algs[i].akcipher);
634 if (rc) {
635 ACRY_DBG(acry_dev, "Failed to register %s\n",
636 aspeed_acry_akcipher_algs[i].akcipher.base.base.cra_name);
641 static void aspeed_acry_unregister(struct aspeed_acry_dev *acry_dev)
643 int i;
645 for (i = 0; i < ARRAY_SIZE(aspeed_acry_akcipher_algs); i++)
646 crypto_engine_unregister_akcipher(&aspeed_acry_akcipher_algs[i].akcipher);
649 /* ACRY interrupt service routine. */
650 static irqreturn_t aspeed_acry_irq(int irq, void *dev)
652 struct aspeed_acry_dev *acry_dev = (struct aspeed_acry_dev *)dev;
653 u32 sts;
655 sts = ast_acry_read(acry_dev, ASPEED_ACRY_STATUS);
656 ast_acry_write(acry_dev, sts, ASPEED_ACRY_STATUS);
658 ACRY_DBG(acry_dev, "irq sts:0x%x\n", sts);
660 if (sts & ACRY_RSA_ISR) {
661 /* Stop RSA engine */
662 ast_acry_write(acry_dev, 0, ASPEED_ACRY_TRIGGER);
664 if (acry_dev->flags & CRYPTO_FLAGS_BUSY)
665 tasklet_schedule(&acry_dev->done_task);
666 else
667 dev_err(acry_dev->dev, "RSA no active requests.\n");
670 return IRQ_HANDLED;
/*
 * ACRY SRAM has its own memory layout.
 * Set the DRAM to SRAM indexing for future used.
 *
 * Builds the lookup tables that translate linear dword/byte indices into
 * the engine's interleaved SRAM layout: for each group, exponent dwords
 * land at j, modulus dwords at j + 4, and data bytes at (j + 8) * 4.
 *
 * NOTE(review): the index stride (j jumps by 8 extra dwords every 4
 * iterations) is inferred from the arithmetic below — confirm against the
 * ACRY SRAM map in the datasheet.
 */
static void aspeed_acry_sram_mapping(struct aspeed_acry_dev *acry_dev)
{
	int i, j = 0;

	for (i = 0; i < (ASPEED_ACRY_SRAM_MAX_LEN / BYTES_PER_DWORD); i++) {
		acry_dev->exp_dw_mapping[i] = j;
		acry_dev->mod_dw_mapping[i] = j + 4;
		acry_dev->data_byte_mapping[(i * 4)] = (j + 8) * 4;
		acry_dev->data_byte_mapping[(i * 4) + 1] = (j + 8) * 4 + 1;
		acry_dev->data_byte_mapping[(i * 4) + 2] = (j + 8) * 4 + 2;
		acry_dev->data_byte_mapping[(i * 4) + 3] = (j + 8) * 4 + 3;
		j++;
		/* After every 4 dwords, skip over the mod/data banks. */
		j = j % 4 ? j : j + 8;
	}
}
693 static void aspeed_acry_done_task(unsigned long data)
695 struct aspeed_acry_dev *acry_dev = (struct aspeed_acry_dev *)data;
697 (void)acry_dev->resume(acry_dev);
/* Device-tree match table: AST2600 ACRY engine. */
static const struct of_device_id aspeed_acry_of_matches[] = {
	{ .compatible = "aspeed,ast2600-acry", },
	{},
};
705 static int aspeed_acry_probe(struct platform_device *pdev)
707 struct aspeed_acry_dev *acry_dev;
708 struct device *dev = &pdev->dev;
709 int rc;
711 acry_dev = devm_kzalloc(dev, sizeof(struct aspeed_acry_dev),
712 GFP_KERNEL);
713 if (!acry_dev)
714 return -ENOMEM;
716 acry_dev->dev = dev;
718 platform_set_drvdata(pdev, acry_dev);
720 acry_dev->regs = devm_platform_ioremap_resource(pdev, 0);
721 if (IS_ERR(acry_dev->regs))
722 return PTR_ERR(acry_dev->regs);
724 acry_dev->acry_sram = devm_platform_ioremap_resource(pdev, 1);
725 if (IS_ERR(acry_dev->acry_sram))
726 return PTR_ERR(acry_dev->acry_sram);
728 /* Get irq number and register it */
729 acry_dev->irq = platform_get_irq(pdev, 0);
730 if (acry_dev->irq < 0)
731 return -ENXIO;
733 rc = devm_request_irq(dev, acry_dev->irq, aspeed_acry_irq, 0,
734 dev_name(dev), acry_dev);
735 if (rc) {
736 dev_err(dev, "Failed to request irq.\n");
737 return rc;
740 acry_dev->clk = devm_clk_get_enabled(dev, NULL);
741 if (IS_ERR(acry_dev->clk)) {
742 dev_err(dev, "Failed to get acry clk\n");
743 return PTR_ERR(acry_dev->clk);
746 acry_dev->ahbc = syscon_regmap_lookup_by_phandle(dev->of_node,
747 "aspeed,ahbc");
748 if (IS_ERR(acry_dev->ahbc)) {
749 dev_err(dev, "Failed to get AHBC regmap\n");
750 return -ENODEV;
753 /* Initialize crypto hardware engine structure for RSA */
754 acry_dev->crypt_engine_rsa = crypto_engine_alloc_init(dev, true);
755 if (!acry_dev->crypt_engine_rsa) {
756 rc = -ENOMEM;
757 goto clk_exit;
760 rc = crypto_engine_start(acry_dev->crypt_engine_rsa);
761 if (rc)
762 goto err_engine_rsa_start;
764 tasklet_init(&acry_dev->done_task, aspeed_acry_done_task,
765 (unsigned long)acry_dev);
767 /* Set Data Memory to AHB(CPU) Access Mode */
768 ast_acry_write(acry_dev, ACRY_CMD_DMEM_AHB, ASPEED_ACRY_DMA_CMD);
770 /* Initialize ACRY SRAM index */
771 aspeed_acry_sram_mapping(acry_dev);
773 acry_dev->buf_addr = dmam_alloc_coherent(dev, ASPEED_ACRY_BUFF_SIZE,
774 &acry_dev->buf_dma_addr,
775 GFP_KERNEL);
776 if (!acry_dev->buf_addr) {
777 rc = -ENOMEM;
778 goto err_engine_rsa_start;
781 aspeed_acry_register(acry_dev);
783 dev_info(dev, "Aspeed ACRY Accelerator successfully registered\n");
785 return 0;
787 err_engine_rsa_start:
788 crypto_engine_exit(acry_dev->crypt_engine_rsa);
789 clk_exit:
790 clk_disable_unprepare(acry_dev->clk);
792 return rc;
795 static void aspeed_acry_remove(struct platform_device *pdev)
797 struct aspeed_acry_dev *acry_dev = platform_get_drvdata(pdev);
799 aspeed_acry_unregister(acry_dev);
800 crypto_engine_exit(acry_dev->crypt_engine_rsa);
801 tasklet_kill(&acry_dev->done_task);
802 clk_disable_unprepare(acry_dev->clk);
MODULE_DEVICE_TABLE(of, aspeed_acry_of_matches);

/* Platform driver glue for the ACRY accelerator. */
static struct platform_driver aspeed_acry_driver = {
	.probe = aspeed_acry_probe,
	.remove = aspeed_acry_remove,
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = aspeed_acry_of_matches,
	},
};

module_platform_driver(aspeed_acry_driver);

MODULE_AUTHOR("Neal Liu <neal_liu@aspeedtech.com>");
MODULE_DESCRIPTION("ASPEED ACRY driver for hardware RSA Engine");
MODULE_LICENSE("GPL");