// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2021 Aspeed Technology Inc.
 */
#include <crypto/engine.h>
#include <crypto/internal/akcipher.h>
#include <crypto/internal/rsa.h>
#include <crypto/scatterwalk.h>

#include <linux/clk.h>
#include <linux/count_zeros.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/string.h>
#ifdef CONFIG_CRYPTO_DEV_ASPEED_DEBUG
#define ACRY_DBG(d, fmt, ...)	\
	dev_info((d)->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
#else
#define ACRY_DBG(d, fmt, ...)	\
	dev_dbg((d)->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
#endif
/*****************************
 * ACRY register definitions *
 *****************************/
#define ASPEED_ACRY_TRIGGER		0x000	/* ACRY Engine Control: trigger */
#define ASPEED_ACRY_DMA_CMD		0x048	/* ACRY Engine Control: Command */
#define ASPEED_ACRY_DMA_SRC_BASE	0x04C	/* ACRY DRAM base address for DMA */
#define ASPEED_ACRY_DMA_LEN		0x050	/* ACRY Data Length of DMA */
#define ASPEED_ACRY_RSA_KEY_LEN		0x058	/* ACRY RSA Exp/Mod Key Length (Bits) */
#define ASPEED_ACRY_INT_MASK		0x3F8	/* ACRY Interrupt Mask */
#define ASPEED_ACRY_STATUS		0x3FC	/* ACRY Interrupt Status */
#define  ACRY_CMD_RSA_TRIGGER		BIT(0)
#define  ACRY_CMD_DMA_RSA_TRIGGER	BIT(1)

#define  ACRY_CMD_DMA_SRAM_MODE_RSA	(0x3 << 4)
#define  ACRY_CMD_DMEM_AHB		BIT(8)
#define  ACRY_CMD_DMA_SRAM_AHB_ENGINE	0

#define  RSA_E_BITS_LEN(x)		((x) << 16)
#define  RSA_M_BITS_LEN(x)		(x)
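
/*
 * The Exp/Mod key length register packs both bit lengths into one word:
 * the exponent bit length goes in the upper half-word and the modulus bit
 * length in the lower half-word, which is what RSA_E_BITS_LEN() and
 * RSA_M_BITS_LEN() express. Worked example (illustrative values only):
 * a 2048-bit modulus with e = 65537 (17 significant bits) would be
 * programmed as RSA_E_BITS_LEN(17) | RSA_M_BITS_LEN(2048),
 * i.e. (17 << 16) | 2048.
 */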
#define  ACRY_RSA_ISR			BIT(1)

#define ASPEED_ACRY_BUFF_SIZE		0x1800	/* DMA buffer size */
#define ASPEED_ACRY_SRAM_MAX_LEN	2048	/* ACRY SRAM maximum length (Bytes) */
#define ASPEED_ACRY_RSA_MAX_KEY_LEN	512	/* ACRY RSA maximum key length (Bytes) */

#define CRYPTO_FLAGS_BUSY		BIT(1)
#define BYTES_PER_DWORD			4
/*****************************
 * AHBC register definitions *
 *****************************/
#define AHBC_REGION_PROT		0x240
#define REGION_ACRYM			BIT(23)

#define ast_acry_write(acry, val, offset)	\
	writel((val), (acry)->regs + (offset))

#define ast_acry_read(acry, offset)		\
	readl((acry)->regs + (offset))
struct aspeed_acry_dev;

typedef int (*aspeed_acry_fn_t)(struct aspeed_acry_dev *);

struct aspeed_acry_dev {
	void __iomem			*regs;
	struct device			*dev;
	int				irq;
	struct clk			*clk;
	struct regmap			*ahbc;

	struct akcipher_request		*req;
	struct tasklet_struct		done_task;
	aspeed_acry_fn_t		resume;
	unsigned long			flags;

	/* ACRY output SRAM buffer */
	void __iomem			*acry_sram;

	/* ACRY input DMA buffer */
	u8				*buf_addr;
	dma_addr_t			buf_dma_addr;

	struct crypto_engine		*crypt_engine_rsa;

	/* ACRY SRAM memory mapped */
	int				exp_dw_mapping[ASPEED_ACRY_RSA_MAX_KEY_LEN];
	int				mod_dw_mapping[ASPEED_ACRY_RSA_MAX_KEY_LEN];
	int				data_byte_mapping[ASPEED_ACRY_SRAM_MAX_LEN];
};
struct aspeed_acry_ctx {
	struct aspeed_acry_dev		*acry_dev;

	struct rsa_key			key;
	int				enc;
	u8				*n;
	u8				*e;
	u8				*d;
	size_t				n_sz;
	size_t				e_sz;
	size_t				d_sz;

	aspeed_acry_fn_t		trigger;

	struct crypto_akcipher		*fallback_tfm;
};
struct aspeed_acry_alg {
	struct aspeed_acry_dev		*acry_dev;
	struct akcipher_engine_alg	akcipher;
};
enum aspeed_rsa_key_mode {
	ASPEED_RSA_EXP_MODE = 0,
	ASPEED_RSA_MOD_MODE,
	ASPEED_RSA_DATA_MODE,
};
static inline struct akcipher_request *
akcipher_request_cast(struct crypto_async_request *req)
{
	return container_of(req, struct akcipher_request, base);
}
static int aspeed_acry_do_fallback(struct akcipher_request *req)
{
	struct crypto_akcipher *cipher = crypto_akcipher_reqtfm(req);
	struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(cipher);
	int err;

	akcipher_request_set_tfm(req, ctx->fallback_tfm);

	if (ctx->enc)
		err = crypto_akcipher_encrypt(req);
	else
		err = crypto_akcipher_decrypt(req);

	akcipher_request_set_tfm(req, cipher);

	return err;
}
static bool aspeed_acry_need_fallback(struct akcipher_request *req)
{
	struct crypto_akcipher *cipher = crypto_akcipher_reqtfm(req);
	struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(cipher);

	return ctx->key.n_sz > ASPEED_ACRY_RSA_MAX_KEY_LEN;
}
static int aspeed_acry_handle_queue(struct aspeed_acry_dev *acry_dev,
				    struct akcipher_request *req)
{
	if (aspeed_acry_need_fallback(req)) {
		ACRY_DBG(acry_dev, "SW fallback\n");
		return aspeed_acry_do_fallback(req);
	}

	return crypto_transfer_akcipher_request_to_engine(acry_dev->crypt_engine_rsa, req);
}
static int aspeed_acry_do_request(struct crypto_engine *engine, void *areq)
{
	struct akcipher_request *req = akcipher_request_cast(areq);
	struct crypto_akcipher *cipher = crypto_akcipher_reqtfm(req);
	struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(cipher);
	struct aspeed_acry_dev *acry_dev = ctx->acry_dev;

	acry_dev->req = req;
	acry_dev->flags |= CRYPTO_FLAGS_BUSY;

	return ctx->trigger(acry_dev);
}
static int aspeed_acry_complete(struct aspeed_acry_dev *acry_dev, int err)
{
	struct akcipher_request *req = acry_dev->req;

	acry_dev->flags &= ~CRYPTO_FLAGS_BUSY;

	crypto_finalize_akcipher_request(acry_dev->crypt_engine_rsa, req, err);

	return err;
}
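
/*
 * Request flow, as wired up below: the crypto engine invokes
 * aspeed_acry_do_request(), which marks the device busy and calls
 * ctx->trigger() (aspeed_acry_rsa_trigger) to program and start the
 * hardware. Completion is signalled by the RSA interrupt; the ISR stops
 * the engine and schedules done_task, whose handler runs acry_dev->resume
 * (aspeed_acry_rsa_transfer) to read the result out of the ACRY SRAM and
 * then aspeed_acry_complete() to hand the request back to the engine.
 */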
/*
 * Copy data to the DMA buffer for engine use.
 */
static void aspeed_acry_rsa_sg_copy_to_buffer(struct aspeed_acry_dev *acry_dev,
					      u8 *buf, struct scatterlist *src,
					      size_t nbytes)
{
	static u8 dram_buffer[ASPEED_ACRY_SRAM_MAX_LEN];
	int data_idx;
	int i = 0, j;

	ACRY_DBG(acry_dev, "\n");

	scatterwalk_map_and_copy(dram_buffer, src, 0, nbytes, 0);

	for (j = nbytes - 1; j >= 0; j--) {
		data_idx = acry_dev->data_byte_mapping[i];
		buf[data_idx] = dram_buffer[j];
		i++;
	}

	for (; i < ASPEED_ACRY_SRAM_MAX_LEN; i++) {
		data_idx = acry_dev->data_byte_mapping[i];
		buf[data_idx] = 0;
	}
}
/*
 * Copy Exp/Mod to the DMA buffer for engine use.
 *
 * Params:
 * - mode 0 : Exponential
 * - mode 1 : Modulus
 *
 * Notes:
 * - DRAM memory layout:
 *   D[0], D[4], D[8], D[12]
 * - ACRY SRAM memory layout should reverse the order of source data:
 *   D[12], D[8], D[4], D[0]
 */
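/*
 * Alignment example (illustrative): for a 3-byte value, the most
 * significant dword is only partially used, so the packing loop below
 * starts at i = BYTES_PER_DWORD - (nbytes % BYTES_PER_DWORD) = 1 and
 * accumulates the remaining bytes most-significant first before writing
 * the dword to its mapped SRAM slot.
 */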
static int aspeed_acry_rsa_ctx_copy(struct aspeed_acry_dev *acry_dev, void *buf,
				    const void *xbuf, size_t nbytes,
				    enum aspeed_rsa_key_mode mode)
{
	const u8 *src = xbuf;
	__le32 *dw_buf = buf;
	int nbits, ndw;
	int i, j, idx;
	u32 data = 0;

	ACRY_DBG(acry_dev, "nbytes:%zu, mode:%d\n", nbytes, mode);

	if (nbytes > ASPEED_ACRY_RSA_MAX_KEY_LEN)
		return -ENOMEM;

	/* Remove the leading zeros */
	while (nbytes > 0 && src[0] == 0) {
		src++;
		nbytes--;
	}

	nbits = nbytes * 8;
	if (nbytes > 0)
		nbits -= count_leading_zeros(src[0]) - (BITS_PER_LONG - 8);

	/* Double-word alignment */
	ndw = DIV_ROUND_UP(nbytes, BYTES_PER_DWORD);

	if (nbytes > 0) {
		i = BYTES_PER_DWORD - nbytes % BYTES_PER_DWORD;
		i %= BYTES_PER_DWORD;

		for (j = ndw; j > 0; j--) {
			for (; i < BYTES_PER_DWORD; i++) {
				data <<= 8;
				data |= *src++;
			}
			i = 0;

			if (mode == ASPEED_RSA_EXP_MODE)
				idx = acry_dev->exp_dw_mapping[j - 1];
			else /* mode == ASPEED_RSA_MOD_MODE */
				idx = acry_dev->mod_dw_mapping[j - 1];

			dw_buf[idx] = cpu_to_le32(data);
		}
	}

	return nbits;
}
static int aspeed_acry_rsa_transfer(struct aspeed_acry_dev *acry_dev)
{
	struct akcipher_request *req = acry_dev->req;
	u8 __iomem *sram_buffer = acry_dev->acry_sram;
	struct scatterlist *out_sg = req->dst;
	static u8 dram_buffer[ASPEED_ACRY_SRAM_MAX_LEN];
	int leading_zero = 1;
	int result_nbytes;
	int data_idx;
	int i = 0, j;

	/* Set Data Memory to AHB(CPU) Access Mode */
	ast_acry_write(acry_dev, ACRY_CMD_DMEM_AHB, ASPEED_ACRY_DMA_CMD);

	/* Disable ACRY SRAM protection */
	regmap_update_bits(acry_dev->ahbc, AHBC_REGION_PROT,
			   REGION_ACRYM, 0);

	result_nbytes = ASPEED_ACRY_SRAM_MAX_LEN;

	for (j = ASPEED_ACRY_SRAM_MAX_LEN - 1; j >= 0; j--) {
		data_idx = acry_dev->data_byte_mapping[j];
		if (readb(sram_buffer + data_idx) == 0 && leading_zero) {
			result_nbytes--;
		} else {
			leading_zero = 0;
			dram_buffer[i] = readb(sram_buffer + data_idx);
			i++;
		}
	}

	ACRY_DBG(acry_dev, "result_nbytes:%d, req->dst_len:%d\n",
		 result_nbytes, req->dst_len);

	if (result_nbytes <= req->dst_len) {
		scatterwalk_map_and_copy(dram_buffer, out_sg, 0, result_nbytes,
					 1);
		req->dst_len = result_nbytes;
	} else {
		dev_err(acry_dev->dev, "RSA engine error!\n");
	}

	memzero_explicit(acry_dev->buf_addr, ASPEED_ACRY_BUFF_SIZE);

	return aspeed_acry_complete(acry_dev, 0);
}
static int aspeed_acry_rsa_trigger(struct aspeed_acry_dev *acry_dev)
{
	struct akcipher_request *req = acry_dev->req;
	struct crypto_akcipher *cipher = crypto_akcipher_reqtfm(req);
	struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(cipher);
	int ne, nm;

	if (!ctx->n || !ctx->n_sz) {
		dev_err(acry_dev->dev, "%s: key n is not set\n", __func__);
		return -EINVAL;
	}

	memzero_explicit(acry_dev->buf_addr, ASPEED_ACRY_BUFF_SIZE);

	/* Copy source data to DMA buffer */
	aspeed_acry_rsa_sg_copy_to_buffer(acry_dev, acry_dev->buf_addr,
					  req->src, req->src_len);

	nm = aspeed_acry_rsa_ctx_copy(acry_dev, acry_dev->buf_addr, ctx->n,
				      ctx->n_sz, ASPEED_RSA_MOD_MODE);
	if (ctx->enc) {
		if (!ctx->e || !ctx->e_sz) {
			dev_err(acry_dev->dev, "%s: key e is not set\n",
				__func__);
			return -EINVAL;
		}
		/* Copy key e to DMA buffer */
		ne = aspeed_acry_rsa_ctx_copy(acry_dev, acry_dev->buf_addr,
					      ctx->e, ctx->e_sz,
					      ASPEED_RSA_EXP_MODE);
	} else {
		if (!ctx->d || !ctx->d_sz) {
			dev_err(acry_dev->dev, "%s: key d is not set\n",
				__func__);
			return -EINVAL;
		}
		/* Copy key d to DMA buffer */
		ne = aspeed_acry_rsa_ctx_copy(acry_dev, acry_dev->buf_addr,
					      ctx->key.d, ctx->key.d_sz,
					      ASPEED_RSA_EXP_MODE);
	}

	ast_acry_write(acry_dev, acry_dev->buf_dma_addr,
		       ASPEED_ACRY_DMA_SRC_BASE);
	ast_acry_write(acry_dev, RSA_E_BITS_LEN(ne) | RSA_M_BITS_LEN(nm),
		       ASPEED_ACRY_RSA_KEY_LEN);
	ast_acry_write(acry_dev, ASPEED_ACRY_BUFF_SIZE,
		       ASPEED_ACRY_DMA_LEN);

	acry_dev->resume = aspeed_acry_rsa_transfer;

	/* Enable ACRY SRAM protection */
	regmap_update_bits(acry_dev->ahbc, AHBC_REGION_PROT,
			   REGION_ACRYM, REGION_ACRYM);

	ast_acry_write(acry_dev, ACRY_RSA_ISR, ASPEED_ACRY_INT_MASK);
	ast_acry_write(acry_dev, ACRY_CMD_DMA_SRAM_MODE_RSA |
		       ACRY_CMD_DMA_SRAM_AHB_ENGINE, ASPEED_ACRY_DMA_CMD);

	/* Trigger RSA engines */
	ast_acry_write(acry_dev, ACRY_CMD_RSA_TRIGGER |
		       ACRY_CMD_DMA_RSA_TRIGGER, ASPEED_ACRY_TRIGGER);

	return 0;
}
static int aspeed_acry_rsa_enc(struct akcipher_request *req)
{
	struct crypto_akcipher *cipher = crypto_akcipher_reqtfm(req);
	struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(cipher);
	struct aspeed_acry_dev *acry_dev = ctx->acry_dev;

	ctx->trigger = aspeed_acry_rsa_trigger;
	ctx->enc = 1;

	return aspeed_acry_handle_queue(acry_dev, req);
}
static int aspeed_acry_rsa_dec(struct akcipher_request *req)
{
	struct crypto_akcipher *cipher = crypto_akcipher_reqtfm(req);
	struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(cipher);
	struct aspeed_acry_dev *acry_dev = ctx->acry_dev;

	ctx->trigger = aspeed_acry_rsa_trigger;
	ctx->enc = 0;

	return aspeed_acry_handle_queue(acry_dev, req);
}
static u8 *aspeed_rsa_key_copy(u8 *src, size_t len)
{
	return kmemdup(src, len, GFP_KERNEL);
}
static int aspeed_rsa_set_n(struct aspeed_acry_ctx *ctx, u8 *value,
			    size_t len)
{
	ctx->n_sz = len;
	ctx->n = aspeed_rsa_key_copy(value, len);
	if (!ctx->n)
		return -ENOMEM;

	return 0;
}
static int aspeed_rsa_set_e(struct aspeed_acry_ctx *ctx, u8 *value,
			    size_t len)
{
	ctx->e_sz = len;
	ctx->e = aspeed_rsa_key_copy(value, len);
	if (!ctx->e)
		return -ENOMEM;

	return 0;
}
static int aspeed_rsa_set_d(struct aspeed_acry_ctx *ctx, u8 *value,
			    size_t len)
{
	ctx->d_sz = len;
	ctx->d = aspeed_rsa_key_copy(value, len);
	if (!ctx->d)
		return -ENOMEM;

	return 0;
}
static void aspeed_rsa_key_free(struct aspeed_acry_ctx *ctx)
{
	kfree_sensitive(ctx->n);
	kfree_sensitive(ctx->e);
	kfree_sensitive(ctx->d);
}
static int aspeed_acry_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
				  unsigned int keylen, int priv)
{
	struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct aspeed_acry_dev *acry_dev = ctx->acry_dev;
	int ret;

	if (priv)
		ret = rsa_parse_priv_key(&ctx->key, key, keylen);
	else
		ret = rsa_parse_pub_key(&ctx->key, key, keylen);

	if (ret) {
		dev_err(acry_dev->dev, "rsa parse key failed, ret:0x%x\n",
			ret);
		return ret;
	}

	/*
	 * The ASPEED engine supports keys up to 4096 bits
	 * (ASPEED_ACRY_RSA_MAX_KEY_LEN bytes); larger keys are handled
	 * entirely by the software fallback.
	 */
	if (ctx->key.n_sz > ASPEED_ACRY_RSA_MAX_KEY_LEN)
		return 0;

	ret = aspeed_rsa_set_n(ctx, (u8 *)ctx->key.n, ctx->key.n_sz);
	if (ret)
		goto err;

	ret = aspeed_rsa_set_e(ctx, (u8 *)ctx->key.e, ctx->key.e_sz);
	if (ret)
		goto err;

	if (priv) {
		ret = aspeed_rsa_set_d(ctx, (u8 *)ctx->key.d, ctx->key.d_sz);
		if (ret)
			goto err;
	}

	return 0;

err:
	dev_err(acry_dev->dev, "rsa set key failed\n");
	aspeed_rsa_key_free(ctx);

	return ret;
}
static int aspeed_acry_rsa_set_pub_key(struct crypto_akcipher *tfm,
				       const void *key,
				       unsigned int keylen)
{
	struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(tfm);
	int ret;

	ret = crypto_akcipher_set_pub_key(ctx->fallback_tfm, key, keylen);
	if (ret)
		return ret;

	return aspeed_acry_rsa_setkey(tfm, key, keylen, 0);
}
static int aspeed_acry_rsa_set_priv_key(struct crypto_akcipher *tfm,
					const void *key,
					unsigned int keylen)
{
	struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(tfm);
	int ret;

	ret = crypto_akcipher_set_priv_key(ctx->fallback_tfm, key, keylen);
	if (ret)
		return ret;

	return aspeed_acry_rsa_setkey(tfm, key, keylen, 1);
}
static unsigned int aspeed_acry_rsa_max_size(struct crypto_akcipher *tfm)
{
	struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(tfm);

	if (ctx->key.n_sz > ASPEED_ACRY_RSA_MAX_KEY_LEN)
		return crypto_akcipher_maxsize(ctx->fallback_tfm);

	return ctx->n_sz;
}
static int aspeed_acry_rsa_init_tfm(struct crypto_akcipher *tfm)
{
	struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
	const char *name = crypto_tfm_alg_name(&tfm->base);
	struct aspeed_acry_alg *acry_alg;

	acry_alg = container_of(alg, struct aspeed_acry_alg, akcipher.base);

	ctx->acry_dev = acry_alg->acry_dev;

	ctx->fallback_tfm = crypto_alloc_akcipher(name, 0, CRYPTO_ALG_ASYNC |
						  CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback_tfm)) {
		dev_err(ctx->acry_dev->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
			name, PTR_ERR(ctx->fallback_tfm));
		return PTR_ERR(ctx->fallback_tfm);
	}

	return 0;
}
static void aspeed_acry_rsa_exit_tfm(struct crypto_akcipher *tfm)
{
	struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(tfm);

	crypto_free_akcipher(ctx->fallback_tfm);
}
static struct aspeed_acry_alg aspeed_acry_akcipher_algs[] = {
	{
		.akcipher.base = {
			.encrypt = aspeed_acry_rsa_enc,
			.decrypt = aspeed_acry_rsa_dec,
			.set_pub_key = aspeed_acry_rsa_set_pub_key,
			.set_priv_key = aspeed_acry_rsa_set_priv_key,
			.max_size = aspeed_acry_rsa_max_size,
			.init = aspeed_acry_rsa_init_tfm,
			.exit = aspeed_acry_rsa_exit_tfm,
			.base = {
				.cra_name = "rsa",
				.cra_driver_name = "aspeed-rsa",
				.cra_flags = CRYPTO_ALG_TYPE_AKCIPHER |
					     CRYPTO_ALG_KERN_DRIVER_ONLY |
					     CRYPTO_ALG_NEED_FALLBACK,
				.cra_module = THIS_MODULE,
				.cra_ctxsize = sizeof(struct aspeed_acry_ctx),
			},
		},
		.akcipher.op = {
			.do_one_request = aspeed_acry_do_request,
		},
	},
};
static void aspeed_acry_register(struct aspeed_acry_dev *acry_dev)
{
	int i, rc;

	for (i = 0; i < ARRAY_SIZE(aspeed_acry_akcipher_algs); i++) {
		aspeed_acry_akcipher_algs[i].acry_dev = acry_dev;
		rc = crypto_engine_register_akcipher(&aspeed_acry_akcipher_algs[i].akcipher);
		if (rc)
			ACRY_DBG(acry_dev, "Failed to register %s\n",
				 aspeed_acry_akcipher_algs[i].akcipher.base.base.cra_name);
	}
}
static void aspeed_acry_unregister(struct aspeed_acry_dev *acry_dev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(aspeed_acry_akcipher_algs); i++)
		crypto_engine_unregister_akcipher(&aspeed_acry_akcipher_algs[i].akcipher);
}
/* ACRY interrupt service routine. */
static irqreturn_t aspeed_acry_irq(int irq, void *dev)
{
	struct aspeed_acry_dev *acry_dev = (struct aspeed_acry_dev *)dev;
	u32 sts;

	sts = ast_acry_read(acry_dev, ASPEED_ACRY_STATUS);
	ast_acry_write(acry_dev, sts, ASPEED_ACRY_STATUS);

	ACRY_DBG(acry_dev, "irq sts:0x%x\n", sts);

	if (sts & ACRY_RSA_ISR) {
		/* Stop RSA engine */
		ast_acry_write(acry_dev, 0, ASPEED_ACRY_TRIGGER);

		if (acry_dev->flags & CRYPTO_FLAGS_BUSY)
			tasklet_schedule(&acry_dev->done_task);
		else
			dev_err(acry_dev->dev, "RSA no active requests.\n");
	}

	return IRQ_HANDLED;
}
/*
 * ACRY SRAM has its own memory layout.
 * Set up the DRAM to SRAM indexing for later use.
 */
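/*
 * With this indexing, the 2-KByte ACRY SRAM is effectively tiled in
 * 12-dword (48-byte) groups: dwords 0-3 of each group hold exponent
 * words, dwords 4-7 hold modulus words and dwords 8-11 hold message
 * data (for example, exp_dw_mapping[0..3] = 0..3, mod_dw_mapping[0..3]
 * = 4..7, data_byte_mapping[0..15] = 32..47, and the pattern repeats
 * from dword 12).
 */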
static void aspeed_acry_sram_mapping(struct aspeed_acry_dev *acry_dev)
{
	int i, j = 0;

	for (i = 0; i < (ASPEED_ACRY_SRAM_MAX_LEN / BYTES_PER_DWORD); i++) {
		acry_dev->exp_dw_mapping[i] = j;
		acry_dev->mod_dw_mapping[i] = j + 4;
		acry_dev->data_byte_mapping[(i * 4)] = (j + 8) * 4;
		acry_dev->data_byte_mapping[(i * 4) + 1] = (j + 8) * 4 + 1;
		acry_dev->data_byte_mapping[(i * 4) + 2] = (j + 8) * 4 + 2;
		acry_dev->data_byte_mapping[(i * 4) + 3] = (j + 8) * 4 + 3;
		j++;
		j = j % 4 ? j : j + 8;
	}
}
static void aspeed_acry_done_task(unsigned long data)
{
	struct aspeed_acry_dev *acry_dev = (struct aspeed_acry_dev *)data;

	(void)acry_dev->resume(acry_dev);
}
static const struct of_device_id aspeed_acry_of_matches[] = {
	{ .compatible = "aspeed,ast2600-acry", },
	{},
};
static int aspeed_acry_probe(struct platform_device *pdev)
{
	struct aspeed_acry_dev *acry_dev;
	struct device *dev = &pdev->dev;
	int rc;

	acry_dev = devm_kzalloc(dev, sizeof(struct aspeed_acry_dev),
				GFP_KERNEL);
	if (!acry_dev)
		return -ENOMEM;

	acry_dev->dev = dev;

	platform_set_drvdata(pdev, acry_dev);

	acry_dev->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(acry_dev->regs))
		return PTR_ERR(acry_dev->regs);

	acry_dev->acry_sram = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(acry_dev->acry_sram))
		return PTR_ERR(acry_dev->acry_sram);

	/* Get irq number and register it */
	acry_dev->irq = platform_get_irq(pdev, 0);
	if (acry_dev->irq < 0)
		return acry_dev->irq;

	rc = devm_request_irq(dev, acry_dev->irq, aspeed_acry_irq, 0,
			      dev_name(dev), acry_dev);
	if (rc) {
		dev_err(dev, "Failed to request irq.\n");
		return rc;
	}

	acry_dev->clk = devm_clk_get_enabled(dev, NULL);
	if (IS_ERR(acry_dev->clk)) {
		dev_err(dev, "Failed to get acry clk\n");
		return PTR_ERR(acry_dev->clk);
	}

	acry_dev->ahbc = syscon_regmap_lookup_by_phandle(dev->of_node,
							 "aspeed,ahbc");
	if (IS_ERR(acry_dev->ahbc)) {
		dev_err(dev, "Failed to get AHBC regmap\n");
		return PTR_ERR(acry_dev->ahbc);
	}

	/* Initialize crypto hardware engine structure for RSA */
	acry_dev->crypt_engine_rsa = crypto_engine_alloc_init(dev, true);
	if (!acry_dev->crypt_engine_rsa)
		return -ENOMEM;

	rc = crypto_engine_start(acry_dev->crypt_engine_rsa);
	if (rc)
		goto err_engine_rsa_start;

	tasklet_init(&acry_dev->done_task, aspeed_acry_done_task,
		     (unsigned long)acry_dev);

	/* Set Data Memory to AHB(CPU) Access Mode */
	ast_acry_write(acry_dev, ACRY_CMD_DMEM_AHB, ASPEED_ACRY_DMA_CMD);

	/* Initialize ACRY SRAM index */
	aspeed_acry_sram_mapping(acry_dev);

	acry_dev->buf_addr = dmam_alloc_coherent(dev, ASPEED_ACRY_BUFF_SIZE,
						 &acry_dev->buf_dma_addr,
						 GFP_KERNEL);
	if (!acry_dev->buf_addr) {
		rc = -ENOMEM;
		goto err_engine_rsa_start;
	}

	aspeed_acry_register(acry_dev);

	dev_info(dev, "Aspeed ACRY Accelerator successfully registered\n");

	return 0;

err_engine_rsa_start:
	crypto_engine_exit(acry_dev->crypt_engine_rsa);

	return rc;
}
static void aspeed_acry_remove(struct platform_device *pdev)
{
	struct aspeed_acry_dev *acry_dev = platform_get_drvdata(pdev);

	aspeed_acry_unregister(acry_dev);
	crypto_engine_exit(acry_dev->crypt_engine_rsa);
	tasklet_kill(&acry_dev->done_task);
	/*
	 * The clock was obtained with devm_clk_get_enabled(), so it is
	 * disabled and unprepared automatically on driver detach; no
	 * explicit clk_disable_unprepare() is needed here.
	 */
}
MODULE_DEVICE_TABLE(of, aspeed_acry_of_matches);

static struct platform_driver aspeed_acry_driver = {
	.probe		= aspeed_acry_probe,
	.remove		= aspeed_acry_remove,
	.driver		= {
		.name		= KBUILD_MODNAME,
		.of_match_table	= aspeed_acry_of_matches,
	},
};

module_platform_driver(aspeed_acry_driver);

MODULE_AUTHOR("Neal Liu <neal_liu@aspeedtech.com>");
MODULE_DESCRIPTION("ASPEED ACRY driver for hardware RSA Engine");
MODULE_LICENSE("GPL");