/*
 * Driver for EIP97 SHA1/SHA2(HMAC) acceleration.
 *
 * Copyright (c) 2016 Ryder Lee <ryder.lee@mediatek.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Some ideas are from atmel-sha.c and omap-sham.c drivers.
 */

#include <crypto/hmac.h>
#include <crypto/sha.h>
#include "mtk-platform.h"

#define SHA_ALIGN_MSK		(sizeof(u32) - 1)
#define SHA_QUEUE_SIZE		512
#define SHA_BUF_SIZE		((u32)PAGE_SIZE)

#define SHA_OP_UPDATE		1
#define SHA_OP_FINAL		2

#define SHA_DATA_LEN_MSK	cpu_to_le32(GENMASK(16, 0))
#define SHA_MAX_DIGEST_BUF_SIZE	32
/* SHA command token */
#define SHA_CT_CTRL_HDR		cpu_to_le32(0x02220000)
#define SHA_CMD0		cpu_to_le32(0x03020000)
#define SHA_CMD1		cpu_to_le32(0x21060000)
#define SHA_CMD2		cpu_to_le32(0xe0e63802)

/* SHA transform information */
#define SHA_TFM_HASH		cpu_to_le32(0x2 << 0)
#define SHA_TFM_SIZE(x)		cpu_to_le32((x) << 8)
#define SHA_TFM_START		cpu_to_le32(0x1 << 4)
#define SHA_TFM_CONTINUE	cpu_to_le32(0x1 << 5)
#define SHA_TFM_HASH_STORE	cpu_to_le32(0x1 << 19)
#define SHA_TFM_SHA1		cpu_to_le32(0x2 << 23)
#define SHA_TFM_SHA256		cpu_to_le32(0x3 << 23)
#define SHA_TFM_SHA224		cpu_to_le32(0x4 << 23)
#define SHA_TFM_SHA512		cpu_to_le32(0x5 << 23)
#define SHA_TFM_SHA384		cpu_to_le32(0x6 << 23)
#define SHA_TFM_DIGEST(x)	cpu_to_le32(((x) & GENMASK(3, 0)) << 24)
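
/*
 * Illustrative note (editorial, not part of the original source): for a
 * plain SHA-256 request, mtk_sha_info_init() below composes the first
 * transform word from the fields above roughly as
 *	SHA_TFM_HASH | SHA_TFM_SIZE(SIZE_IN_WORDS(SHA256_DIGEST_SIZE)) | SHA_TFM_SHA256
 * i.e. a hash operation with an 8-word digest state using SHA-256.
 */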
#define SHA_FLAGS_BUSY		BIT(0)
#define SHA_FLAGS_FINAL		BIT(1)
#define SHA_FLAGS_FINUP		BIT(2)
#define SHA_FLAGS_SG		BIT(3)
#define SHA_FLAGS_ALGO_MSK	GENMASK(8, 4)
#define SHA_FLAGS_SHA1		BIT(4)
#define SHA_FLAGS_SHA224	BIT(5)
#define SHA_FLAGS_SHA256	BIT(6)
#define SHA_FLAGS_SHA384	BIT(7)
#define SHA_FLAGS_SHA512	BIT(8)
#define SHA_FLAGS_HMAC		BIT(9)
#define SHA_FLAGS_PAD		BIT(10)
/**
 * mtk_sha_info - hardware information of SHA
 * @cmd:	command token, hardware instruction
 * @tfm:	transform state of cipher algorithm.
 * @state:	contains keys and initial vectors.
 */
	__le32 digest[SHA_MAX_DIGEST_BUF_SIZE];
struct mtk_sha_reqctx {
	struct mtk_sha_info info;

	struct scatterlist *sg;
	u32 offset;	/* Offset in current sg */
	u32 total;	/* Total request */
struct mtk_sha_hmac_ctx {
	struct crypto_shash	*shash;
	u8 ipad[SHA512_BLOCK_SIZE] __aligned(sizeof(u32));
	u8 opad[SHA512_BLOCK_SIZE] __aligned(sizeof(u32));
};
	struct mtk_cryp *cryp;

	u8 buf[SHA_BUF_SIZE] __aligned(sizeof(u32));

	struct mtk_sha_hmac_ctx base[0];
	struct list_head dev_list;
	/* Device list lock */

static struct mtk_sha_drv mtk_sha = {
	.dev_list = LIST_HEAD_INIT(mtk_sha.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(mtk_sha.lock),
};
static int mtk_sha_handle_queue(struct mtk_cryp *cryp, u8 id,
				struct ahash_request *req);
static inline u32 mtk_sha_read(struct mtk_cryp *cryp, u32 offset)
{
	return readl_relaxed(cryp->base + offset);
}

static inline void mtk_sha_write(struct mtk_cryp *cryp,
				 u32 offset, u32 value)
{
	writel_relaxed(value, cryp->base + offset);
}
static inline void mtk_sha_ring_shift(struct mtk_ring *ring,
				      struct mtk_desc **cmd_curr,
				      struct mtk_desc **res_curr,
				      int *count)
{
	*cmd_curr = ring->cmd_next++;
	*res_curr = ring->res_next++;

	if (ring->cmd_next == ring->cmd_base + MTK_DESC_NUM) {
		ring->cmd_next = ring->cmd_base;
		ring->res_next = ring->res_base;
	}
}
static struct mtk_cryp *mtk_sha_find_dev(struct mtk_sha_ctx *tctx)
{
	struct mtk_cryp *cryp = NULL;
	struct mtk_cryp *tmp;

	spin_lock_bh(&mtk_sha.lock);

	list_for_each_entry(tmp, &mtk_sha.dev_list, sha_list) {

	/*
	 * Assign record id to tfm in round-robin fashion, and this
	 * will help tfm to bind to corresponding descriptor rings.
	 */
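	/*
	 * For example, with the two records set up in mtk_sha_record_init(),
	 * the first tfm to bind gets id 0 (ring2), the second gets id 1
	 * (ring3), the third gets id 0 again, and so on.
	 */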
	tctx->id = cryp->rec;
	cryp->rec = !cryp->rec;

	spin_unlock_bh(&mtk_sha.lock);
static int mtk_sha_append_sg(struct mtk_sha_reqctx *ctx)
{
	while ((ctx->bufcnt < SHA_BUF_SIZE) && ctx->total) {
		count = min(ctx->sg->length - ctx->offset, ctx->total);
		count = min(count, SHA_BUF_SIZE - ctx->bufcnt);

		/*
		 * Check if count <= 0 because the buffer is full or
		 * because the sg length is 0. In the latter case,
		 * check if there is another sg in the list; a 0 length
		 * sg doesn't necessarily mean the end of the sg list.
		 */
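		/*
		 * For example, an sg list with segment lengths { 4096, 0, 512 }
		 * is handled by stepping over the zero-length middle entry
		 * rather than treating it as the end of the data.
		 */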
		if ((ctx->sg->length == 0) && !sg_is_last(ctx->sg)) {
			ctx->sg = sg_next(ctx->sg);

		scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, ctx->sg,
					 ctx->offset, count, 0);

		ctx->bufcnt += count;
		ctx->offset += count;

		if (ctx->offset == ctx->sg->length) {
			ctx->sg = sg_next(ctx->sg);
/*
 * The purpose of this padding is to ensure that the padded message is a
 * multiple of 512 bits (SHA1/SHA224/SHA256) or 1024 bits (SHA384/SHA512).
 * The bit "1" is appended at the end of the message, followed by
 * "padlen-1" zero bits. Then a 64-bit block (SHA1/SHA224/SHA256) or a
 * 128-bit block (SHA384/SHA512) equal to the message length in bits
 * is appended.
 *
 * For SHA1/SHA224/SHA256, padlen is calculated as follows:
 * - if message length < 56 bytes then padlen = 56 - message length
 * - else padlen = 64 + 56 - message length
 *
 * For SHA384/SHA512, padlen is calculated as follows:
 * - if message length < 112 bytes then padlen = 112 - message length
 * - else padlen = 128 + 112 - message length
 */
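/*
 * Worked example (editorial addition): for a 20-byte message hashed with
 * SHA-256, index = 20 and padlen = 56 - 20 = 36, so the padded buffer ends
 * up holding 20 + 36 + 8 = 64 bytes, i.e. exactly one 512-bit block.
 */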
static void mtk_sha_fill_padding(struct mtk_sha_reqctx *ctx, u32 len)
{
	u32 index, padlen;
	u64 bits[2];
	u64 size = ctx->digcnt;

	bits[1] = cpu_to_be64(size << 3);
	bits[0] = cpu_to_be64(size >> 61);

	switch (ctx->flags & SHA_FLAGS_ALGO_MSK) {
	case SHA_FLAGS_SHA384:
	case SHA_FLAGS_SHA512:
		index = ctx->bufcnt & 0x7f;
		padlen = (index < 112) ? (112 - index) : ((128 + 112) - index);
		*(ctx->buffer + ctx->bufcnt) = 0x80;
		memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen - 1);
		memcpy(ctx->buffer + ctx->bufcnt + padlen, bits, 16);
		ctx->bufcnt += padlen + 16;
		ctx->flags |= SHA_FLAGS_PAD;
		break;

	default:
		index = ctx->bufcnt & 0x3f;
		padlen = (index < 56) ? (56 - index) : ((64 + 56) - index);
		*(ctx->buffer + ctx->bufcnt) = 0x80;
		memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen - 1);
		memcpy(ctx->buffer + ctx->bufcnt + padlen, &bits[1], 8);
		ctx->bufcnt += padlen + 8;
		ctx->flags |= SHA_FLAGS_PAD;
		break;
	}
}
/* Initialize basic transform information of SHA */
static void mtk_sha_info_init(struct mtk_sha_reqctx *ctx)
{
	struct mtk_sha_info *info = &ctx->info;

	ctx->ct_hdr = SHA_CT_CTRL_HDR;
	ctx->ct_size = SHA_CT_SIZE;

	info->tfm[0] = SHA_TFM_HASH | SHA_TFM_SIZE(SIZE_IN_WORDS(ctx->ds));

	switch (ctx->flags & SHA_FLAGS_ALGO_MSK) {
	case SHA_FLAGS_SHA1:
		info->tfm[0] |= SHA_TFM_SHA1;
		break;
	case SHA_FLAGS_SHA224:
		info->tfm[0] |= SHA_TFM_SHA224;
		break;
	case SHA_FLAGS_SHA256:
		info->tfm[0] |= SHA_TFM_SHA256;
		break;
	case SHA_FLAGS_SHA384:
		info->tfm[0] |= SHA_TFM_SHA384;
		break;
	case SHA_FLAGS_SHA512:
		info->tfm[0] |= SHA_TFM_SHA512;
		break;
	default:
		/* Should not happen... */
		return;
	}

	info->tfm[1] = SHA_TFM_HASH_STORE;
	info->ctrl[0] = info->tfm[0] | SHA_TFM_CONTINUE | SHA_TFM_START;
	info->ctrl[1] = info->tfm[1];

	info->cmd[0] = SHA_CMD0;
	info->cmd[1] = SHA_CMD1;
	info->cmd[2] = SHA_CMD2 | SHA_TFM_DIGEST(SIZE_IN_WORDS(ctx->ds));
}
/*
 * Update input data length field of transform information and
 * map it to DMA region.
 */
static int mtk_sha_info_update(struct mtk_cryp *cryp,
			       struct mtk_sha_rec *sha,
			       size_t len1, size_t len2)
{
	struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
	struct mtk_sha_info *info = &ctx->info;

	ctx->ct_hdr &= ~SHA_DATA_LEN_MSK;
	ctx->ct_hdr |= cpu_to_le32(len1 + len2);
	info->cmd[0] &= ~SHA_DATA_LEN_MSK;
	info->cmd[0] |= cpu_to_le32(len1 + len2);

	/* Setting SHA_TFM_START only for the first iteration */
		info->ctrl[0] &= ~SHA_TFM_START;

	ctx->ct_dma = dma_map_single(cryp->dev, info, sizeof(*info),

	if (unlikely(dma_mapping_error(cryp->dev, ctx->ct_dma))) {
		dev_err(cryp->dev, "dma %zu bytes error\n", sizeof(*info));

	ctx->tfm_dma = ctx->ct_dma + sizeof(info->ctrl) + sizeof(info->cmd);
/*
 * Because of a hardware limitation, we must pre-calculate the inner
 * and outer digests that need to be processed by the engine first, and
 * then apply the resulting digest to the input message. These complex
 * hashing procedures limit HMAC performance, so we fall back to software
 * hashing for the final step.
 */
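/*
 * Put differently, the engine produces the inner hash
 * H((K ^ ipad) || msg) into req->result, and mtk_sha_finish_hmac() below
 * uses the software shash fallback to compute the outer hash
 * H((K ^ opad) || inner), which yields
 * HMAC(K, msg) = H((K ^ opad) || H((K ^ ipad) || msg)).
 */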
static int mtk_sha_finish_hmac(struct ahash_request *req)
{
	struct mtk_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct mtk_sha_hmac_ctx *bctx = tctx->base;
	struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);

	SHASH_DESC_ON_STACK(shash, bctx->shash);

	shash->tfm = bctx->shash;
	shash->flags = 0; /* not CRYPTO_TFM_REQ_MAY_SLEEP */

	return crypto_shash_init(shash) ?:
	       crypto_shash_update(shash, bctx->opad, ctx->bs) ?:
	       crypto_shash_finup(shash, req->result, ctx->ds, req->result);
}
/* Initialize request context */
static int mtk_sha_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct mtk_sha_ctx *tctx = crypto_ahash_ctx(tfm);
	struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);

	ctx->ds = crypto_ahash_digestsize(tfm);

	switch (ctx->ds) {
	case SHA1_DIGEST_SIZE:
		ctx->flags |= SHA_FLAGS_SHA1;
		ctx->bs = SHA1_BLOCK_SIZE;
		break;
	case SHA224_DIGEST_SIZE:
		ctx->flags |= SHA_FLAGS_SHA224;
		ctx->bs = SHA224_BLOCK_SIZE;
		break;
	case SHA256_DIGEST_SIZE:
		ctx->flags |= SHA_FLAGS_SHA256;
		ctx->bs = SHA256_BLOCK_SIZE;
		break;
	case SHA384_DIGEST_SIZE:
		ctx->flags |= SHA_FLAGS_SHA384;
		ctx->bs = SHA384_BLOCK_SIZE;
		break;
	case SHA512_DIGEST_SIZE:
		ctx->flags |= SHA_FLAGS_SHA512;
		ctx->bs = SHA512_BLOCK_SIZE;
		break;
	}

	ctx->buffer = tctx->buf;

	if (tctx->flags & SHA_FLAGS_HMAC) {
		struct mtk_sha_hmac_ctx *bctx = tctx->base;

		memcpy(ctx->buffer, bctx->ipad, ctx->bs);
		ctx->bufcnt = ctx->bs;
		ctx->flags |= SHA_FLAGS_HMAC;
	}
static int mtk_sha_xmit(struct mtk_cryp *cryp, struct mtk_sha_rec *sha,
			dma_addr_t addr1, size_t len1,
			dma_addr_t addr2, size_t len2)
{
	struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
	struct mtk_ring *ring = cryp->ring[sha->id];
	struct mtk_desc *cmd, *res;

	err = mtk_sha_info_update(cryp, sha, len1, len2);

	/* Fill in the command/result descriptors */
	mtk_sha_ring_shift(ring, &cmd, &res, &count);

	res->hdr = MTK_DESC_FIRST | MTK_DESC_BUF_LEN(len1);
	cmd->hdr = MTK_DESC_FIRST | MTK_DESC_BUF_LEN(len1) |
		   MTK_DESC_CT_LEN(ctx->ct_size);
	cmd->buf = cpu_to_le32(addr1);
	cmd->ct = cpu_to_le32(ctx->ct_dma);
	cmd->ct_hdr = ctx->ct_hdr;
	cmd->tfm = cpu_to_le32(ctx->tfm_dma);

	mtk_sha_ring_shift(ring, &cmd, &res, &count);

	res->hdr = MTK_DESC_BUF_LEN(len2);
	cmd->hdr = MTK_DESC_BUF_LEN(len2);
	cmd->buf = cpu_to_le32(addr2);

	cmd->hdr |= MTK_DESC_LAST;
	res->hdr |= MTK_DESC_LAST;

	/*
	 * Make sure that all changes to the DMA ring are done before we
	 * start the engine.
	 */

	/* Start DMA transfer */
	mtk_sha_write(cryp, RDR_PREP_COUNT(sha->id), MTK_DESC_CNT(count));
	mtk_sha_write(cryp, CDR_PREP_COUNT(sha->id), MTK_DESC_CNT(count));
static int mtk_sha_dma_map(struct mtk_cryp *cryp,
			   struct mtk_sha_rec *sha,
			   struct mtk_sha_reqctx *ctx,
			   size_t count)
{
	ctx->dma_addr = dma_map_single(cryp->dev, ctx->buffer,
				       SHA_BUF_SIZE, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(cryp->dev, ctx->dma_addr))) {
		dev_err(cryp->dev, "dma map error\n");

	ctx->flags &= ~SHA_FLAGS_SG;

	return mtk_sha_xmit(cryp, sha, ctx->dma_addr, count, 0, 0);
}
static int mtk_sha_update_slow(struct mtk_cryp *cryp,
			       struct mtk_sha_rec *sha)
{
	struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);

	mtk_sha_append_sg(ctx);

	final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total;

	dev_dbg(cryp->dev, "slow: bufcnt: %zu\n", ctx->bufcnt);

	if (final) {
		sha->flags |= SHA_FLAGS_FINAL;
		mtk_sha_fill_padding(ctx, 0);
	}

	if (final || (ctx->bufcnt == SHA_BUF_SIZE && ctx->total)) {

		return mtk_sha_dma_map(cryp, sha, ctx, count);
	}
static int mtk_sha_update_start(struct mtk_cryp *cryp,
				struct mtk_sha_rec *sha)
{
	struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
	u32 len, final, tail;
	struct scatterlist *sg;

	if (ctx->bufcnt || ctx->offset)
		return mtk_sha_update_slow(cryp, sha);

	if (!IS_ALIGNED(sg->offset, sizeof(u32)))
		return mtk_sha_update_slow(cryp, sha);

	if (!sg_is_last(sg) && !IS_ALIGNED(sg->length, ctx->bs))
		/* size is not ctx->bs aligned */
		return mtk_sha_update_slow(cryp, sha);

	len = min(ctx->total, sg->length);

	if (sg_is_last(sg)) {
		if (!(ctx->flags & SHA_FLAGS_FINUP)) {
			/* not last sg must be ctx->bs aligned */
			tail = len & (ctx->bs - 1);

	ctx->offset = len; /* offset where to start slow */

	final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total;

		tail = len & (ctx->bs - 1);

		ctx->offset = len; /* offset where to start slow */

		mtk_sha_append_sg(ctx);
		mtk_sha_fill_padding(ctx, len);

		ctx->dma_addr = dma_map_single(cryp->dev, ctx->buffer,
					       SHA_BUF_SIZE, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(cryp->dev, ctx->dma_addr))) {
			dev_err(cryp->dev, "dma map bytes error\n");

		sha->flags |= SHA_FLAGS_FINAL;

		ctx->flags &= ~SHA_FLAGS_SG;
		return mtk_sha_xmit(cryp, sha, ctx->dma_addr,

		if (!dma_map_sg(cryp->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
			dev_err(cryp->dev, "dma_map_sg error\n");

		ctx->flags |= SHA_FLAGS_SG;
		return mtk_sha_xmit(cryp, sha, sg_dma_address(ctx->sg),
				    len, ctx->dma_addr, count);

	if (!dma_map_sg(cryp->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
		dev_err(cryp->dev, "dma_map_sg error\n");

	ctx->flags |= SHA_FLAGS_SG;

	return mtk_sha_xmit(cryp, sha, sg_dma_address(ctx->sg),
static int mtk_sha_final_req(struct mtk_cryp *cryp,
			     struct mtk_sha_rec *sha)
{
	struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);

	mtk_sha_fill_padding(ctx, 0);

	sha->flags |= SHA_FLAGS_FINAL;

	return mtk_sha_dma_map(cryp, sha, ctx, count);
}
/* Copy ready hash (+ finalize hmac) */
static int mtk_sha_finish(struct ahash_request *req)
{
	struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
	__le32 *digest = ctx->info.digest;
	u32 *result = (u32 *)req->result;
	int i;

	/* Get the hash from the digest buffer */
	for (i = 0; i < SIZE_IN_WORDS(ctx->ds); i++)
		result[i] = le32_to_cpu(digest[i]);

	if (ctx->flags & SHA_FLAGS_HMAC)
		return mtk_sha_finish_hmac(req);
static void mtk_sha_finish_req(struct mtk_cryp *cryp,
			       struct mtk_sha_rec *sha,
			       int err)
{
	if (likely(!err && (SHA_FLAGS_FINAL & sha->flags)))
		err = mtk_sha_finish(sha->req);

	sha->flags &= ~(SHA_FLAGS_BUSY | SHA_FLAGS_FINAL);

	sha->req->base.complete(&sha->req->base, err);

	/* Handle new request */
	tasklet_schedule(&sha->queue_task);
}
static int mtk_sha_handle_queue(struct mtk_cryp *cryp, u8 id,
				struct ahash_request *req)
{
	struct mtk_sha_rec *sha = cryp->sha[id];
	struct crypto_async_request *async_req, *backlog;
	struct mtk_sha_reqctx *ctx;
	int err = 0, ret = 0;

	spin_lock_irqsave(&sha->lock, flags);

	ret = ahash_enqueue_request(&sha->queue, req);

	if (SHA_FLAGS_BUSY & sha->flags) {
		spin_unlock_irqrestore(&sha->lock, flags);

	backlog = crypto_get_backlog(&sha->queue);
	async_req = crypto_dequeue_request(&sha->queue);

	sha->flags |= SHA_FLAGS_BUSY;
	spin_unlock_irqrestore(&sha->lock, flags);

		backlog->complete(backlog, -EINPROGRESS);

	req = ahash_request_cast(async_req);
	ctx = ahash_request_ctx(req);

	mtk_sha_info_init(ctx);

	if (ctx->op == SHA_OP_UPDATE) {
		err = mtk_sha_update_start(cryp, sha);
		if (err != -EINPROGRESS && (ctx->flags & SHA_FLAGS_FINUP))
			/* No final() after finup() */
			err = mtk_sha_final_req(cryp, sha);
	} else if (ctx->op == SHA_OP_FINAL) {
		err = mtk_sha_final_req(cryp, sha);
	}

	if (unlikely(err != -EINPROGRESS))
		/* Task will not finish it, so do it here */
		mtk_sha_finish_req(cryp, sha, err);
static int mtk_sha_enqueue(struct ahash_request *req, u32 op)
{
	struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
	struct mtk_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);

	return mtk_sha_handle_queue(tctx->cryp, tctx->id, req);
}
static void mtk_sha_unmap(struct mtk_cryp *cryp, struct mtk_sha_rec *sha)
{
	struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);

	dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(ctx->info),

	if (ctx->flags & SHA_FLAGS_SG) {
		dma_unmap_sg(cryp->dev, ctx->sg, 1, DMA_TO_DEVICE);
		if (ctx->sg->length == ctx->offset) {
			ctx->sg = sg_next(ctx->sg);

		if (ctx->flags & SHA_FLAGS_PAD) {
			dma_unmap_single(cryp->dev, ctx->dma_addr,
					 SHA_BUF_SIZE, DMA_TO_DEVICE);

	dma_unmap_single(cryp->dev, ctx->dma_addr,
			 SHA_BUF_SIZE, DMA_TO_DEVICE);
static void mtk_sha_complete(struct mtk_cryp *cryp,
			     struct mtk_sha_rec *sha)
{
	int err;

	err = mtk_sha_update_start(cryp, sha);
	if (err != -EINPROGRESS)
		mtk_sha_finish_req(cryp, sha, err);
}
static int mtk_sha_update(struct ahash_request *req)
{
	struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);

	ctx->total = req->nbytes;

	if ((ctx->bufcnt + ctx->total < SHA_BUF_SIZE) &&
	    !(ctx->flags & SHA_FLAGS_FINUP))
		return mtk_sha_append_sg(ctx);

	return mtk_sha_enqueue(req, SHA_OP_UPDATE);
}
static int mtk_sha_final(struct ahash_request *req)
{
	struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);

	ctx->flags |= SHA_FLAGS_FINUP;

	if (ctx->flags & SHA_FLAGS_PAD)
		return mtk_sha_finish(req);

	return mtk_sha_enqueue(req, SHA_OP_FINAL);
}
static int mtk_sha_finup(struct ahash_request *req)
{
	struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
	int err1, err2;

	ctx->flags |= SHA_FLAGS_FINUP;

	err1 = mtk_sha_update(req);
	if (err1 == -EINPROGRESS || err1 == -EBUSY)
		return err1;

	/*
	 * final() always has to be called to clean up resources,
	 * even if update() failed.
	 */
	err2 = mtk_sha_final(req);

	return err1 ?: err2;
}

static int mtk_sha_digest(struct ahash_request *req)
{
	return mtk_sha_init(req) ?: mtk_sha_finup(req);
}
static int mtk_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
			  unsigned int keylen)
{
	struct mtk_sha_ctx *tctx = crypto_ahash_ctx(tfm);
	struct mtk_sha_hmac_ctx *bctx = tctx->base;
	size_t bs = crypto_shash_blocksize(bctx->shash);
	size_t ds = crypto_shash_digestsize(bctx->shash);

	SHASH_DESC_ON_STACK(shash, bctx->shash);

	shash->tfm = bctx->shash;
	shash->flags = crypto_shash_get_flags(bctx->shash) &
		       CRYPTO_TFM_REQ_MAY_SLEEP;

		err = crypto_shash_digest(shash, key, keylen, bctx->ipad);

		memcpy(bctx->ipad, key, keylen);

	memset(bctx->ipad + keylen, 0, bs - keylen);
	memcpy(bctx->opad, bctx->ipad, bs);

	for (i = 0; i < bs; i++) {
		bctx->ipad[i] ^= HMAC_IPAD_VALUE;
		bctx->opad[i] ^= HMAC_OPAD_VALUE;
	}
static int mtk_sha_export(struct ahash_request *req, void *out)
{
	const struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);

	memcpy(out, ctx, sizeof(*ctx));
	return 0;
}

static int mtk_sha_import(struct ahash_request *req, const void *in)
{
	struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);

	memcpy(ctx, in, sizeof(*ctx));
	return 0;
}
static int mtk_sha_cra_init_alg(struct crypto_tfm *tfm,
				const char *alg_base)
{
	struct mtk_sha_ctx *tctx = crypto_tfm_ctx(tfm);
	struct mtk_cryp *cryp = NULL;

	cryp = mtk_sha_find_dev(tctx);

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct mtk_sha_reqctx));

	if (alg_base) {
		struct mtk_sha_hmac_ctx *bctx = tctx->base;

		tctx->flags |= SHA_FLAGS_HMAC;
		bctx->shash = crypto_alloc_shash(alg_base, 0,
						 CRYPTO_ALG_NEED_FALLBACK);
		if (IS_ERR(bctx->shash)) {
			pr_err("base driver %s could not be loaded.\n",
			       alg_base);

			return PTR_ERR(bctx->shash);
		}
	}
*tfm
)
883 return mtk_sha_cra_init_alg(tfm
, NULL
);
886 static int mtk_sha_cra_sha1_init(struct crypto_tfm
*tfm
)
888 return mtk_sha_cra_init_alg(tfm
, "sha1");
891 static int mtk_sha_cra_sha224_init(struct crypto_tfm
*tfm
)
893 return mtk_sha_cra_init_alg(tfm
, "sha224");
896 static int mtk_sha_cra_sha256_init(struct crypto_tfm
*tfm
)
898 return mtk_sha_cra_init_alg(tfm
, "sha256");
901 static int mtk_sha_cra_sha384_init(struct crypto_tfm
*tfm
)
903 return mtk_sha_cra_init_alg(tfm
, "sha384");
906 static int mtk_sha_cra_sha512_init(struct crypto_tfm
*tfm
)
908 return mtk_sha_cra_init_alg(tfm
, "sha512");
911 static void mtk_sha_cra_exit(struct crypto_tfm
*tfm
)
913 struct mtk_sha_ctx
*tctx
= crypto_tfm_ctx(tfm
);
915 if (tctx
->flags
& SHA_FLAGS_HMAC
) {
916 struct mtk_sha_hmac_ctx
*bctx
= tctx
->base
;
918 crypto_free_shash(bctx
->shash
);
static struct ahash_alg algs_sha1_sha224_sha256[] = {
{
	.init		= mtk_sha_init,
	.update		= mtk_sha_update,
	.final		= mtk_sha_final,
	.finup		= mtk_sha_finup,
	.digest		= mtk_sha_digest,
	.export		= mtk_sha_export,
	.import		= mtk_sha_import,
	.halg.digestsize	= SHA1_DIGEST_SIZE,
	.halg.statesize	= sizeof(struct mtk_sha_reqctx),
	.halg.base	= {
		.cra_driver_name	= "mtk-sha1",
		.cra_flags		= CRYPTO_ALG_ASYNC,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct mtk_sha_ctx),
		.cra_alignmask		= SHA_ALIGN_MSK,
		.cra_module		= THIS_MODULE,
		.cra_init		= mtk_sha_cra_init,
		.cra_exit		= mtk_sha_cra_exit,
	}
},
{
	.init		= mtk_sha_init,
	.update		= mtk_sha_update,
	.final		= mtk_sha_final,
	.finup		= mtk_sha_finup,
	.digest		= mtk_sha_digest,
	.export		= mtk_sha_export,
	.import		= mtk_sha_import,
	.halg.digestsize	= SHA224_DIGEST_SIZE,
	.halg.statesize	= sizeof(struct mtk_sha_reqctx),
	.halg.base	= {
		.cra_name		= "sha224",
		.cra_driver_name	= "mtk-sha224",
		.cra_flags		= CRYPTO_ALG_ASYNC,
		.cra_blocksize		= SHA224_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct mtk_sha_ctx),
		.cra_alignmask		= SHA_ALIGN_MSK,
		.cra_module		= THIS_MODULE,
		.cra_init		= mtk_sha_cra_init,
		.cra_exit		= mtk_sha_cra_exit,
	}
},
{
	.init		= mtk_sha_init,
	.update		= mtk_sha_update,
	.final		= mtk_sha_final,
	.finup		= mtk_sha_finup,
	.digest		= mtk_sha_digest,
	.export		= mtk_sha_export,
	.import		= mtk_sha_import,
	.halg.digestsize	= SHA256_DIGEST_SIZE,
	.halg.statesize	= sizeof(struct mtk_sha_reqctx),
	.halg.base	= {
		.cra_name		= "sha256",
		.cra_driver_name	= "mtk-sha256",
		.cra_flags		= CRYPTO_ALG_ASYNC,
		.cra_blocksize		= SHA256_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct mtk_sha_ctx),
		.cra_alignmask		= SHA_ALIGN_MSK,
		.cra_module		= THIS_MODULE,
		.cra_init		= mtk_sha_cra_init,
		.cra_exit		= mtk_sha_cra_exit,
	}
},
{
	.init		= mtk_sha_init,
	.update		= mtk_sha_update,
	.final		= mtk_sha_final,
	.finup		= mtk_sha_finup,
	.digest		= mtk_sha_digest,
	.export		= mtk_sha_export,
	.import		= mtk_sha_import,
	.setkey		= mtk_sha_setkey,
	.halg.digestsize	= SHA1_DIGEST_SIZE,
	.halg.statesize	= sizeof(struct mtk_sha_reqctx),
	.halg.base	= {
		.cra_name		= "hmac(sha1)",
		.cra_driver_name	= "mtk-hmac-sha1",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct mtk_sha_ctx) +
					  sizeof(struct mtk_sha_hmac_ctx),
		.cra_alignmask		= SHA_ALIGN_MSK,
		.cra_module		= THIS_MODULE,
		.cra_init		= mtk_sha_cra_sha1_init,
		.cra_exit		= mtk_sha_cra_exit,
	}
},
{
	.init		= mtk_sha_init,
	.update		= mtk_sha_update,
	.final		= mtk_sha_final,
	.finup		= mtk_sha_finup,
	.digest		= mtk_sha_digest,
	.export		= mtk_sha_export,
	.import		= mtk_sha_import,
	.setkey		= mtk_sha_setkey,
	.halg.digestsize	= SHA224_DIGEST_SIZE,
	.halg.statesize	= sizeof(struct mtk_sha_reqctx),
	.halg.base	= {
		.cra_name		= "hmac(sha224)",
		.cra_driver_name	= "mtk-hmac-sha224",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA224_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct mtk_sha_ctx) +
					  sizeof(struct mtk_sha_hmac_ctx),
		.cra_alignmask		= SHA_ALIGN_MSK,
		.cra_module		= THIS_MODULE,
		.cra_init		= mtk_sha_cra_sha224_init,
		.cra_exit		= mtk_sha_cra_exit,
	}
},
{
	.init		= mtk_sha_init,
	.update		= mtk_sha_update,
	.final		= mtk_sha_final,
	.finup		= mtk_sha_finup,
	.digest		= mtk_sha_digest,
	.export		= mtk_sha_export,
	.import		= mtk_sha_import,
	.setkey		= mtk_sha_setkey,
	.halg.digestsize	= SHA256_DIGEST_SIZE,
	.halg.statesize	= sizeof(struct mtk_sha_reqctx),
	.halg.base	= {
		.cra_name		= "hmac(sha256)",
		.cra_driver_name	= "mtk-hmac-sha256",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA256_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct mtk_sha_ctx) +
					  sizeof(struct mtk_sha_hmac_ctx),
		.cra_alignmask		= SHA_ALIGN_MSK,
		.cra_module		= THIS_MODULE,
		.cra_init		= mtk_sha_cra_sha256_init,
		.cra_exit		= mtk_sha_cra_exit,
	}
},
};
static struct ahash_alg algs_sha384_sha512[] = {
{
	.init		= mtk_sha_init,
	.update		= mtk_sha_update,
	.final		= mtk_sha_final,
	.finup		= mtk_sha_finup,
	.digest		= mtk_sha_digest,
	.export		= mtk_sha_export,
	.import		= mtk_sha_import,
	.halg.digestsize	= SHA384_DIGEST_SIZE,
	.halg.statesize	= sizeof(struct mtk_sha_reqctx),
	.halg.base	= {
		.cra_name		= "sha384",
		.cra_driver_name	= "mtk-sha384",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_ASYNC,
		.cra_blocksize		= SHA384_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct mtk_sha_ctx),
		.cra_alignmask		= SHA_ALIGN_MSK,
		.cra_module		= THIS_MODULE,
		.cra_init		= mtk_sha_cra_init,
		.cra_exit		= mtk_sha_cra_exit,
	}
},
{
	.init		= mtk_sha_init,
	.update		= mtk_sha_update,
	.final		= mtk_sha_final,
	.finup		= mtk_sha_finup,
	.digest		= mtk_sha_digest,
	.export		= mtk_sha_export,
	.import		= mtk_sha_import,
	.halg.digestsize	= SHA512_DIGEST_SIZE,
	.halg.statesize	= sizeof(struct mtk_sha_reqctx),
	.halg.base	= {
		.cra_name		= "sha512",
		.cra_driver_name	= "mtk-sha512",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_ASYNC,
		.cra_blocksize		= SHA512_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct mtk_sha_ctx),
		.cra_alignmask		= SHA_ALIGN_MSK,
		.cra_module		= THIS_MODULE,
		.cra_init		= mtk_sha_cra_init,
		.cra_exit		= mtk_sha_cra_exit,
	}
},
{
	.init		= mtk_sha_init,
	.update		= mtk_sha_update,
	.final		= mtk_sha_final,
	.finup		= mtk_sha_finup,
	.digest		= mtk_sha_digest,
	.export		= mtk_sha_export,
	.import		= mtk_sha_import,
	.setkey		= mtk_sha_setkey,
	.halg.digestsize	= SHA384_DIGEST_SIZE,
	.halg.statesize	= sizeof(struct mtk_sha_reqctx),
	.halg.base	= {
		.cra_name		= "hmac(sha384)",
		.cra_driver_name	= "mtk-hmac-sha384",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA384_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct mtk_sha_ctx) +
					  sizeof(struct mtk_sha_hmac_ctx),
		.cra_alignmask		= SHA_ALIGN_MSK,
		.cra_module		= THIS_MODULE,
		.cra_init		= mtk_sha_cra_sha384_init,
		.cra_exit		= mtk_sha_cra_exit,
	}
},
{
	.init		= mtk_sha_init,
	.update		= mtk_sha_update,
	.final		= mtk_sha_final,
	.finup		= mtk_sha_finup,
	.digest		= mtk_sha_digest,
	.export		= mtk_sha_export,
	.import		= mtk_sha_import,
	.setkey		= mtk_sha_setkey,
	.halg.digestsize	= SHA512_DIGEST_SIZE,
	.halg.statesize	= sizeof(struct mtk_sha_reqctx),
	.halg.base	= {
		.cra_name		= "hmac(sha512)",
		.cra_driver_name	= "mtk-hmac-sha512",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA512_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct mtk_sha_ctx) +
					  sizeof(struct mtk_sha_hmac_ctx),
		.cra_alignmask		= SHA_ALIGN_MSK,
		.cra_module		= THIS_MODULE,
		.cra_init		= mtk_sha_cra_sha512_init,
		.cra_exit		= mtk_sha_cra_exit,
	}
},
};
static void mtk_sha_queue_task(unsigned long data)
{
	struct mtk_sha_rec *sha = (struct mtk_sha_rec *)data;

	mtk_sha_handle_queue(sha->cryp, sha->id - MTK_RING2, NULL);
}

static void mtk_sha_done_task(unsigned long data)
{
	struct mtk_sha_rec *sha = (struct mtk_sha_rec *)data;
	struct mtk_cryp *cryp = sha->cryp;

	mtk_sha_unmap(cryp, sha);
	mtk_sha_complete(cryp, sha);
}
static irqreturn_t mtk_sha_irq(int irq, void *dev_id)
{
	struct mtk_sha_rec *sha = (struct mtk_sha_rec *)dev_id;
	struct mtk_cryp *cryp = sha->cryp;
	u32 val = mtk_sha_read(cryp, RDR_STAT(sha->id));

	mtk_sha_write(cryp, RDR_STAT(sha->id), val);

	if (likely((SHA_FLAGS_BUSY & sha->flags))) {
		mtk_sha_write(cryp, RDR_PROC_COUNT(sha->id), MTK_CNT_RST);
		mtk_sha_write(cryp, RDR_THRESH(sha->id),
			      MTK_RDR_PROC_THRESH | MTK_RDR_PROC_MODE);

		tasklet_schedule(&sha->done_task);
	} else {
		dev_warn(cryp->dev, "SHA interrupt when no active requests.\n");
	}
/*
 * The two SHA records are used to get extra performance.
 * This is similar to mtk_aes_record_init().
 */
static int mtk_sha_record_init(struct mtk_cryp *cryp)
{
	struct mtk_sha_rec **sha = cryp->sha;
	int i, err = -ENOMEM;

	for (i = 0; i < MTK_REC_NUM; i++) {
		sha[i] = kzalloc(sizeof(**sha), GFP_KERNEL);

		sha[i]->cryp = cryp;

		spin_lock_init(&sha[i]->lock);
		crypto_init_queue(&sha[i]->queue, SHA_QUEUE_SIZE);

		tasklet_init(&sha[i]->queue_task, mtk_sha_queue_task,
			     (unsigned long)sha[i]);
		tasklet_init(&sha[i]->done_task, mtk_sha_done_task,
			     (unsigned long)sha[i]);
	}

	/* Link to ring2 and ring3 respectively */
	sha[0]->id = MTK_RING2;
	sha[1]->id = MTK_RING3;
static void mtk_sha_record_free(struct mtk_cryp *cryp)
{
	int i;

	for (i = 0; i < MTK_REC_NUM; i++) {
		tasklet_kill(&cryp->sha[i]->done_task);
		tasklet_kill(&cryp->sha[i]->queue_task);

		kfree(cryp->sha[i]);
	}
}
static void mtk_sha_unregister_algs(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(algs_sha1_sha224_sha256); i++)
		crypto_unregister_ahash(&algs_sha1_sha224_sha256[i]);

	for (i = 0; i < ARRAY_SIZE(algs_sha384_sha512); i++)
		crypto_unregister_ahash(&algs_sha384_sha512[i]);
}
static int mtk_sha_register_algs(void)
{
	int err, i;

	for (i = 0; i < ARRAY_SIZE(algs_sha1_sha224_sha256); i++) {
		err = crypto_register_ahash(&algs_sha1_sha224_sha256[i]);
		if (err)
			goto err_sha_224_256_algs;
	}

	for (i = 0; i < ARRAY_SIZE(algs_sha384_sha512); i++) {
		err = crypto_register_ahash(&algs_sha384_sha512[i]);
		if (err)
			goto err_sha_384_512_algs;
	}

	return 0;

err_sha_384_512_algs:
	crypto_unregister_ahash(&algs_sha384_sha512[i]);
	i = ARRAY_SIZE(algs_sha1_sha224_sha256);
err_sha_224_256_algs:
	crypto_unregister_ahash(&algs_sha1_sha224_sha256[i]);

	return err;
}
int mtk_hash_alg_register(struct mtk_cryp *cryp)
{
	int err;

	INIT_LIST_HEAD(&cryp->sha_list);

	/* Initialize two hash records */
	err = mtk_sha_record_init(cryp);

	err = devm_request_irq(cryp->dev, cryp->irq[MTK_RING2], mtk_sha_irq,
			       0, "mtk-sha", cryp->sha[0]);
	if (err) {
		dev_err(cryp->dev, "unable to request sha irq0.\n");

	err = devm_request_irq(cryp->dev, cryp->irq[MTK_RING3], mtk_sha_irq,
			       0, "mtk-sha", cryp->sha[1]);
	if (err) {
		dev_err(cryp->dev, "unable to request sha irq1.\n");

	/* Enable ring2 and ring3 interrupt for hash */
	mtk_sha_write(cryp, AIC_ENABLE_SET(MTK_RING2), MTK_IRQ_RDR2);
	mtk_sha_write(cryp, AIC_ENABLE_SET(MTK_RING3), MTK_IRQ_RDR3);

	spin_lock(&mtk_sha.lock);
	list_add_tail(&cryp->sha_list, &mtk_sha.dev_list);
	spin_unlock(&mtk_sha.lock);

	err = mtk_sha_register_algs();

	spin_lock(&mtk_sha.lock);
	list_del(&cryp->sha_list);
	spin_unlock(&mtk_sha.lock);

	mtk_sha_record_free(cryp);

	dev_err(cryp->dev, "mtk-sha initialization failed.\n");
void mtk_hash_alg_release(struct mtk_cryp *cryp)
{
	spin_lock(&mtk_sha.lock);
	list_del(&cryp->sha_list);
	spin_unlock(&mtk_sha.lock);

	mtk_sha_unregister_algs();
	mtk_sha_record_free(cryp);
}