drivers/crypto/mediatek/mtk-aes.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Cryptographic API.
 *
 * Driver for EIP97 AES acceleration.
 *
 * Copyright (c) 2016 Ryder Lee <ryder.lee@mediatek.com>
 *
 * Some ideas are from atmel-aes.c drivers.
 */

#include <crypto/aes.h>
#include <crypto/gcm.h>
#include <crypto/internal/skcipher.h>
#include "mtk-platform.h"
#define AES_QUEUE_SIZE		512
#define AES_BUF_ORDER		2
#define AES_BUF_SIZE		((PAGE_SIZE << AES_BUF_ORDER) \
				& ~(AES_BLOCK_SIZE - 1))
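/*
 * Note: with AES_BUF_ORDER = 2 each record's bounce buffer spans four pages
 * (16 KiB with a typical 4 KiB PAGE_SIZE), rounded down to a whole number of
 * AES blocks so that copied-in data never ends in the middle of a block.
 */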
#define AES_MAX_STATE_BUF_SIZE	SIZE_IN_WORDS(AES_KEYSIZE_256 + \
				AES_BLOCK_SIZE * 2)
#define AES_MAX_CT_SIZE		6

#define AES_CT_CTRL_HDR		cpu_to_le32(0x00220000)
/* AES-CBC/ECB/CTR/OFB/CFB command token */
#define AES_CMD0		cpu_to_le32(0x05000000)
#define AES_CMD1		cpu_to_le32(0x2d060000)
#define AES_CMD2		cpu_to_le32(0xe4a63806)
/* AES-GCM command token */
#define AES_GCM_CMD0		cpu_to_le32(0x0b000000)
#define AES_GCM_CMD1		cpu_to_le32(0xa0800000)
#define AES_GCM_CMD2		cpu_to_le32(0x25000010)
#define AES_GCM_CMD3		cpu_to_le32(0x0f020000)
#define AES_GCM_CMD4		cpu_to_le32(0x21e60000)
#define AES_GCM_CMD5		cpu_to_le32(0x40e60000)
#define AES_GCM_CMD6		cpu_to_le32(0xd0070000)
/* AES transform information word 0 fields */
#define AES_TFM_BASIC_OUT	cpu_to_le32(0x4 << 0)
#define AES_TFM_BASIC_IN	cpu_to_le32(0x5 << 0)
#define AES_TFM_GCM_OUT		cpu_to_le32(0x6 << 0)
#define AES_TFM_GCM_IN		cpu_to_le32(0xf << 0)
#define AES_TFM_SIZE(x)		cpu_to_le32((x) << 8)
#define AES_TFM_128BITS		cpu_to_le32(0xb << 16)
#define AES_TFM_192BITS		cpu_to_le32(0xd << 16)
#define AES_TFM_256BITS		cpu_to_le32(0xf << 16)
#define AES_TFM_GHASH_DIGEST	cpu_to_le32(0x2 << 21)
#define AES_TFM_GHASH		cpu_to_le32(0x4 << 23)
/* AES transform information word 1 fields */
#define AES_TFM_ECB		cpu_to_le32(0x0 << 0)
#define AES_TFM_CBC		cpu_to_le32(0x1 << 0)
#define AES_TFM_OFB		cpu_to_le32(0x4 << 0)
#define AES_TFM_CFB128		cpu_to_le32(0x5 << 0)
#define AES_TFM_CTR_INIT	cpu_to_le32(0x2 << 0)	/* init counter to 1 */
#define AES_TFM_CTR_LOAD	cpu_to_le32(0x6 << 0)	/* load/reuse counter */
#define AES_TFM_3IV		cpu_to_le32(0x7 << 5)	/* using IV 0-2 */
#define AES_TFM_FULL_IV		cpu_to_le32(0xf << 5)	/* using IV 0-3 */
#define AES_TFM_IV_CTR_MODE	cpu_to_le32(0x1 << 10)
#define AES_TFM_ENC_HASH	cpu_to_le32(0x1 << 17)
/* AES flags */
#define AES_FLAGS_CIPHER_MSK	GENMASK(4, 0)
#define AES_FLAGS_ECB		BIT(0)
#define AES_FLAGS_CBC		BIT(1)
#define AES_FLAGS_CTR		BIT(2)
#define AES_FLAGS_OFB		BIT(3)
#define AES_FLAGS_CFB128	BIT(4)
#define AES_FLAGS_GCM		BIT(5)
#define AES_FLAGS_ENCRYPT	BIT(6)
#define AES_FLAGS_BUSY		BIT(7)

#define AES_AUTH_TAG_ERR	cpu_to_le32(BIT(26))
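/*
 * AES_AUTH_TAG_ERR above is the tag-verification failure bit the engine
 * reports in the result descriptor; mtk_aes_gcm_tag_verify() checks it
 * after a GCM decryption.
 */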
/**
 * mtk_aes_info - hardware information of AES
 * @cmd:	command token, hardware instruction
 * @tfm:	transform state of cipher algorithm.
 * @state:	contains keys and initial vectors.
 *
 * Memory layout of GCM buffer:
 * /-----------\
 * |  AES KEY  | 128/192/256 bits
 * |-----------|
 * |  HASH KEY | a string of 128 zero bits encrypted using the block cipher
 * |-----------|
 * |    IVs    | 4 * 4 bytes
 * \-----------/
 *
 * The engine requires all of this information to:
 * - decode the command and control the engine's data path,
 * - coordinate hardware data fetch and store operations,
 * - construct and output the result token.
 */
struct mtk_aes_info {
	__le32 cmd[AES_MAX_CT_SIZE];
	__le32 tfm[2];
	__le32 state[AES_MAX_STATE_BUF_SIZE];
};
struct mtk_aes_reqctx {
	u64 mode;
};

struct mtk_aes_base_ctx {
	struct mtk_cryp *cryp;
	u32 keylen;
	__le32 key[12];
	__le32 keymode;

	mtk_aes_fn start;

	struct mtk_aes_info info;
	dma_addr_t ct_dma;
	dma_addr_t tfm_dma;

	__le32 ct_hdr;
	u32 ct_size;
};

struct mtk_aes_ctx {
	struct mtk_aes_base_ctx base;
};

struct mtk_aes_ctr_ctx {
	struct mtk_aes_base_ctx base;

	u32 iv[AES_BLOCK_SIZE / sizeof(u32)];
	size_t offset;
	struct scatterlist src[2];
	struct scatterlist dst[2];
};

struct mtk_aes_gcm_ctx {
	struct mtk_aes_base_ctx base;

	u32 authsize;
	size_t textlen;

	struct crypto_skcipher *ctr;
};

struct mtk_aes_drv {
	struct list_head dev_list;
	/* Device list lock */
	spinlock_t lock;
};

static struct mtk_aes_drv mtk_aes = {
	.dev_list = LIST_HEAD_INIT(mtk_aes.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(mtk_aes.lock),
};
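/*
 * mtk_aes.dev_list holds every registered EIP97 instance; a transform picks
 * the first device on the list the first time it issues a request (see
 * mtk_aes_find_dev()) and stays bound to it afterwards.
 */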
static inline u32 mtk_aes_read(struct mtk_cryp *cryp, u32 offset)
{
	return readl_relaxed(cryp->base + offset);
}

static inline void mtk_aes_write(struct mtk_cryp *cryp,
				 u32 offset, u32 value)
{
	writel_relaxed(value, cryp->base + offset);
}
static struct mtk_cryp *mtk_aes_find_dev(struct mtk_aes_base_ctx *ctx)
{
	struct mtk_cryp *cryp = NULL;
	struct mtk_cryp *tmp;

	spin_lock_bh(&mtk_aes.lock);
	if (!ctx->cryp) {
		list_for_each_entry(tmp, &mtk_aes.dev_list, aes_list) {
			cryp = tmp;
			break;
		}
		ctx->cryp = cryp;
	} else {
		cryp = ctx->cryp;
	}
	spin_unlock_bh(&mtk_aes.lock);

	return cryp;
}

static inline size_t mtk_aes_padlen(size_t len)
{
	len &= AES_BLOCK_SIZE - 1;
	return len ? AES_BLOCK_SIZE - len : 0;
}
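/*
 * A scatterlist can be handed to the DMA engine directly only if every
 * segment offset is 32-bit aligned and every fully consumed segment is a
 * whole number of AES blocks.  On success the final segment is trimmed to
 * 'len' and the trimmed-off remainder is recorded so mtk_aes_restore_sg()
 * can undo the trim once the transfer has finished.
 */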
static bool mtk_aes_check_aligned(struct scatterlist *sg, size_t len,
				  struct mtk_aes_dma *dma)
{
	int nents;

	if (!IS_ALIGNED(len, AES_BLOCK_SIZE))
		return false;

	for (nents = 0; sg; sg = sg_next(sg), ++nents) {
		if (!IS_ALIGNED(sg->offset, sizeof(u32)))
			return false;

		if (len <= sg->length) {
			if (!IS_ALIGNED(len, AES_BLOCK_SIZE))
				return false;

			dma->nents = nents + 1;
			dma->remainder = sg->length - len;
			sg->length = len;
			return true;
		}

		if (!IS_ALIGNED(sg->length, AES_BLOCK_SIZE))
			return false;

		len -= sg->length;
	}

	return false;
}
static inline void mtk_aes_set_mode(struct mtk_aes_rec *aes,
				    const struct mtk_aes_reqctx *rctx)
{
	/* Clear all but persistent flags and set request flags. */
	aes->flags = (aes->flags & AES_FLAGS_BUSY) | rctx->mode;
}

static inline void mtk_aes_restore_sg(const struct mtk_aes_dma *dma)
{
	struct scatterlist *sg = dma->sg;
	int nents = dma->nents;

	if (!dma->remainder)
		return;

	while (--nents > 0 && sg)
		sg = sg_next(sg);

	if (!sg)
		return;

	sg->length += dma->remainder;
}

static inline void mtk_aes_write_state_le(__le32 *dst, const u32 *src, u32 size)
{
	int i;

	for (i = 0; i < SIZE_IN_WORDS(size); i++)
		dst[i] = cpu_to_le32(src[i]);
}

static inline void mtk_aes_write_state_be(__be32 *dst, const u32 *src, u32 size)
{
	int i;

	for (i = 0; i < SIZE_IN_WORDS(size); i++)
		dst[i] = cpu_to_be32(src[i]);
}
static inline int mtk_aes_complete(struct mtk_cryp *cryp,
				   struct mtk_aes_rec *aes,
				   int err)
{
	aes->flags &= ~AES_FLAGS_BUSY;
	aes->areq->complete(aes->areq, err);
	/* Handle new request */
	tasklet_schedule(&aes->queue_task);
	return err;
}

/*
 * Write descriptors for processing. This will configure the engine, load
 * the transform information and then start the packet processing.
 */
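/*
 * Each source segment gets one command descriptor and each destination
 * segment one result descriptor; the first command descriptor additionally
 * carries the command-token and transform pointers, and MTK_DESC_FIRST /
 * MTK_DESC_LAST bracket the packet.  Writing the prepared-descriptor counts
 * at the end is what hands the work to the engine.
 */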
static int mtk_aes_xmit(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_ring *ring = cryp->ring[aes->id];
	struct mtk_desc *cmd = NULL, *res = NULL;
	struct scatterlist *ssg = aes->src.sg, *dsg = aes->dst.sg;
	u32 slen = aes->src.sg_len, dlen = aes->dst.sg_len;
	int nents;

	/* Write command descriptors */
	for (nents = 0; nents < slen; ++nents, ssg = sg_next(ssg)) {
		cmd = ring->cmd_next;
		cmd->hdr = MTK_DESC_BUF_LEN(ssg->length);
		cmd->buf = cpu_to_le32(sg_dma_address(ssg));

		if (nents == 0) {
			cmd->hdr |= MTK_DESC_FIRST |
				    MTK_DESC_CT_LEN(aes->ctx->ct_size);
			cmd->ct = cpu_to_le32(aes->ctx->ct_dma);
			cmd->ct_hdr = aes->ctx->ct_hdr;
			cmd->tfm = cpu_to_le32(aes->ctx->tfm_dma);
		}

		/* Shift ring buffer and check boundary */
		if (++ring->cmd_next == ring->cmd_base + MTK_DESC_NUM)
			ring->cmd_next = ring->cmd_base;
	}
	cmd->hdr |= MTK_DESC_LAST;

	/* Prepare result descriptors */
	for (nents = 0; nents < dlen; ++nents, dsg = sg_next(dsg)) {
		res = ring->res_next;
		res->hdr = MTK_DESC_BUF_LEN(dsg->length);
		res->buf = cpu_to_le32(sg_dma_address(dsg));

		if (nents == 0)
			res->hdr |= MTK_DESC_FIRST;

		/* Shift ring buffer and check boundary */
		if (++ring->res_next == ring->res_base + MTK_DESC_NUM)
			ring->res_next = ring->res_base;
	}
	res->hdr |= MTK_DESC_LAST;

	/* Pointer to current result descriptor */
	ring->res_prev = res;

	/* Prepare enough space for authenticated tag */
	if (aes->flags & AES_FLAGS_GCM)
		res->hdr += AES_BLOCK_SIZE;

	/*
	 * Make sure that all changes to the DMA ring are done before we
	 * start engine.
	 */
	wmb();
	/* Start DMA transfer */
	mtk_aes_write(cryp, RDR_PREP_COUNT(aes->id), MTK_DESC_CNT(dlen));
	mtk_aes_write(cryp, CDR_PREP_COUNT(aes->id), MTK_DESC_CNT(slen));

	return -EINPROGRESS;
}
static void mtk_aes_unmap(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_aes_base_ctx *ctx = aes->ctx;

	dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(ctx->info),
			 DMA_TO_DEVICE);

	if (aes->src.sg == aes->dst.sg) {
		dma_unmap_sg(cryp->dev, aes->src.sg, aes->src.nents,
			     DMA_BIDIRECTIONAL);

		if (aes->src.sg != &aes->aligned_sg)
			mtk_aes_restore_sg(&aes->src);
	} else {
		dma_unmap_sg(cryp->dev, aes->dst.sg, aes->dst.nents,
			     DMA_FROM_DEVICE);

		if (aes->dst.sg != &aes->aligned_sg)
			mtk_aes_restore_sg(&aes->dst);

		dma_unmap_sg(cryp->dev, aes->src.sg, aes->src.nents,
			     DMA_TO_DEVICE);

		if (aes->src.sg != &aes->aligned_sg)
			mtk_aes_restore_sg(&aes->src);
	}

	if (aes->dst.sg == &aes->aligned_sg)
		sg_copy_from_buffer(aes->real_dst, sg_nents(aes->real_dst),
				    aes->buf, aes->total);
}
static int mtk_aes_map(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_aes_base_ctx *ctx = aes->ctx;
	struct mtk_aes_info *info = &ctx->info;

	ctx->ct_dma = dma_map_single(cryp->dev, info, sizeof(*info),
				     DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(cryp->dev, ctx->ct_dma)))
		goto exit;

	ctx->tfm_dma = ctx->ct_dma + sizeof(info->cmd);

	if (aes->src.sg == aes->dst.sg) {
		aes->src.sg_len = dma_map_sg(cryp->dev, aes->src.sg,
					     aes->src.nents,
					     DMA_BIDIRECTIONAL);
		aes->dst.sg_len = aes->src.sg_len;
		if (unlikely(!aes->src.sg_len))
			goto sg_map_err;
	} else {
		aes->src.sg_len = dma_map_sg(cryp->dev, aes->src.sg,
					     aes->src.nents, DMA_TO_DEVICE);
		if (unlikely(!aes->src.sg_len))
			goto sg_map_err;

		aes->dst.sg_len = dma_map_sg(cryp->dev, aes->dst.sg,
					     aes->dst.nents, DMA_FROM_DEVICE);
		if (unlikely(!aes->dst.sg_len)) {
			dma_unmap_sg(cryp->dev, aes->src.sg, aes->src.nents,
				     DMA_TO_DEVICE);
			goto sg_map_err;
		}
	}

	return mtk_aes_xmit(cryp, aes);

sg_map_err:
	dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(*info), DMA_TO_DEVICE);
exit:
	return mtk_aes_complete(cryp, aes, -EINVAL);
}
/* Initialize transform information of CBC/ECB/CTR/OFB/CFB mode */
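/*
 * On the goto labels below: ECB needs no IV words at all, CTR reuses the
 * counter block that mtk_aes_ctr_transfer() has already written into the
 * state, and CBC/OFB/CFB copy req->iv into the state before falling through
 * to the shared "ctr:" tail that accounts for the extra IV words.
 */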
static void mtk_aes_info_init(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
			      size_t len)
{
	struct skcipher_request *req = skcipher_request_cast(aes->areq);
	struct mtk_aes_base_ctx *ctx = aes->ctx;
	struct mtk_aes_info *info = &ctx->info;
	u32 cnt = 0;

	ctx->ct_hdr = AES_CT_CTRL_HDR | cpu_to_le32(len);
	info->cmd[cnt++] = AES_CMD0 | cpu_to_le32(len);
	info->cmd[cnt++] = AES_CMD1;

	info->tfm[0] = AES_TFM_SIZE(ctx->keylen) | ctx->keymode;
	if (aes->flags & AES_FLAGS_ENCRYPT)
		info->tfm[0] |= AES_TFM_BASIC_OUT;
	else
		info->tfm[0] |= AES_TFM_BASIC_IN;

	switch (aes->flags & AES_FLAGS_CIPHER_MSK) {
	case AES_FLAGS_CBC:
		info->tfm[1] = AES_TFM_CBC;
		break;
	case AES_FLAGS_ECB:
		info->tfm[1] = AES_TFM_ECB;
		goto ecb;
	case AES_FLAGS_CTR:
		info->tfm[1] = AES_TFM_CTR_LOAD;
		goto ctr;
	case AES_FLAGS_OFB:
		info->tfm[1] = AES_TFM_OFB;
		break;
	case AES_FLAGS_CFB128:
		info->tfm[1] = AES_TFM_CFB128;
		break;
	default:
		/* Should not happen... */
		return;
	}

	mtk_aes_write_state_le(info->state + ctx->keylen, (void *)req->iv,
			       AES_BLOCK_SIZE);
ctr:
	info->tfm[0] += AES_TFM_SIZE(SIZE_IN_WORDS(AES_BLOCK_SIZE));
	info->tfm[1] |= AES_TFM_FULL_IV;
	info->cmd[cnt++] = AES_CMD2;
ecb:
	ctx->ct_size = cnt;
}
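/*
 * If either scatterlist fails the alignment check, the whole request is
 * copied into the record's pre-allocated bounce buffer (aes->buf), padded to
 * a block boundary and described by a single-entry scatterlist instead;
 * requests larger than AES_BUF_SIZE are failed with -ENOMEM.
 */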
static int mtk_aes_dma(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
		       struct scatterlist *src, struct scatterlist *dst,
		       size_t len)
{
	size_t padlen = 0;
	bool src_aligned, dst_aligned;

	aes->total = len;
	aes->src.sg = src;
	aes->dst.sg = dst;
	aes->real_dst = dst;

	src_aligned = mtk_aes_check_aligned(src, len, &aes->src);
	if (src == dst)
		dst_aligned = src_aligned;
	else
		dst_aligned = mtk_aes_check_aligned(dst, len, &aes->dst);

	if (!src_aligned || !dst_aligned) {
		padlen = mtk_aes_padlen(len);

		if (len + padlen > AES_BUF_SIZE)
			return mtk_aes_complete(cryp, aes, -ENOMEM);

		if (!src_aligned) {
			sg_copy_to_buffer(src, sg_nents(src), aes->buf, len);
			aes->src.sg = &aes->aligned_sg;
			aes->src.nents = 1;
			aes->src.remainder = 0;
		}

		if (!dst_aligned) {
			aes->dst.sg = &aes->aligned_sg;
			aes->dst.nents = 1;
			aes->dst.remainder = 0;
		}

		sg_init_table(&aes->aligned_sg, 1);
		sg_set_buf(&aes->aligned_sg, aes->buf, len + padlen);
	}

	mtk_aes_info_init(cryp, aes, len + padlen);

	return mtk_aes_map(cryp, aes);
}
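/*
 * Per-record request queue: new requests are enqueued under the record's
 * lock; if the record is already busy they simply wait there.  Otherwise one
 * request is dequeued, the backlog entry (if any) is notified with
 * -EINPROGRESS, the AES key is copied into the transform state buffer and
 * the context's start handler kicks off the transfer.
 */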
static int mtk_aes_handle_queue(struct mtk_cryp *cryp, u8 id,
				struct crypto_async_request *new_areq)
{
	struct mtk_aes_rec *aes = cryp->aes[id];
	struct crypto_async_request *areq, *backlog;
	struct mtk_aes_base_ctx *ctx;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&aes->lock, flags);
	if (new_areq)
		ret = crypto_enqueue_request(&aes->queue, new_areq);
	if (aes->flags & AES_FLAGS_BUSY) {
		spin_unlock_irqrestore(&aes->lock, flags);
		return ret;
	}
	backlog = crypto_get_backlog(&aes->queue);
	areq = crypto_dequeue_request(&aes->queue);
	if (areq)
		aes->flags |= AES_FLAGS_BUSY;
	spin_unlock_irqrestore(&aes->lock, flags);

	if (!areq)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	ctx = crypto_tfm_ctx(areq->tfm);
	/* Write key into state buffer */
	memcpy(ctx->info.state, ctx->key, sizeof(ctx->key));

	aes->areq = areq;
	aes->ctx = ctx;

	return ctx->start(cryp, aes);
}
static int mtk_aes_transfer_complete(struct mtk_cryp *cryp,
				     struct mtk_aes_rec *aes)
{
	return mtk_aes_complete(cryp, aes, 0);
}

static int mtk_aes_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct skcipher_request *req = skcipher_request_cast(aes->areq);
	struct mtk_aes_reqctx *rctx = skcipher_request_ctx(req);

	mtk_aes_set_mode(aes, rctx);
	aes->resume = mtk_aes_transfer_complete;

	return mtk_aes_dma(cryp, aes, req->src, req->dst, req->cryptlen);
}

static inline struct mtk_aes_ctr_ctx *
mtk_aes_ctr_ctx_cast(struct mtk_aes_base_ctx *ctx)
{
	return container_of(ctx, struct mtk_aes_ctr_ctx, base);
}
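/*
 * CTR requests are processed in fragments: each pass covers at most the data
 * that fits before the 32-bit big-endian counter word wraps.  When a wrap is
 * detected the counter is advanced manually and the resume handler is
 * re-entered for the remaining data.
 */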
static int mtk_aes_ctr_transfer(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_aes_base_ctx *ctx = aes->ctx;
	struct mtk_aes_ctr_ctx *cctx = mtk_aes_ctr_ctx_cast(ctx);
	struct skcipher_request *req = skcipher_request_cast(aes->areq);
	struct scatterlist *src, *dst;
	u32 start, end, ctr, blocks;
	size_t datalen;
	bool fragmented = false;

	/* Check for transfer completion. */
	cctx->offset += aes->total;
	if (cctx->offset >= req->cryptlen)
		return mtk_aes_transfer_complete(cryp, aes);

	/* Compute data length. */
	datalen = req->cryptlen - cctx->offset;
	blocks = DIV_ROUND_UP(datalen, AES_BLOCK_SIZE);
	ctr = be32_to_cpu(cctx->iv[3]);

	/* Check 32bit counter overflow. */
	start = ctr;
	end = start + blocks - 1;
	if (end < start) {
		ctr = 0xffffffff;
		datalen = AES_BLOCK_SIZE * -start;
		fragmented = true;
	}

	/* Jump to offset. */
	src = scatterwalk_ffwd(cctx->src, req->src, cctx->offset);
	dst = ((req->src == req->dst) ? src :
	       scatterwalk_ffwd(cctx->dst, req->dst, cctx->offset));

	/* Write IVs into transform state buffer. */
	mtk_aes_write_state_le(ctx->info.state + ctx->keylen, cctx->iv,
			       AES_BLOCK_SIZE);

	if (unlikely(fragmented)) {
		/*
		 * Increment the counter manually to cope with the hardware
		 * counter overflow.
		 */
		cctx->iv[3] = cpu_to_be32(ctr);
		crypto_inc((u8 *)cctx->iv, AES_BLOCK_SIZE);
	}

	return mtk_aes_dma(cryp, aes, src, dst, datalen);
}
static int mtk_aes_ctr_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_aes_ctr_ctx *cctx = mtk_aes_ctr_ctx_cast(aes->ctx);
	struct skcipher_request *req = skcipher_request_cast(aes->areq);
	struct mtk_aes_reqctx *rctx = skcipher_request_ctx(req);

	mtk_aes_set_mode(aes, rctx);

	memcpy(cctx->iv, req->iv, AES_BLOCK_SIZE);
	cctx->offset = 0;
	aes->total = 0;
	aes->resume = mtk_aes_ctr_transfer;

	return mtk_aes_ctr_transfer(cryp, aes);
}
/* Check and set the AES key to transform state buffer */
static int mtk_aes_setkey(struct crypto_skcipher *tfm,
			  const u8 *key, u32 keylen)
{
	struct mtk_aes_base_ctx *ctx = crypto_skcipher_ctx(tfm);

	switch (keylen) {
	case AES_KEYSIZE_128:
		ctx->keymode = AES_TFM_128BITS;
		break;
	case AES_KEYSIZE_192:
		ctx->keymode = AES_TFM_192BITS;
		break;
	case AES_KEYSIZE_256:
		ctx->keymode = AES_TFM_256BITS;
		break;

	default:
		return -EINVAL;
	}

	ctx->keylen = SIZE_IN_WORDS(keylen);
	mtk_aes_write_state_le(ctx->key, (const u32 *)key, keylen);

	return 0;
}
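/*
 * Requests are routed by direction: encryption requests go to record 0
 * (ring0) and decryption requests to record 1 (ring1), since
 * !(mode & AES_FLAGS_ENCRYPT) evaluates to 0 or 1.
 */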
static int mtk_aes_crypt(struct skcipher_request *req, u64 mode)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct mtk_aes_base_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct mtk_aes_reqctx *rctx;
	struct mtk_cryp *cryp;

	cryp = mtk_aes_find_dev(ctx);
	if (!cryp)
		return -ENODEV;

	rctx = skcipher_request_ctx(req);
	rctx->mode = mode;

	return mtk_aes_handle_queue(cryp, !(mode & AES_FLAGS_ENCRYPT),
				    &req->base);
}

static int mtk_aes_ecb_encrypt(struct skcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_ECB);
}

static int mtk_aes_ecb_decrypt(struct skcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_ECB);
}

static int mtk_aes_cbc_encrypt(struct skcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CBC);
}

static int mtk_aes_cbc_decrypt(struct skcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_CBC);
}

static int mtk_aes_ctr_encrypt(struct skcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CTR);
}

static int mtk_aes_ctr_decrypt(struct skcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_CTR);
}

static int mtk_aes_ofb_encrypt(struct skcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_OFB);
}

static int mtk_aes_ofb_decrypt(struct skcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_OFB);
}

static int mtk_aes_cfb_encrypt(struct skcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CFB128);
}

static int mtk_aes_cfb_decrypt(struct skcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_CFB128);
}
static int mtk_aes_init_tfm(struct crypto_skcipher *tfm)
{
	struct mtk_aes_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_skcipher_set_reqsize(tfm, sizeof(struct mtk_aes_reqctx));
	ctx->base.start = mtk_aes_start;
	return 0;
}

static int mtk_aes_ctr_init_tfm(struct crypto_skcipher *tfm)
{
	struct mtk_aes_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_skcipher_set_reqsize(tfm, sizeof(struct mtk_aes_reqctx));
	ctx->base.start = mtk_aes_ctr_start;
	return 0;
}
static struct skcipher_alg aes_algs[] = {
{
	.base.cra_name		= "cbc(aes)",
	.base.cra_driver_name	= "cbc-aes-mtk",
	.base.cra_priority	= 400,
	.base.cra_flags		= CRYPTO_ALG_ASYNC,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct mtk_aes_ctx),
	.base.cra_alignmask	= 0xf,
	.base.cra_module	= THIS_MODULE,

	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.setkey			= mtk_aes_setkey,
	.encrypt		= mtk_aes_cbc_encrypt,
	.decrypt		= mtk_aes_cbc_decrypt,
	.ivsize			= AES_BLOCK_SIZE,
	.init			= mtk_aes_init_tfm,
},
{
	.base.cra_name		= "ecb(aes)",
	.base.cra_driver_name	= "ecb-aes-mtk",
	.base.cra_priority	= 400,
	.base.cra_flags		= CRYPTO_ALG_ASYNC,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct mtk_aes_ctx),
	.base.cra_alignmask	= 0xf,
	.base.cra_module	= THIS_MODULE,

	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.setkey			= mtk_aes_setkey,
	.encrypt		= mtk_aes_ecb_encrypt,
	.decrypt		= mtk_aes_ecb_decrypt,
	.init			= mtk_aes_init_tfm,
},
{
	.base.cra_name		= "ctr(aes)",
	.base.cra_driver_name	= "ctr-aes-mtk",
	.base.cra_priority	= 400,
	.base.cra_flags		= CRYPTO_ALG_ASYNC,
	.base.cra_blocksize	= 1,
	.base.cra_ctxsize	= sizeof(struct mtk_aes_ctx),
	.base.cra_alignmask	= 0xf,
	.base.cra_module	= THIS_MODULE,

	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= mtk_aes_setkey,
	.encrypt		= mtk_aes_ctr_encrypt,
	.decrypt		= mtk_aes_ctr_decrypt,
	.init			= mtk_aes_ctr_init_tfm,
},
{
	.base.cra_name		= "ofb(aes)",
	.base.cra_driver_name	= "ofb-aes-mtk",
	.base.cra_priority	= 400,
	.base.cra_flags		= CRYPTO_ALG_ASYNC,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct mtk_aes_ctx),
	.base.cra_alignmask	= 0xf,
	.base.cra_module	= THIS_MODULE,

	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= mtk_aes_setkey,
	.encrypt		= mtk_aes_ofb_encrypt,
	.decrypt		= mtk_aes_ofb_decrypt,
},
{
	.base.cra_name		= "cfb(aes)",
	.base.cra_driver_name	= "cfb-aes-mtk",
	.base.cra_priority	= 400,
	.base.cra_flags		= CRYPTO_ALG_ASYNC,
	.base.cra_blocksize	= 1,
	.base.cra_ctxsize	= sizeof(struct mtk_aes_ctx),
	.base.cra_alignmask	= 0xf,
	.base.cra_module	= THIS_MODULE,

	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= mtk_aes_setkey,
	.encrypt		= mtk_aes_cfb_encrypt,
	.decrypt		= mtk_aes_cfb_decrypt,
},
};
static inline struct mtk_aes_gcm_ctx *
mtk_aes_gcm_ctx_cast(struct mtk_aes_base_ctx *ctx)
{
	return container_of(ctx, struct mtk_aes_gcm_ctx, base);
}

/*
 * The engine verifies and compares the tag automatically, so we just need
 * to check the returned status, which is stored in the result descriptor.
 */
static int mtk_aes_gcm_tag_verify(struct mtk_cryp *cryp,
				  struct mtk_aes_rec *aes)
{
	u32 status = cryp->ring[aes->id]->res_prev->ct;

	return mtk_aes_complete(cryp, aes, (status & AES_AUTH_TAG_ERR) ?
				-EBADMSG : 0);
}
/* Initialize transform information of GCM mode */
static void mtk_aes_gcm_info_init(struct mtk_cryp *cryp,
				  struct mtk_aes_rec *aes,
				  size_t len)
{
	struct aead_request *req = aead_request_cast(aes->areq);
	struct mtk_aes_base_ctx *ctx = aes->ctx;
	struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);
	struct mtk_aes_info *info = &ctx->info;
	u32 ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
	u32 cnt = 0;

	ctx->ct_hdr = AES_CT_CTRL_HDR | len;

	info->cmd[cnt++] = AES_GCM_CMD0 | cpu_to_le32(req->assoclen);
	info->cmd[cnt++] = AES_GCM_CMD1 | cpu_to_le32(req->assoclen);
	info->cmd[cnt++] = AES_GCM_CMD2;
	info->cmd[cnt++] = AES_GCM_CMD3 | cpu_to_le32(gctx->textlen);

	if (aes->flags & AES_FLAGS_ENCRYPT) {
		info->cmd[cnt++] = AES_GCM_CMD4 | cpu_to_le32(gctx->authsize);
		info->tfm[0] = AES_TFM_GCM_OUT;
	} else {
		info->cmd[cnt++] = AES_GCM_CMD5 | cpu_to_le32(gctx->authsize);
		info->cmd[cnt++] = AES_GCM_CMD6 | cpu_to_le32(gctx->authsize);
		info->tfm[0] = AES_TFM_GCM_IN;
	}
	ctx->ct_size = cnt;

	info->tfm[0] |= AES_TFM_GHASH_DIGEST | AES_TFM_GHASH | AES_TFM_SIZE(
			ctx->keylen + SIZE_IN_WORDS(AES_BLOCK_SIZE + ivsize)) |
			ctx->keymode;
	info->tfm[1] = AES_TFM_CTR_INIT | AES_TFM_IV_CTR_MODE | AES_TFM_3IV |
		       AES_TFM_ENC_HASH;

	mtk_aes_write_state_le(info->state + ctx->keylen + SIZE_IN_WORDS(
			       AES_BLOCK_SIZE), (const u32 *)req->iv, ivsize);
}
static int mtk_aes_gcm_dma(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
			   struct scatterlist *src, struct scatterlist *dst,
			   size_t len)
{
	bool src_aligned, dst_aligned;

	aes->src.sg = src;
	aes->dst.sg = dst;
	aes->real_dst = dst;

	src_aligned = mtk_aes_check_aligned(src, len, &aes->src);
	if (src == dst)
		dst_aligned = src_aligned;
	else
		dst_aligned = mtk_aes_check_aligned(dst, len, &aes->dst);

	if (!src_aligned || !dst_aligned) {
		if (aes->total > AES_BUF_SIZE)
			return mtk_aes_complete(cryp, aes, -ENOMEM);

		if (!src_aligned) {
			sg_copy_to_buffer(src, sg_nents(src), aes->buf, len);
			aes->src.sg = &aes->aligned_sg;
			aes->src.nents = 1;
			aes->src.remainder = 0;
		}

		if (!dst_aligned) {
			aes->dst.sg = &aes->aligned_sg;
			aes->dst.nents = 1;
			aes->dst.remainder = 0;
		}

		sg_init_table(&aes->aligned_sg, 1);
		sg_set_buf(&aes->aligned_sg, aes->buf, aes->total);
	}

	mtk_aes_gcm_info_init(cryp, aes, len);

	return mtk_aes_map(cryp, aes);
}
/* Todo: GMAC */
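/*
 * For encryption the total transfer length includes the authentication tag,
 * which the engine appends after the ciphertext; for decryption the resume
 * handler instead checks the engine's tag-verification status in the result
 * descriptor.
 */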
static int mtk_aes_gcm_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(aes->ctx);
	struct aead_request *req = aead_request_cast(aes->areq);
	struct mtk_aes_reqctx *rctx = aead_request_ctx(req);
	u32 len = req->assoclen + req->cryptlen;

	mtk_aes_set_mode(aes, rctx);

	if (aes->flags & AES_FLAGS_ENCRYPT) {
		u32 tag[4];

		aes->resume = mtk_aes_transfer_complete;
		/* Compute total process length. */
		aes->total = len + gctx->authsize;
		/* Hardware will append authenticated tag to output buffer */
		scatterwalk_map_and_copy(tag, req->dst, len, gctx->authsize, 1);
	} else {
		aes->resume = mtk_aes_gcm_tag_verify;
		aes->total = len;
	}

	return mtk_aes_gcm_dma(cryp, aes, req->src, req->dst, len);
}
static int mtk_aes_gcm_crypt(struct aead_request *req, u64 mode)
{
	struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);
	struct mtk_aes_reqctx *rctx = aead_request_ctx(req);
	struct mtk_cryp *cryp;
	bool enc = !!(mode & AES_FLAGS_ENCRYPT);

	cryp = mtk_aes_find_dev(ctx);
	if (!cryp)
		return -ENODEV;

	/* Compute text length. */
	gctx->textlen = req->cryptlen - (enc ? 0 : gctx->authsize);

	/* Empty messages are not supported yet */
	if (!gctx->textlen && !req->assoclen)
		return -EINVAL;

	rctx->mode = AES_FLAGS_GCM | mode;

	return mtk_aes_handle_queue(cryp, enc, &req->base);
}
/*
 * Because of a hardware limitation, we need to pre-calculate the hash key
 * H for the GHASH operation. The result of that encryption operation needs
 * to be stored in the transform state buffer.
 */
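/*
 * Concretely: a block of zeros is encrypted with a software ctr(aes)
 * fallback transform, and the result (the GHASH key H) is written
 * big-endian into the state buffer immediately after the AES key.
 */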
static int mtk_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key,
			      u32 keylen)
{
	struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(aead);
	struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);
	struct crypto_skcipher *ctr = gctx->ctr;
	struct {
		u32 hash[4];
		u8 iv[8];

		struct crypto_wait wait;

		struct scatterlist sg[1];
		struct skcipher_request req;
	} *data;
	int err;

	switch (keylen) {
	case AES_KEYSIZE_128:
		ctx->keymode = AES_TFM_128BITS;
		break;
	case AES_KEYSIZE_192:
		ctx->keymode = AES_TFM_192BITS;
		break;
	case AES_KEYSIZE_256:
		ctx->keymode = AES_TFM_256BITS;
		break;

	default:
		return -EINVAL;
	}

	ctx->keylen = SIZE_IN_WORDS(keylen);

	/* Same as crypto_gcm_setkey() from crypto/gcm.c */
	crypto_skcipher_clear_flags(ctr, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(ctr, crypto_aead_get_flags(aead) &
				  CRYPTO_TFM_REQ_MASK);
	err = crypto_skcipher_setkey(ctr, key, keylen);
	if (err)
		return err;

	data = kzalloc(sizeof(*data) + crypto_skcipher_reqsize(ctr),
		       GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	crypto_init_wait(&data->wait);
	sg_init_one(data->sg, &data->hash, AES_BLOCK_SIZE);
	skcipher_request_set_tfm(&data->req, ctr);
	skcipher_request_set_callback(&data->req, CRYPTO_TFM_REQ_MAY_SLEEP |
				      CRYPTO_TFM_REQ_MAY_BACKLOG,
				      crypto_req_done, &data->wait);
	skcipher_request_set_crypt(&data->req, data->sg, data->sg,
				   AES_BLOCK_SIZE, data->iv);

	err = crypto_wait_req(crypto_skcipher_encrypt(&data->req),
			      &data->wait);
	if (err)
		goto out;

	mtk_aes_write_state_le(ctx->key, (const u32 *)key, keylen);
	mtk_aes_write_state_be(ctx->key + ctx->keylen, data->hash,
			       AES_BLOCK_SIZE);
out:
	kzfree(data);
	return err;
}
static int mtk_aes_gcm_setauthsize(struct crypto_aead *aead,
				   u32 authsize)
{
	struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(aead);
	struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);

	/* Same as crypto_gcm_authsize() from crypto/gcm.c */
	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	gctx->authsize = authsize;
	return 0;
}
static int mtk_aes_gcm_encrypt(struct aead_request *req)
{
	return mtk_aes_gcm_crypt(req, AES_FLAGS_ENCRYPT);
}

static int mtk_aes_gcm_decrypt(struct aead_request *req)
{
	return mtk_aes_gcm_crypt(req, 0);
}

static int mtk_aes_gcm_init(struct crypto_aead *aead)
{
	struct mtk_aes_gcm_ctx *ctx = crypto_aead_ctx(aead);

	ctx->ctr = crypto_alloc_skcipher("ctr(aes)", 0,
					 CRYPTO_ALG_ASYNC);
	if (IS_ERR(ctx->ctr)) {
		pr_err("Error allocating ctr(aes)\n");
		return PTR_ERR(ctx->ctr);
	}

	crypto_aead_set_reqsize(aead, sizeof(struct mtk_aes_reqctx));
	ctx->base.start = mtk_aes_gcm_start;
	return 0;
}

static void mtk_aes_gcm_exit(struct crypto_aead *aead)
{
	struct mtk_aes_gcm_ctx *ctx = crypto_aead_ctx(aead);

	crypto_free_skcipher(ctx->ctr);
}
static struct aead_alg aes_gcm_alg = {
	.setkey		= mtk_aes_gcm_setkey,
	.setauthsize	= mtk_aes_gcm_setauthsize,
	.encrypt	= mtk_aes_gcm_encrypt,
	.decrypt	= mtk_aes_gcm_decrypt,
	.init		= mtk_aes_gcm_init,
	.exit		= mtk_aes_gcm_exit,
	.ivsize		= GCM_AES_IV_SIZE,
	.maxauthsize	= AES_BLOCK_SIZE,

	.base = {
		.cra_name		= "gcm(aes)",
		.cra_driver_name	= "gcm-aes-mtk",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_ASYNC,
		.cra_blocksize		= 1,
		.cra_ctxsize		= sizeof(struct mtk_aes_gcm_ctx),
		.cra_alignmask		= 0xf,
		.cra_module		= THIS_MODULE,
	},
};
static void mtk_aes_queue_task(unsigned long data)
{
	struct mtk_aes_rec *aes = (struct mtk_aes_rec *)data;

	mtk_aes_handle_queue(aes->cryp, aes->id, NULL);
}

static void mtk_aes_done_task(unsigned long data)
{
	struct mtk_aes_rec *aes = (struct mtk_aes_rec *)data;
	struct mtk_cryp *cryp = aes->cryp;

	mtk_aes_unmap(cryp, aes);
	aes->resume(cryp, aes);
}
static irqreturn_t mtk_aes_irq(int irq, void *dev_id)
{
	struct mtk_aes_rec *aes = (struct mtk_aes_rec *)dev_id;
	struct mtk_cryp *cryp = aes->cryp;
	u32 val = mtk_aes_read(cryp, RDR_STAT(aes->id));

	mtk_aes_write(cryp, RDR_STAT(aes->id), val);

	if (likely(AES_FLAGS_BUSY & aes->flags)) {
		mtk_aes_write(cryp, RDR_PROC_COUNT(aes->id), MTK_CNT_RST);
		mtk_aes_write(cryp, RDR_THRESH(aes->id),
			      MTK_RDR_PROC_THRESH | MTK_RDR_PROC_MODE);

		tasklet_schedule(&aes->done_task);
	} else {
		dev_warn(cryp->dev, "AES interrupt when no active requests.\n");
	}
	return IRQ_HANDLED;
}
/*
 * The purpose of creating encryption and decryption records is to process
 * outbound and inbound data in parallel, which improves performance in most
 * use cases, such as IPsec VPN, especially under heavy network traffic.
 */
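/*
 * Each record owns its own request queue, tasklets, bounce buffer and
 * descriptor ring: record 0 is wired to ring0 (MTK_IRQ_RDR0) and record 1
 * to ring1 (MTK_IRQ_RDR1).
 */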
static int mtk_aes_record_init(struct mtk_cryp *cryp)
{
	struct mtk_aes_rec **aes = cryp->aes;
	int i, err = -ENOMEM;

	for (i = 0; i < MTK_REC_NUM; i++) {
		aes[i] = kzalloc(sizeof(**aes), GFP_KERNEL);
		if (!aes[i])
			goto err_cleanup;

		aes[i]->buf = (void *)__get_free_pages(GFP_KERNEL,
						       AES_BUF_ORDER);
		if (!aes[i]->buf)
			goto err_cleanup;

		aes[i]->cryp = cryp;

		spin_lock_init(&aes[i]->lock);
		crypto_init_queue(&aes[i]->queue, AES_QUEUE_SIZE);

		tasklet_init(&aes[i]->queue_task, mtk_aes_queue_task,
			     (unsigned long)aes[i]);
		tasklet_init(&aes[i]->done_task, mtk_aes_done_task,
			     (unsigned long)aes[i]);
	}

	/* Link to ring0 and ring1 respectively */
	aes[0]->id = MTK_RING0;
	aes[1]->id = MTK_RING1;

	return 0;

err_cleanup:
	for (; i--; ) {
		free_page((unsigned long)aes[i]->buf);
		kfree(aes[i]);
	}

	return err;
}
static void mtk_aes_record_free(struct mtk_cryp *cryp)
{
	int i;

	for (i = 0; i < MTK_REC_NUM; i++) {
		tasklet_kill(&cryp->aes[i]->done_task);
		tasklet_kill(&cryp->aes[i]->queue_task);

		free_page((unsigned long)cryp->aes[i]->buf);
		kfree(cryp->aes[i]);
	}
}
static void mtk_aes_unregister_algs(void)
{
	int i;

	crypto_unregister_aead(&aes_gcm_alg);

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
		crypto_unregister_skcipher(&aes_algs[i]);
}
static int mtk_aes_register_algs(void)
{
	int err, i;

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
		err = crypto_register_skcipher(&aes_algs[i]);
		if (err)
			goto err_aes_algs;
	}

	err = crypto_register_aead(&aes_gcm_alg);
	if (err)
		goto err_aes_algs;

	return 0;

err_aes_algs:
	for (; i--; )
		crypto_unregister_skcipher(&aes_algs[i]);

	return err;
}
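/*
 * Bring-up order below: allocate the two records, request the ring0/ring1
 * interrupts, enable the ring interrupts, publish the device on the global
 * list and only then register the algorithms, so that the algorithms only
 * become visible once the device is fully initialised.
 */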
int mtk_cipher_alg_register(struct mtk_cryp *cryp)
{
	int ret;

	INIT_LIST_HEAD(&cryp->aes_list);

	/* Initialize two cipher records */
	ret = mtk_aes_record_init(cryp);
	if (ret)
		goto err_record;

	ret = devm_request_irq(cryp->dev, cryp->irq[MTK_RING0], mtk_aes_irq,
			       0, "mtk-aes", cryp->aes[0]);
	if (ret) {
		dev_err(cryp->dev, "unable to request AES irq.\n");
		goto err_res;
	}

	ret = devm_request_irq(cryp->dev, cryp->irq[MTK_RING1], mtk_aes_irq,
			       0, "mtk-aes", cryp->aes[1]);
	if (ret) {
		dev_err(cryp->dev, "unable to request AES irq.\n");
		goto err_res;
	}

	/* Enable ring0 and ring1 interrupt */
	mtk_aes_write(cryp, AIC_ENABLE_SET(MTK_RING0), MTK_IRQ_RDR0);
	mtk_aes_write(cryp, AIC_ENABLE_SET(MTK_RING1), MTK_IRQ_RDR1);

	spin_lock(&mtk_aes.lock);
	list_add_tail(&cryp->aes_list, &mtk_aes.dev_list);
	spin_unlock(&mtk_aes.lock);

	ret = mtk_aes_register_algs();
	if (ret)
		goto err_algs;

	return 0;

err_algs:
	spin_lock(&mtk_aes.lock);
	list_del(&cryp->aes_list);
	spin_unlock(&mtk_aes.lock);
err_res:
	mtk_aes_record_free(cryp);
err_record:
	dev_err(cryp->dev, "mtk-aes initialization failed.\n");
	return ret;
}
void mtk_cipher_alg_release(struct mtk_cryp *cryp)
{
	spin_lock(&mtk_aes.lock);
	list_del(&cryp->aes_list);
	spin_unlock(&mtk_aes.lock);

	mtk_aes_unregister_algs();
	mtk_aes_record_free(cryp);
}