// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for EIP97 AES acceleration.
 *
 * Copyright (c) 2016 Ryder Lee <ryder.lee@mediatek.com>
 *
 * Some ideas are from the atmel-aes.c driver.
 */
#include <crypto/aes.h>
#include <crypto/gcm.h>
#include "mtk-platform.h"
#define AES_QUEUE_SIZE		512
#define AES_BUF_ORDER		2
#define AES_BUF_SIZE		((PAGE_SIZE << AES_BUF_ORDER) \
				& ~(AES_BLOCK_SIZE - 1))
#define AES_MAX_STATE_BUF_SIZE	SIZE_IN_WORDS(AES_KEYSIZE_256 + \
				AES_BLOCK_SIZE * 2)
#define AES_MAX_CT_SIZE		6

#define AES_CT_CTRL_HDR		cpu_to_le32(0x00220000)
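/*
 * Illustrative note (not from the original source): assuming 4 KiB pages,
 * AES_BUF_SIZE works out to (4096 << 2) & ~15 = 16384 bytes, i.e. each
 * record's bounce buffer below can hold up to 16 KiB of block-aligned data.
 */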
/* AES-CBC/ECB/CTR command token */
#define AES_CMD0		cpu_to_le32(0x05000000)
#define AES_CMD1		cpu_to_le32(0x2d060000)
#define AES_CMD2		cpu_to_le32(0xe4a63806)
/* AES-GCM command token */
#define AES_GCM_CMD0		cpu_to_le32(0x0b000000)
#define AES_GCM_CMD1		cpu_to_le32(0xa0800000)
#define AES_GCM_CMD2		cpu_to_le32(0x25000010)
#define AES_GCM_CMD3		cpu_to_le32(0x0f020000)
#define AES_GCM_CMD4		cpu_to_le32(0x21e60000)
#define AES_GCM_CMD5		cpu_to_le32(0x40e60000)
#define AES_GCM_CMD6		cpu_to_le32(0xd0070000)

/* AES transform information word 0 fields */
#define AES_TFM_BASIC_OUT	cpu_to_le32(0x4 << 0)
#define AES_TFM_BASIC_IN	cpu_to_le32(0x5 << 0)
#define AES_TFM_GCM_OUT		cpu_to_le32(0x6 << 0)
#define AES_TFM_GCM_IN		cpu_to_le32(0xf << 0)
#define AES_TFM_SIZE(x)		cpu_to_le32((x) << 8)
#define AES_TFM_128BITS		cpu_to_le32(0xb << 16)
#define AES_TFM_192BITS		cpu_to_le32(0xd << 16)
#define AES_TFM_256BITS		cpu_to_le32(0xf << 16)
#define AES_TFM_GHASH_DIGEST	cpu_to_le32(0x2 << 21)
#define AES_TFM_GHASH		cpu_to_le32(0x4 << 23)
/* AES transform information word 1 fields */
#define AES_TFM_ECB		cpu_to_le32(0x0 << 0)
#define AES_TFM_CBC		cpu_to_le32(0x1 << 0)
#define AES_TFM_CTR_INIT	cpu_to_le32(0x2 << 0)	/* init counter to 1 */
#define AES_TFM_CTR_LOAD	cpu_to_le32(0x6 << 0)	/* load/reuse counter */
#define AES_TFM_3IV		cpu_to_le32(0x7 << 5)	/* using IV 0-2 */
#define AES_TFM_FULL_IV		cpu_to_le32(0xf << 5)	/* using IV 0-3 */
#define AES_TFM_IV_CTR_MODE	cpu_to_le32(0x1 << 10)
#define AES_TFM_ENC_HASH	cpu_to_le32(0x1 << 17)
#define AES_FLAGS_CIPHER_MSK	GENMASK(2, 0)
#define AES_FLAGS_ECB		BIT(0)
#define AES_FLAGS_CBC		BIT(1)
#define AES_FLAGS_CTR		BIT(2)
#define AES_FLAGS_GCM		BIT(3)
#define AES_FLAGS_ENCRYPT	BIT(4)
#define AES_FLAGS_BUSY		BIT(5)

#define AES_AUTH_TAG_ERR	cpu_to_le32(BIT(26))
/**
 * mtk_aes_info - hardware information of AES
 * @cmd:	command token, hardware instruction
 * @tfm:	transform state of cipher algorithm.
 * @state:	contains keys and initial vectors.
 *
 * Memory layout of GCM buffer:
 * /-----------\
 * |  AES KEY  | 128/192/256 bits
 * |-----------|
 * |  HASH KEY | a string of 128 zero bits encrypted using the block cipher
 * |-----------|
 * |    IVs    | 4 * 4 bytes
 * \-----------/
 *
 * The engine requires all of this information to do:
 * - command decoding and control of the engine's data path,
 * - coordination of hardware data fetch and store operations,
 * - result token construction and output.
 */
struct mtk_aes_info {
	__le32 cmd[AES_MAX_CT_SIZE];
	__le32 tfm[2];
	__le32 state[AES_MAX_STATE_BUF_SIZE];
};
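/*
 * Illustrative layout of @state for GCM with a 256-bit key (a sketch derived
 * from how mtk_aes_gcm_setkey() and mtk_aes_gcm_info_init() below fill it):
 *   state[0..7]   - AES key words
 *   state[8..11]  - hash key H (written at state + keylen)
 *   state[12..14] - IV words (written at state + keylen + 4 words)
 */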
struct mtk_aes_reqctx {
	u64 mode;
};

struct mtk_aes_base_ctx {
	struct mtk_cryp *cryp;
	u32 keylen;
	__le32 keymode;

	mtk_aes_fn start;

	struct mtk_aes_info info;
	dma_addr_t ct_dma;
	dma_addr_t tfm_dma;

	__le32 ct_hdr;
	u32 ct_size;
};

struct mtk_aes_ctx {
	struct mtk_aes_base_ctx	base;
};
struct mtk_aes_ctr_ctx {
	struct mtk_aes_base_ctx base;

	u32	iv[AES_BLOCK_SIZE / sizeof(u32)];
	size_t offset;
	struct scatterlist src[2];
	struct scatterlist dst[2];
};
struct mtk_aes_gcm_ctx {
	struct mtk_aes_base_ctx base;

	u32 authsize;
	size_t textlen;

	struct crypto_skcipher *ctr;
};
struct mtk_aes_drv {
	struct list_head dev_list;
	/* Device list lock */
	spinlock_t lock;
};

static struct mtk_aes_drv mtk_aes = {
	.dev_list = LIST_HEAD_INIT(mtk_aes.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(mtk_aes.lock),
};
static inline u32 mtk_aes_read(struct mtk_cryp *cryp, u32 offset)
{
	return readl_relaxed(cryp->base + offset);
}

static inline void mtk_aes_write(struct mtk_cryp *cryp,
				 u32 offset, u32 value)
{
	writel_relaxed(value, cryp->base + offset);
}
static struct mtk_cryp *mtk_aes_find_dev(struct mtk_aes_base_ctx *ctx)
{
	struct mtk_cryp *cryp = NULL;
	struct mtk_cryp *tmp;

	spin_lock_bh(&mtk_aes.lock);
	if (!ctx->cryp) {
		list_for_each_entry(tmp, &mtk_aes.dev_list, aes_list) {
			cryp = tmp;
			break;
		}
		ctx->cryp = cryp;
	} else {
		cryp = ctx->cryp;
	}
	spin_unlock_bh(&mtk_aes.lock);

	return cryp;
}
static inline size_t mtk_aes_padlen(size_t len)
{
	len &= AES_BLOCK_SIZE - 1;
	return len ? AES_BLOCK_SIZE - len : 0;
}
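/*
 * Illustrative example: len = 20 -> padlen = 12 (pad up to 32 bytes);
 * len = 32 -> padlen = 0 (already a multiple of AES_BLOCK_SIZE).
 */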
static bool mtk_aes_check_aligned(struct scatterlist *sg, size_t len,
				  struct mtk_aes_dma *dma)
{
	int nents;

	if (!IS_ALIGNED(len, AES_BLOCK_SIZE))
		return false;

	for (nents = 0; sg; sg = sg_next(sg), ++nents) {
		if (!IS_ALIGNED(sg->offset, sizeof(u32)))
			return false;

		if (len <= sg->length) {
			if (!IS_ALIGNED(len, AES_BLOCK_SIZE))
				return false;

			dma->nents = nents + 1;
			dma->remainder = sg->length - len;
			sg->length = len;
			return true;
		}

		if (!IS_ALIGNED(sg->length, AES_BLOCK_SIZE))
			return false;

		len -= sg->length;
	}

	return false;
}
static inline void mtk_aes_set_mode(struct mtk_aes_rec *aes,
				    const struct mtk_aes_reqctx *rctx)
{
	/* Clear all but persistent flags and set request flags. */
	aes->flags = (aes->flags & AES_FLAGS_BUSY) | rctx->mode;
}
static inline void mtk_aes_restore_sg(const struct mtk_aes_dma *dma)
{
	struct scatterlist *sg = dma->sg;
	int nents = dma->nents;

	if (!dma->remainder)
		return;

	while (--nents > 0 && sg)
		sg = sg_next(sg);

	if (!sg)
		return;

	sg->length += dma->remainder;
}
static inline void mtk_aes_write_state_le(__le32 *dst, const u32 *src, u32 size)
{
	int i;

	for (i = 0; i < SIZE_IN_WORDS(size); i++)
		dst[i] = cpu_to_le32(src[i]);
}

static inline void mtk_aes_write_state_be(__be32 *dst, const u32 *src, u32 size)
{
	int i;

	for (i = 0; i < SIZE_IN_WORDS(size); i++)
		dst[i] = cpu_to_be32(src[i]);
}
static inline int mtk_aes_complete(struct mtk_cryp *cryp,
				   struct mtk_aes_rec *aes,
				   int err)
{
	aes->flags &= ~AES_FLAGS_BUSY;
	aes->areq->complete(aes->areq, err);
	/* Handle new request */
	tasklet_schedule(&aes->queue_task);
	return err;
}
/*
 * Write descriptors for processing. This will configure the engine, load
 * the transform information and then start the packet processing.
 */
static int mtk_aes_xmit(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_ring *ring = cryp->ring[aes->id];
	struct mtk_desc *cmd = NULL, *res = NULL;
	struct scatterlist *ssg = aes->src.sg, *dsg = aes->dst.sg;
	u32 slen = aes->src.sg_len, dlen = aes->dst.sg_len;
	int nents;

	/* Write command descriptors */
	for (nents = 0; nents < slen; ++nents, ssg = sg_next(ssg)) {
		cmd = ring->cmd_next;
		cmd->hdr = MTK_DESC_BUF_LEN(ssg->length);
		cmd->buf = cpu_to_le32(sg_dma_address(ssg));

		if (nents == 0) {
			cmd->hdr |= MTK_DESC_FIRST |
				    MTK_DESC_CT_LEN(aes->ctx->ct_size);
			cmd->ct = cpu_to_le32(aes->ctx->ct_dma);
			cmd->ct_hdr = aes->ctx->ct_hdr;
			cmd->tfm = cpu_to_le32(aes->ctx->tfm_dma);
		}

		/* Shift ring buffer and check boundary */
		if (++ring->cmd_next == ring->cmd_base + MTK_DESC_NUM)
			ring->cmd_next = ring->cmd_base;
	}
	cmd->hdr |= MTK_DESC_LAST;

	/* Prepare result descriptors */
	for (nents = 0; nents < dlen; ++nents, dsg = sg_next(dsg)) {
		res = ring->res_next;
		res->hdr = MTK_DESC_BUF_LEN(dsg->length);
		res->buf = cpu_to_le32(sg_dma_address(dsg));

		if (nents == 0)
			res->hdr |= MTK_DESC_FIRST;

		/* Shift ring buffer and check boundary */
		if (++ring->res_next == ring->res_base + MTK_DESC_NUM)
			ring->res_next = ring->res_base;
	}
	res->hdr |= MTK_DESC_LAST;

	/* Pointer to current result descriptor */
	ring->res_prev = res;

	/* Prepare enough space for authenticated tag */
	if (aes->flags & AES_FLAGS_GCM)
		res->hdr += AES_BLOCK_SIZE;

	/*
	 * Make sure that all changes to the DMA ring are done before we
	 * start the engine.
	 */
	wmb();
	/* Start DMA transfer */
	mtk_aes_write(cryp, RDR_PREP_COUNT(aes->id), MTK_DESC_CNT(dlen));
	mtk_aes_write(cryp, CDR_PREP_COUNT(aes->id), MTK_DESC_CNT(slen));

	return -EINPROGRESS;
}
static void mtk_aes_unmap(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_aes_base_ctx *ctx = aes->ctx;

	dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(ctx->info),
			 DMA_TO_DEVICE);

	if (aes->src.sg == aes->dst.sg) {
		dma_unmap_sg(cryp->dev, aes->src.sg, aes->src.nents,
			     DMA_BIDIRECTIONAL);

		if (aes->src.sg != &aes->aligned_sg)
			mtk_aes_restore_sg(&aes->src);
	} else {
		dma_unmap_sg(cryp->dev, aes->dst.sg, aes->dst.nents,
			     DMA_FROM_DEVICE);

		if (aes->dst.sg != &aes->aligned_sg)
			mtk_aes_restore_sg(&aes->dst);

		dma_unmap_sg(cryp->dev, aes->src.sg, aes->src.nents,
			     DMA_TO_DEVICE);

		if (aes->src.sg != &aes->aligned_sg)
			mtk_aes_restore_sg(&aes->src);
	}

	if (aes->dst.sg == &aes->aligned_sg)
		sg_copy_from_buffer(aes->real_dst, sg_nents(aes->real_dst),
				    aes->buf, aes->total);
}
static int mtk_aes_map(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_aes_base_ctx *ctx = aes->ctx;
	struct mtk_aes_info *info = &ctx->info;

	ctx->ct_dma = dma_map_single(cryp->dev, info, sizeof(*info),
				     DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(cryp->dev, ctx->ct_dma)))
		goto exit;

	ctx->tfm_dma = ctx->ct_dma + sizeof(info->cmd);

	if (aes->src.sg == aes->dst.sg) {
		aes->src.sg_len = dma_map_sg(cryp->dev, aes->src.sg,
					     aes->src.nents,
					     DMA_BIDIRECTIONAL);
		aes->dst.sg_len = aes->src.sg_len;
		if (unlikely(!aes->src.sg_len))
			goto sg_map_err;
	} else {
		aes->src.sg_len = dma_map_sg(cryp->dev, aes->src.sg,
					     aes->src.nents, DMA_TO_DEVICE);
		if (unlikely(!aes->src.sg_len))
			goto sg_map_err;

		aes->dst.sg_len = dma_map_sg(cryp->dev, aes->dst.sg,
					     aes->dst.nents, DMA_FROM_DEVICE);
		if (unlikely(!aes->dst.sg_len)) {
			dma_unmap_sg(cryp->dev, aes->src.sg, aes->src.nents,
				     DMA_TO_DEVICE);
			goto sg_map_err;
		}
	}

	return mtk_aes_xmit(cryp, aes);

sg_map_err:
	dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(*info), DMA_TO_DEVICE);
exit:
	return mtk_aes_complete(cryp, aes, -EINVAL);
}
/* Initialize transform information of CBC/ECB/CTR mode */
static void mtk_aes_info_init(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
			      size_t len)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq);
	struct mtk_aes_base_ctx *ctx = aes->ctx;
	struct mtk_aes_info *info = &ctx->info;
	u32 cnt = 0;

	ctx->ct_hdr = AES_CT_CTRL_HDR | cpu_to_le32(len);
	info->cmd[cnt++] = AES_CMD0 | cpu_to_le32(len);
	info->cmd[cnt++] = AES_CMD1;

	info->tfm[0] = AES_TFM_SIZE(ctx->keylen) | ctx->keymode;
	if (aes->flags & AES_FLAGS_ENCRYPT)
		info->tfm[0] |= AES_TFM_BASIC_OUT;
	else
		info->tfm[0] |= AES_TFM_BASIC_IN;

	switch (aes->flags & AES_FLAGS_CIPHER_MSK) {
	case AES_FLAGS_CBC:
		info->tfm[1] = AES_TFM_CBC;
		break;
	case AES_FLAGS_ECB:
		info->tfm[1] = AES_TFM_ECB;
		goto ecb;
	case AES_FLAGS_CTR:
		info->tfm[1] = AES_TFM_CTR_LOAD;
		goto ctr;

	default:
		/* Should not happen... */
		return;
	}

	mtk_aes_write_state_le(info->state + ctx->keylen, req->info,
			       AES_BLOCK_SIZE);
ctr:
	info->tfm[0] += AES_TFM_SIZE(SIZE_IN_WORDS(AES_BLOCK_SIZE));
	info->tfm[1] |= AES_TFM_FULL_IV;
	info->cmd[cnt++] = AES_CMD2;
ecb:
	ctx->ct_size = cnt;
}
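/*
 * Illustrative summary of the token counts produced above: ECB emits two
 * command words (AES_CMD0/AES_CMD1), while CBC and CTR append AES_CMD2 for
 * the IV, so ct_size ends up as 2 or 3 respectively.
 */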
static int mtk_aes_dma(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
		       struct scatterlist *src, struct scatterlist *dst,
		       size_t len)
{
	size_t padlen = 0;
	bool src_aligned, dst_aligned;

	aes->total = len;
	aes->src.sg = src;
	aes->dst.sg = dst;
	aes->real_dst = dst;

	src_aligned = mtk_aes_check_aligned(src, len, &aes->src);
	if (src == dst)
		dst_aligned = src_aligned;
	else
		dst_aligned = mtk_aes_check_aligned(dst, len, &aes->dst);

	if (!src_aligned || !dst_aligned) {
		padlen = mtk_aes_padlen(len);

		if (len + padlen > AES_BUF_SIZE)
			return mtk_aes_complete(cryp, aes, -ENOMEM);

		if (!src_aligned) {
			sg_copy_to_buffer(src, sg_nents(src), aes->buf, len);
			aes->src.sg = &aes->aligned_sg;
			aes->src.nents = 1;
			aes->src.remainder = 0;
		}

		if (!dst_aligned) {
			aes->dst.sg = &aes->aligned_sg;
			aes->dst.nents = 1;
			aes->dst.remainder = 0;
		}

		sg_init_table(&aes->aligned_sg, 1);
		sg_set_buf(&aes->aligned_sg, aes->buf, len + padlen);
	}

	mtk_aes_info_init(cryp, aes, len + padlen);

	return mtk_aes_map(cryp, aes);
}
static int mtk_aes_handle_queue(struct mtk_cryp *cryp, u8 id,
				struct crypto_async_request *new_areq)
{
	struct mtk_aes_rec *aes = cryp->aes[id];
	struct crypto_async_request *areq, *backlog;
	struct mtk_aes_base_ctx *ctx;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&aes->lock, flags);
	if (new_areq)
		ret = crypto_enqueue_request(&aes->queue, new_areq);
	if (aes->flags & AES_FLAGS_BUSY) {
		spin_unlock_irqrestore(&aes->lock, flags);
		return ret;
	}
	backlog = crypto_get_backlog(&aes->queue);
	areq = crypto_dequeue_request(&aes->queue);
	if (areq)
		aes->flags |= AES_FLAGS_BUSY;
	spin_unlock_irqrestore(&aes->lock, flags);

	if (!areq)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	ctx = crypto_tfm_ctx(areq->tfm);

	aes->areq = areq;
	aes->ctx = ctx;

	return ctx->start(cryp, aes);
}

static int mtk_aes_transfer_complete(struct mtk_cryp *cryp,
				     struct mtk_aes_rec *aes)
{
	return mtk_aes_complete(cryp, aes, 0);
}
static int mtk_aes_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq);
	struct mtk_aes_reqctx *rctx = ablkcipher_request_ctx(req);

	mtk_aes_set_mode(aes, rctx);
	aes->resume = mtk_aes_transfer_complete;

	return mtk_aes_dma(cryp, aes, req->src, req->dst, req->nbytes);
}
static inline struct mtk_aes_ctr_ctx *
mtk_aes_ctr_ctx_cast(struct mtk_aes_base_ctx *ctx)
{
	return container_of(ctx, struct mtk_aes_ctr_ctx, base);
}
static int mtk_aes_ctr_transfer(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_aes_base_ctx *ctx = aes->ctx;
	struct mtk_aes_ctr_ctx *cctx = mtk_aes_ctr_ctx_cast(ctx);
	struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq);
	struct scatterlist *src, *dst;
	u32 start, end, ctr, blocks;
	size_t datalen;
	bool fragmented = false;

	/* Check for transfer completion. */
	cctx->offset += aes->total;
	if (cctx->offset >= req->nbytes)
		return mtk_aes_transfer_complete(cryp, aes);

	/* Compute data length. */
	datalen = req->nbytes - cctx->offset;
	blocks = DIV_ROUND_UP(datalen, AES_BLOCK_SIZE);
	ctr = be32_to_cpu(cctx->iv[3]);

	/* Check 32bit counter overflow. */
	start = ctr;
	end = start + blocks - 1;
	if (end < start) {
		ctr = 0xffffffff;
		datalen = AES_BLOCK_SIZE * -start;
		fragmented = true;
	}

	/* Jump to offset. */
	src = scatterwalk_ffwd(cctx->src, req->src, cctx->offset);
	dst = ((req->src == req->dst) ? src :
	       scatterwalk_ffwd(cctx->dst, req->dst, cctx->offset));

	/* Write IVs into transform state buffer. */
	mtk_aes_write_state_le(ctx->info.state + ctx->keylen, cctx->iv,
			       AES_BLOCK_SIZE);

	if (unlikely(fragmented)) {
		/*
		 * Increment the counter manually to cope with the hardware
		 * counter overflow.
		 */
		cctx->iv[3] = cpu_to_be32(ctr);
		crypto_inc((u8 *)cctx->iv, AES_BLOCK_SIZE);
	}

	return mtk_aes_dma(cryp, aes, src, dst, datalen);
}
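/*
 * Worked example for the 32-bit counter overflow check above (illustrative):
 * with cctx->iv[3] = 0xfffffffe and five blocks left, end = start + 4 wraps
 * below start, so this pass is limited to AES_BLOCK_SIZE * (0 - 0xfffffffe)
 * = 2 blocks; the IV is then advanced past the wrap point and the remaining
 * blocks are submitted in the next mtk_aes_ctr_transfer() round.
 */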
static int mtk_aes_ctr_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_aes_ctr_ctx *cctx = mtk_aes_ctr_ctx_cast(aes->ctx);
	struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq);
	struct mtk_aes_reqctx *rctx = ablkcipher_request_ctx(req);

	mtk_aes_set_mode(aes, rctx);

	memcpy(cctx->iv, req->info, AES_BLOCK_SIZE);
	cctx->offset = 0;
	aes->total = 0;
	aes->resume = mtk_aes_ctr_transfer;

	return mtk_aes_ctr_transfer(cryp, aes);
}
/* Check and set the AES key to transform state buffer */
static int mtk_aes_setkey(struct crypto_ablkcipher *tfm,
			  const u8 *key, u32 keylen)
{
	struct mtk_aes_base_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	switch (keylen) {
	case AES_KEYSIZE_128:
		ctx->keymode = AES_TFM_128BITS;
		break;
	case AES_KEYSIZE_192:
		ctx->keymode = AES_TFM_192BITS;
		break;
	case AES_KEYSIZE_256:
		ctx->keymode = AES_TFM_256BITS;
		break;

	default:
		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	ctx->keylen = SIZE_IN_WORDS(keylen);
	mtk_aes_write_state_le(ctx->info.state, (const u32 *)key, keylen);

	return 0;
}
static int mtk_aes_crypt(struct ablkcipher_request *req, u64 mode)
{
	struct mtk_aes_base_ctx *ctx;
	struct mtk_aes_reqctx *rctx;

	ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
	rctx = ablkcipher_request_ctx(req);
	rctx->mode = mode;

	return mtk_aes_handle_queue(ctx->cryp, !(mode & AES_FLAGS_ENCRYPT),
				    &req->base);
}
static int mtk_aes_ecb_encrypt(struct ablkcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_ECB);
}

static int mtk_aes_ecb_decrypt(struct ablkcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_ECB);
}

static int mtk_aes_cbc_encrypt(struct ablkcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CBC);
}

static int mtk_aes_cbc_decrypt(struct ablkcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_CBC);
}

static int mtk_aes_ctr_encrypt(struct ablkcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CTR);
}

static int mtk_aes_ctr_decrypt(struct ablkcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_CTR);
}
static int mtk_aes_cra_init(struct crypto_tfm *tfm)
{
	struct mtk_aes_ctx *ctx = crypto_tfm_ctx(tfm);
	struct mtk_cryp *cryp = NULL;

	cryp = mtk_aes_find_dev(&ctx->base);
	if (!cryp) {
		pr_err("can't find crypto device\n");
		return -ENODEV;
	}

	tfm->crt_ablkcipher.reqsize = sizeof(struct mtk_aes_reqctx);
	ctx->base.start = mtk_aes_start;
	return 0;
}

static int mtk_aes_ctr_cra_init(struct crypto_tfm *tfm)
{
	struct mtk_aes_ctx *ctx = crypto_tfm_ctx(tfm);
	struct mtk_cryp *cryp = NULL;

	cryp = mtk_aes_find_dev(&ctx->base);
	if (!cryp) {
		pr_err("can't find crypto device\n");
		return -ENODEV;
	}

	tfm->crt_ablkcipher.reqsize = sizeof(struct mtk_aes_reqctx);
	ctx->base.start = mtk_aes_ctr_start;
	return 0;
}
static struct crypto_alg aes_algs[] = {
{
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "cbc-aes-mtk",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
				  CRYPTO_ALG_ASYNC,
	.cra_init		= mtk_aes_cra_init,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct mtk_aes_ctx),
	.cra_alignmask		= 0xf,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u.ablkcipher	= {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.setkey		= mtk_aes_setkey,
		.encrypt	= mtk_aes_cbc_encrypt,
		.decrypt	= mtk_aes_cbc_decrypt,
		.ivsize		= AES_BLOCK_SIZE,
	}
},
{
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "ecb-aes-mtk",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
				  CRYPTO_ALG_ASYNC,
	.cra_init		= mtk_aes_cra_init,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct mtk_aes_ctx),
	.cra_alignmask		= 0xf,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u.ablkcipher	= {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.setkey		= mtk_aes_setkey,
		.encrypt	= mtk_aes_ecb_encrypt,
		.decrypt	= mtk_aes_ecb_decrypt,
	}
},
{
	.cra_name		= "ctr(aes)",
	.cra_driver_name	= "ctr-aes-mtk",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
				  CRYPTO_ALG_ASYNC,
	.cra_init		= mtk_aes_ctr_cra_init,
	.cra_blocksize		= 1,
	.cra_ctxsize		= sizeof(struct mtk_aes_ctr_ctx),
	.cra_alignmask		= 0xf,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u.ablkcipher	= {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= mtk_aes_setkey,
		.encrypt	= mtk_aes_ctr_encrypt,
		.decrypt	= mtk_aes_ctr_decrypt,
	}
},
};
static inline struct mtk_aes_gcm_ctx *
mtk_aes_gcm_ctx_cast(struct mtk_aes_base_ctx *ctx)
{
	return container_of(ctx, struct mtk_aes_gcm_ctx, base);
}
/*
 * Engine will verify and compare tag automatically, so we just need
 * to check the returned status stored in the result descriptor.
 */
static int mtk_aes_gcm_tag_verify(struct mtk_cryp *cryp,
				  struct mtk_aes_rec *aes)
{
	u32 status = cryp->ring[aes->id]->res_prev->ct;

	return mtk_aes_complete(cryp, aes, (status & AES_AUTH_TAG_ERR) ?
				-EBADMSG : 0);
}

/* Initialize transform information of GCM mode */
static void mtk_aes_gcm_info_init(struct mtk_cryp *cryp,
				  struct mtk_aes_rec *aes,
				  size_t len)
{
	struct aead_request *req = aead_request_cast(aes->areq);
	struct mtk_aes_base_ctx *ctx = aes->ctx;
	struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);
	struct mtk_aes_info *info = &ctx->info;
	u32 ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
	u32 cnt = 0;

	ctx->ct_hdr = AES_CT_CTRL_HDR | len;

	info->cmd[cnt++] = AES_GCM_CMD0 | cpu_to_le32(req->assoclen);
	info->cmd[cnt++] = AES_GCM_CMD1 | cpu_to_le32(req->assoclen);
	info->cmd[cnt++] = AES_GCM_CMD2;
	info->cmd[cnt++] = AES_GCM_CMD3 | cpu_to_le32(gctx->textlen);

	if (aes->flags & AES_FLAGS_ENCRYPT) {
		info->cmd[cnt++] = AES_GCM_CMD4 | cpu_to_le32(gctx->authsize);
		info->tfm[0] = AES_TFM_GCM_OUT;
	} else {
		info->cmd[cnt++] = AES_GCM_CMD5 | cpu_to_le32(gctx->authsize);
		info->cmd[cnt++] = AES_GCM_CMD6 | cpu_to_le32(gctx->authsize);
		info->tfm[0] = AES_TFM_GCM_IN;
	}
	ctx->ct_size = cnt;

	info->tfm[0] |= AES_TFM_GHASH_DIGEST | AES_TFM_GHASH | AES_TFM_SIZE(
			ctx->keylen + SIZE_IN_WORDS(AES_BLOCK_SIZE + ivsize)) |
			ctx->keymode;
	info->tfm[1] = AES_TFM_CTR_INIT | AES_TFM_IV_CTR_MODE | AES_TFM_3IV |
		       AES_TFM_ENC_HASH;

	mtk_aes_write_state_le(info->state + ctx->keylen + SIZE_IN_WORDS(
			       AES_BLOCK_SIZE), (const u32 *)req->iv, ivsize);
}
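/*
 * Illustrative note on the command tokens built above: the encrypt path
 * emits five command words (AES_GCM_CMD0..CMD4) and the decrypt path six
 * (CMD0..CMD3 plus CMD5 and CMD6), which is what ct_size records for the
 * first command descriptor.
 */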
static int mtk_aes_gcm_dma(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
			   struct scatterlist *src, struct scatterlist *dst,
			   size_t len)
{
	bool src_aligned, dst_aligned;

	aes->src.sg = src;
	aes->dst.sg = dst;
	aes->real_dst = dst;

	src_aligned = mtk_aes_check_aligned(src, len, &aes->src);
	if (src == dst)
		dst_aligned = src_aligned;
	else
		dst_aligned = mtk_aes_check_aligned(dst, len, &aes->dst);

	if (!src_aligned || !dst_aligned) {
		if (aes->total > AES_BUF_SIZE)
			return mtk_aes_complete(cryp, aes, -ENOMEM);

		if (!src_aligned) {
			sg_copy_to_buffer(src, sg_nents(src), aes->buf, len);
			aes->src.sg = &aes->aligned_sg;
			aes->src.nents = 1;
			aes->src.remainder = 0;
		}

		if (!dst_aligned) {
			aes->dst.sg = &aes->aligned_sg;
			aes->dst.nents = 1;
			aes->dst.remainder = 0;
		}

		sg_init_table(&aes->aligned_sg, 1);
		sg_set_buf(&aes->aligned_sg, aes->buf, aes->total);
	}

	mtk_aes_gcm_info_init(cryp, aes, len);

	return mtk_aes_map(cryp, aes);
}

static int mtk_aes_gcm_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(aes->ctx);
	struct aead_request *req = aead_request_cast(aes->areq);
	struct mtk_aes_reqctx *rctx = aead_request_ctx(req);
	u32 len = req->assoclen + req->cryptlen;

	mtk_aes_set_mode(aes, rctx);

	if (aes->flags & AES_FLAGS_ENCRYPT) {
		u32 tag[4];

		aes->resume = mtk_aes_transfer_complete;
		/* Compute total process length. */
		aes->total = len + gctx->authsize;
		/* Compute text length. */
		gctx->textlen = req->cryptlen;
		/* Hardware will append authenticated tag to output buffer */
		scatterwalk_map_and_copy(tag, req->dst, len, gctx->authsize, 1);
	} else {
		aes->resume = mtk_aes_gcm_tag_verify;
		aes->total = len;
		gctx->textlen = req->cryptlen - gctx->authsize;
	}

	return mtk_aes_gcm_dma(cryp, aes, req->src, req->dst, len);
}
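/*
 * Length bookkeeping example (illustrative): with assoclen = 16,
 * cryptlen = 64 and a 16-byte tag, encryption processes
 * total = 16 + 64 + 16 = 96 bytes with textlen = 64, while decryption
 * processes total = 80 bytes with textlen = 64 - 16 = 48.
 */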
static int mtk_aes_gcm_crypt(struct aead_request *req, u64 mode)
{
	struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);
	struct mtk_aes_reqctx *rctx = aead_request_ctx(req);

	/* Empty messages are not supported yet */
	if (!gctx->textlen && !req->assoclen)
		return -EINVAL;

	rctx->mode = AES_FLAGS_GCM | mode;

	return mtk_aes_handle_queue(ctx->cryp, !!(mode & AES_FLAGS_ENCRYPT),
				    &req->base);
}
/*
 * Because of the hardware limitation, we need to pre-calculate key(H)
 * for the GHASH operation. The result of that encryption operation
 * needs to be stored in the transform state buffer.
 */
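/*
 * In other words (illustrative note): the hash key H used by GHASH is the
 * AES encryption of a 16-byte all-zero block under the cipher key. The
 * helper below obtains it by running a one-block "ctr(aes)" encryption of
 * zeroed data with a zeroed counter, then stores H big-endian in the state
 * buffer right after the key.
 */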
static int mtk_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key,
			      u32 keylen)
{
	struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(aead);
	struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);
	struct crypto_skcipher *ctr = gctx->ctr;
	struct {
		u32 hash[4];
		u8 iv[16];

		struct crypto_wait wait;

		struct scatterlist sg[1];
		struct skcipher_request req;
	} *data;
	int err;

	switch (keylen) {
	case AES_KEYSIZE_128:
		ctx->keymode = AES_TFM_128BITS;
		break;
	case AES_KEYSIZE_192:
		ctx->keymode = AES_TFM_192BITS;
		break;
	case AES_KEYSIZE_256:
		ctx->keymode = AES_TFM_256BITS;
		break;

	default:
		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	ctx->keylen = SIZE_IN_WORDS(keylen);

	/* Same as crypto_gcm_setkey() from crypto/gcm.c */
	crypto_skcipher_clear_flags(ctr, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(ctr, crypto_aead_get_flags(aead) &
				  CRYPTO_TFM_REQ_MASK);
	err = crypto_skcipher_setkey(ctr, key, keylen);
	crypto_aead_set_flags(aead, crypto_skcipher_get_flags(ctr) &
			      CRYPTO_TFM_RES_MASK);
	if (err)
		return err;

	data = kzalloc(sizeof(*data) + crypto_skcipher_reqsize(ctr),
		       GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	crypto_init_wait(&data->wait);
	sg_init_one(data->sg, &data->hash, AES_BLOCK_SIZE);
	skcipher_request_set_tfm(&data->req, ctr);
	skcipher_request_set_callback(&data->req, CRYPTO_TFM_REQ_MAY_SLEEP |
				      CRYPTO_TFM_REQ_MAY_BACKLOG,
				      crypto_req_done, &data->wait);
	skcipher_request_set_crypt(&data->req, data->sg, data->sg,
				   AES_BLOCK_SIZE, data->iv);

	err = crypto_wait_req(crypto_skcipher_encrypt(&data->req),
			      &data->wait);
	if (err)
		goto out;

	/* Write key into state buffer */
	mtk_aes_write_state_le(ctx->info.state, (const u32 *)key, keylen);
	/* Write key(H) into state buffer */
	mtk_aes_write_state_be(ctx->info.state + ctx->keylen, data->hash,
			       AES_BLOCK_SIZE);
out:
	kzfree(data);
	return err;
}

static int mtk_aes_gcm_setauthsize(struct crypto_aead *aead,
				   u32 authsize)
{
	struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(aead);
	struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);

	/* Same as crypto_gcm_authsize() from crypto/gcm.c */
	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	gctx->authsize = authsize;
	return 0;
}

static int mtk_aes_gcm_encrypt(struct aead_request *req)
{
	return mtk_aes_gcm_crypt(req, AES_FLAGS_ENCRYPT);
}

static int mtk_aes_gcm_decrypt(struct aead_request *req)
{
	return mtk_aes_gcm_crypt(req, 0);
}

static int mtk_aes_gcm_init(struct crypto_aead *aead)
{
	struct mtk_aes_gcm_ctx *ctx = crypto_aead_ctx(aead);
	struct mtk_cryp *cryp = NULL;

	cryp = mtk_aes_find_dev(&ctx->base);
	if (!cryp) {
		pr_err("can't find crypto device\n");
		return -ENODEV;
	}

	ctx->ctr = crypto_alloc_skcipher("ctr(aes)", 0,
					 CRYPTO_ALG_ASYNC);
	if (IS_ERR(ctx->ctr)) {
		pr_err("Error allocating ctr(aes)\n");
		return PTR_ERR(ctx->ctr);
	}

	crypto_aead_set_reqsize(aead, sizeof(struct mtk_aes_reqctx));
	ctx->base.start = mtk_aes_gcm_start;
	return 0;
}

static void mtk_aes_gcm_exit(struct crypto_aead *aead)
{
	struct mtk_aes_gcm_ctx *ctx = crypto_aead_ctx(aead);

	crypto_free_skcipher(ctx->ctr);
}

static struct aead_alg aes_gcm_alg = {
	.setkey		= mtk_aes_gcm_setkey,
	.setauthsize	= mtk_aes_gcm_setauthsize,
	.encrypt	= mtk_aes_gcm_encrypt,
	.decrypt	= mtk_aes_gcm_decrypt,
	.init		= mtk_aes_gcm_init,
	.exit		= mtk_aes_gcm_exit,
	.ivsize		= GCM_AES_IV_SIZE,
	.maxauthsize	= AES_BLOCK_SIZE,

	.base = {
		.cra_name		= "gcm(aes)",
		.cra_driver_name	= "gcm-aes-mtk",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_ASYNC,
		.cra_blocksize		= 1,
		.cra_ctxsize		= sizeof(struct mtk_aes_gcm_ctx),
		.cra_alignmask		= 0xf,
		.cra_module		= THIS_MODULE,
	},
};
static void mtk_aes_queue_task(unsigned long data)
{
	struct mtk_aes_rec *aes = (struct mtk_aes_rec *)data;

	mtk_aes_handle_queue(aes->cryp, aes->id, NULL);
}

static void mtk_aes_done_task(unsigned long data)
{
	struct mtk_aes_rec *aes = (struct mtk_aes_rec *)data;
	struct mtk_cryp *cryp = aes->cryp;

	mtk_aes_unmap(cryp, aes);
	aes->resume(cryp, aes);
}
static irqreturn_t mtk_aes_irq(int irq, void *dev_id)
{
	struct mtk_aes_rec *aes = (struct mtk_aes_rec *)dev_id;
	struct mtk_cryp *cryp = aes->cryp;
	u32 val = mtk_aes_read(cryp, RDR_STAT(aes->id));

	mtk_aes_write(cryp, RDR_STAT(aes->id), val);

	if (likely(AES_FLAGS_BUSY & aes->flags)) {
		mtk_aes_write(cryp, RDR_PROC_COUNT(aes->id), MTK_CNT_RST);
		mtk_aes_write(cryp, RDR_THRESH(aes->id),
			      MTK_RDR_PROC_THRESH | MTK_RDR_PROC_MODE);

		tasklet_schedule(&aes->done_task);
	} else {
		dev_warn(cryp->dev, "AES interrupt when no active requests.\n");
	}
	return IRQ_HANDLED;
}
/*
 * The purpose of creating encryption and decryption records is to process
 * outbound/inbound data in parallel. This can improve performance in most
 * use cases, such as IPsec VPN, especially under heavy network traffic.
 */
static int mtk_aes_record_init(struct mtk_cryp *cryp)
{
	struct mtk_aes_rec **aes = cryp->aes;
	int i, err = -ENOMEM;

	for (i = 0; i < MTK_REC_NUM; i++) {
		aes[i] = kzalloc(sizeof(**aes), GFP_KERNEL);
		if (!aes[i])
			goto err_cleanup;

		aes[i]->buf = (void *)__get_free_pages(GFP_KERNEL,
						       AES_BUF_ORDER);
		if (!aes[i]->buf)
			goto err_cleanup;

		aes[i]->cryp = cryp;

		spin_lock_init(&aes[i]->lock);
		crypto_init_queue(&aes[i]->queue, AES_QUEUE_SIZE);

		tasklet_init(&aes[i]->queue_task, mtk_aes_queue_task,
			     (unsigned long)aes[i]);
		tasklet_init(&aes[i]->done_task, mtk_aes_done_task,
			     (unsigned long)aes[i]);
	}

	/* Link to ring0 and ring1 respectively */
	aes[0]->id = MTK_RING0;
	aes[1]->id = MTK_RING1;

	return 0;

err_cleanup:
	for (; i--; ) {
		free_page((unsigned long)aes[i]->buf);
		kfree(aes[i]);
	}

	return err;
}
static void mtk_aes_record_free(struct mtk_cryp *cryp)
{
	int i;

	for (i = 0; i < MTK_REC_NUM; i++) {
		tasklet_kill(&cryp->aes[i]->done_task);
		tasklet_kill(&cryp->aes[i]->queue_task);

		free_page((unsigned long)cryp->aes[i]->buf);
		kfree(cryp->aes[i]);
	}
}
static void mtk_aes_unregister_algs(void)
{
	int i;

	crypto_unregister_aead(&aes_gcm_alg);

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
		crypto_unregister_alg(&aes_algs[i]);
}

static int mtk_aes_register_algs(void)
{
	int err, i;

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
		err = crypto_register_alg(&aes_algs[i]);
		if (err)
			goto err_aes_algs;
	}

	err = crypto_register_aead(&aes_gcm_alg);
	if (err)
		goto err_aes_algs;

	return 0;

err_aes_algs:
	for (; i--; )
		crypto_unregister_alg(&aes_algs[i]);

	return err;
}
int mtk_cipher_alg_register(struct mtk_cryp *cryp)
{
	int ret;

	INIT_LIST_HEAD(&cryp->aes_list);

	/* Initialize two cipher records */
	ret = mtk_aes_record_init(cryp);
	if (ret)
		goto err_record;

	ret = devm_request_irq(cryp->dev, cryp->irq[MTK_RING0], mtk_aes_irq,
			       0, "mtk-aes", cryp->aes[0]);
	if (ret) {
		dev_err(cryp->dev, "unable to request AES irq.\n");
		goto err_res;
	}

	ret = devm_request_irq(cryp->dev, cryp->irq[MTK_RING1], mtk_aes_irq,
			       0, "mtk-aes", cryp->aes[1]);
	if (ret) {
		dev_err(cryp->dev, "unable to request AES irq.\n");
		goto err_res;
	}

	/* Enable ring0 and ring1 interrupt */
	mtk_aes_write(cryp, AIC_ENABLE_SET(MTK_RING0), MTK_IRQ_RDR0);
	mtk_aes_write(cryp, AIC_ENABLE_SET(MTK_RING1), MTK_IRQ_RDR1);

	spin_lock(&mtk_aes.lock);
	list_add_tail(&cryp->aes_list, &mtk_aes.dev_list);
	spin_unlock(&mtk_aes.lock);

	ret = mtk_aes_register_algs();
	if (ret)
		goto err_algs;

	return 0;

err_algs:
	spin_lock(&mtk_aes.lock);
	list_del(&cryp->aes_list);
	spin_unlock(&mtk_aes.lock);
err_res:
	mtk_aes_record_free(cryp);
err_record:
	dev_err(cryp->dev, "mtk-aes initialization failed.\n");
	return ret;
}
void mtk_cipher_alg_release(struct mtk_cryp *cryp)
{
	spin_lock(&mtk_aes.lock);
	list_del(&cryp->aes_list);
	spin_unlock(&mtk_aes.lock);

	mtk_aes_unregister_algs();
	mtk_aes_record_free(cryp);
}