/*
 * Driver for EIP97 AES acceleration.
 *
 * Copyright (c) 2016 Ryder Lee <ryder.lee@mediatek.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Some ideas are from the atmel-aes.c driver.
 */
#include <crypto/aes.h>
#include <crypto/gcm.h>
#include "mtk-platform.h"
#define AES_QUEUE_SIZE		512
#define AES_BUF_ORDER		2
#define AES_BUF_SIZE		((PAGE_SIZE << AES_BUF_ORDER) \
				& ~(AES_BLOCK_SIZE - 1))
#define AES_MAX_STATE_BUF_SIZE	SIZE_IN_WORDS(AES_KEYSIZE_256 + \
					      AES_BLOCK_SIZE * 2)
#define AES_MAX_CT_SIZE		6

#define AES_CT_CTRL_HDR		cpu_to_le32(0x00220000)
/* AES-CBC/ECB/CTR command token */
#define AES_CMD0		cpu_to_le32(0x05000000)
#define AES_CMD1		cpu_to_le32(0x2d060000)
#define AES_CMD2		cpu_to_le32(0xe4a63806)
/* AES-GCM command token */
#define AES_GCM_CMD0		cpu_to_le32(0x0b000000)
#define AES_GCM_CMD1		cpu_to_le32(0xa0800000)
#define AES_GCM_CMD2		cpu_to_le32(0x25000010)
#define AES_GCM_CMD3		cpu_to_le32(0x0f020000)
#define AES_GCM_CMD4		cpu_to_le32(0x21e60000)
#define AES_GCM_CMD5		cpu_to_le32(0x40e60000)
#define AES_GCM_CMD6		cpu_to_le32(0xd0070000)
/* AES transform information word 0 fields */
#define AES_TFM_BASIC_OUT	cpu_to_le32(0x4 << 0)
#define AES_TFM_BASIC_IN	cpu_to_le32(0x5 << 0)
#define AES_TFM_GCM_OUT		cpu_to_le32(0x6 << 0)
#define AES_TFM_GCM_IN		cpu_to_le32(0xf << 0)
#define AES_TFM_SIZE(x)		cpu_to_le32((x) << 8)
#define AES_TFM_128BITS		cpu_to_le32(0xb << 16)
#define AES_TFM_192BITS		cpu_to_le32(0xd << 16)
#define AES_TFM_256BITS		cpu_to_le32(0xf << 16)
#define AES_TFM_GHASH_DIGEST	cpu_to_le32(0x2 << 21)
#define AES_TFM_GHASH		cpu_to_le32(0x4 << 23)
/* AES transform information word 1 fields */
#define AES_TFM_ECB		cpu_to_le32(0x0 << 0)
#define AES_TFM_CBC		cpu_to_le32(0x1 << 0)
#define AES_TFM_CTR_INIT	cpu_to_le32(0x2 << 0)	/* init counter to 1 */
#define AES_TFM_CTR_LOAD	cpu_to_le32(0x6 << 0)	/* load/reuse counter */
#define AES_TFM_3IV		cpu_to_le32(0x7 << 5)	/* using IV 0-2 */
#define AES_TFM_FULL_IV		cpu_to_le32(0xf << 5)	/* using IV 0-3 */
#define AES_TFM_IV_CTR_MODE	cpu_to_le32(0x1 << 10)
#define AES_TFM_ENC_HASH	cpu_to_le32(0x1 << 17)
#define AES_FLAGS_CIPHER_MSK	GENMASK(2, 0)
#define AES_FLAGS_ECB		BIT(0)
#define AES_FLAGS_CBC		BIT(1)
#define AES_FLAGS_CTR		BIT(2)
#define AES_FLAGS_GCM		BIT(3)
#define AES_FLAGS_ENCRYPT	BIT(4)
#define AES_FLAGS_BUSY		BIT(5)

#define AES_AUTH_TAG_ERR	cpu_to_le32(BIT(26))
/**
 * mtk_aes_info - hardware information of AES
 * @cmd:	command token, hardware instruction
 * @tfm:	transform state of cipher algorithm.
 * @state:	contains keys and initial vectors.
 *
 * Memory layout of GCM buffer:
 * | AES KEY  | 128/192/256 bits
 * | HASH KEY | a string of 128 zero bits encrypted using the block cipher
 *
 * The engine requires all of this information to:
 * - decode commands and control the engine's data path,
 * - coordinate hardware data fetch and store operations,
 * - construct and output the result token.
 */
struct mtk_aes_info {
	__le32 cmd[AES_MAX_CT_SIZE];
	__le32 tfm[2];
	__le32 state[AES_MAX_STATE_BUF_SIZE];
};
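
/*
 * Illustrative sketch, not part of the original driver: word offsets into
 * the @state buffer as used by the code below. The key is written first,
 * followed by the per-request IV (CBC/CTR) or the GHASH key (GCM) and, for
 * GCM, the nonce after that. These helper macros are hypothetical and only
 * document the layout; the driver indexes the buffer directly.
 */
#define AES_STATE_KEY_OFFSET		0
#define AES_STATE_IV_OFFSET(ctx)	((ctx)->keylen)
#define AES_STATE_GCM_IV_OFFSET(ctx)	((ctx)->keylen + \
					 SIZE_IN_WORDS(AES_BLOCK_SIZE))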
struct mtk_aes_reqctx {
	u64 mode;
};
struct mtk_aes_base_ctx {
	struct mtk_cryp *cryp;
	u32 keylen;
	__le32 keymode;

	mtk_aes_fn start;

	struct mtk_aes_info info;
	dma_addr_t ct_dma;
	dma_addr_t tfm_dma;

	__le32 ct_hdr;
	u32 ct_size;
};

struct mtk_aes_ctx {
	struct mtk_aes_base_ctx	base;
};
struct mtk_aes_ctr_ctx {
	struct mtk_aes_base_ctx base;

	u32	iv[AES_BLOCK_SIZE / sizeof(u32)];
	size_t offset;
	struct scatterlist src[2];
	struct scatterlist dst[2];
};
struct mtk_aes_gcm_ctx {
	struct mtk_aes_base_ctx base;

	u32 authsize;
	size_t textlen;

	struct crypto_skcipher *ctr;
};
struct mtk_aes_drv {
	struct list_head dev_list;
	/* Device list lock */
	spinlock_t lock;
};

static struct mtk_aes_drv mtk_aes = {
	.dev_list = LIST_HEAD_INIT(mtk_aes.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(mtk_aes.lock),
};
static inline u32 mtk_aes_read(struct mtk_cryp *cryp, u32 offset)
{
	return readl_relaxed(cryp->base + offset);
}

static inline void mtk_aes_write(struct mtk_cryp *cryp,
				 u32 offset, u32 value)
{
	writel_relaxed(value, cryp->base + offset);
}
static struct mtk_cryp *mtk_aes_find_dev(struct mtk_aes_base_ctx *ctx)
{
	struct mtk_cryp *cryp = NULL;
	struct mtk_cryp *tmp;

	spin_lock_bh(&mtk_aes.lock);
	if (!ctx->cryp) {
		list_for_each_entry(tmp, &mtk_aes.dev_list, aes_list) {
			cryp = tmp;
			break;
		}
		ctx->cryp = cryp;
	} else {
		cryp = ctx->cryp;
	}
	spin_unlock_bh(&mtk_aes.lock);

	return cryp;
}
static inline size_t mtk_aes_padlen(size_t len)
{
	len &= AES_BLOCK_SIZE - 1;
	return len ? AES_BLOCK_SIZE - len : 0;
}
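
/*
 * Worked example, not part of the original driver: mtk_aes_padlen() returns
 * how many bytes are needed to round a length up to the next AES block, so
 * 20 bytes of data need 12 bytes of padding while a block-aligned length
 * needs none. The hypothetical helper below only restates that as code.
 */
static inline bool mtk_aes_padlen_example(void)
{
	return mtk_aes_padlen(20) == 12 && mtk_aes_padlen(32) == 0;
}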
static bool mtk_aes_check_aligned(struct scatterlist *sg, size_t len,
				  struct mtk_aes_dma *dma)
{
	int nents;

	if (!IS_ALIGNED(len, AES_BLOCK_SIZE))
		return false;

	for (nents = 0; sg; sg = sg_next(sg), ++nents) {
		if (!IS_ALIGNED(sg->offset, sizeof(u32)))
			return false;

		if (len <= sg->length) {
			if (!IS_ALIGNED(len, AES_BLOCK_SIZE))
				return false;

			dma->nents = nents + 1;
			dma->remainder = sg->length - len;
			sg->length = len;
			return true;
		}

		if (!IS_ALIGNED(sg->length, AES_BLOCK_SIZE))
			return false;

		len -= sg->length;
	}

	return false;
}
static inline void mtk_aes_set_mode(struct mtk_aes_rec *aes,
				    const struct mtk_aes_reqctx *rctx)
{
	/* Clear all but persistent flags and set request flags. */
	aes->flags = (aes->flags & AES_FLAGS_BUSY) | rctx->mode;
}
static inline void mtk_aes_restore_sg(const struct mtk_aes_dma *dma)
{
	struct scatterlist *sg = dma->sg;
	int nents = dma->nents;

	if (!dma->remainder)
		return;

	while (--nents > 0 && sg)
		sg = sg_next(sg);

	if (!sg)
		return;

	sg->length += dma->remainder;
}
static inline void mtk_aes_write_state_le(__le32 *dst, const u32 *src, u32 size)
{
	u32 i;

	for (i = 0; i < SIZE_IN_WORDS(size); i++)
		dst[i] = cpu_to_le32(src[i]);
}
static inline void mtk_aes_write_state_be(__be32 *dst, const u32 *src, u32 size)
{
	u32 i;

	for (i = 0; i < SIZE_IN_WORDS(size); i++)
		dst[i] = cpu_to_be32(src[i]);
}
static inline int mtk_aes_complete(struct mtk_cryp *cryp,
				   struct mtk_aes_rec *aes,
				   int err)
{
	aes->flags &= ~AES_FLAGS_BUSY;
	aes->areq->complete(aes->areq, err);
	/* Handle new request */
	tasklet_schedule(&aes->queue_task);

	return err;
}
/*
 * Write descriptors for processing. This will configure the engine, load
 * the transform information and then start the packet processing.
 */
static int mtk_aes_xmit(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_ring *ring = cryp->ring[aes->id];
	struct mtk_desc *cmd = NULL, *res = NULL;
	struct scatterlist *ssg = aes->src.sg, *dsg = aes->dst.sg;
	u32 slen = aes->src.sg_len, dlen = aes->dst.sg_len;
	int nents;

	/* Write command descriptors */
	for (nents = 0; nents < slen; ++nents, ssg = sg_next(ssg)) {
		cmd = ring->cmd_next;
		cmd->hdr = MTK_DESC_BUF_LEN(ssg->length);
		cmd->buf = cpu_to_le32(sg_dma_address(ssg));

		if (nents == 0) {
			cmd->hdr |= MTK_DESC_FIRST |
				    MTK_DESC_CT_LEN(aes->ctx->ct_size);
			cmd->ct = cpu_to_le32(aes->ctx->ct_dma);
			cmd->ct_hdr = aes->ctx->ct_hdr;
			cmd->tfm = cpu_to_le32(aes->ctx->tfm_dma);
		}

		/* Shift ring buffer and check boundary */
		if (++ring->cmd_next == ring->cmd_base + MTK_DESC_NUM)
			ring->cmd_next = ring->cmd_base;
	}
	cmd->hdr |= MTK_DESC_LAST;

	/* Prepare result descriptors */
	for (nents = 0; nents < dlen; ++nents, dsg = sg_next(dsg)) {
		res = ring->res_next;
		res->hdr = MTK_DESC_BUF_LEN(dsg->length);
		res->buf = cpu_to_le32(sg_dma_address(dsg));

		if (nents == 0)
			res->hdr |= MTK_DESC_FIRST;

		/* Shift ring buffer and check boundary */
		if (++ring->res_next == ring->res_base + MTK_DESC_NUM)
			ring->res_next = ring->res_base;
	}
	res->hdr |= MTK_DESC_LAST;

	/* Pointer to current result descriptor */
	ring->res_prev = res;

	/* Prepare enough space for authenticated tag */
	if (aes->flags & AES_FLAGS_GCM)
		res->hdr += AES_BLOCK_SIZE;

	/*
	 * Make sure that all changes to the DMA ring are done before we
	 * start engine.
	 */
	wmb();
	/* Start DMA transfer */
	mtk_aes_write(cryp, RDR_PREP_COUNT(aes->id), MTK_DESC_CNT(dlen));
	mtk_aes_write(cryp, CDR_PREP_COUNT(aes->id), MTK_DESC_CNT(slen));

	return -EINPROGRESS;
}
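
/*
 * Illustrative sketch, not part of the original driver: the ring-advance
 * pattern used twice in mtk_aes_xmit() above, written as a hypothetical
 * helper. The command/result rings are fixed arrays of MTK_DESC_NUM
 * descriptors, so the "next" pointer simply wraps back to the base once
 * it runs past the last entry.
 */
static inline struct mtk_desc *mtk_aes_ring_next(struct mtk_desc **next,
						 struct mtk_desc *base)
{
	struct mtk_desc *cur = *next;

	if (++(*next) == base + MTK_DESC_NUM)
		*next = base;

	return cur;
}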
static void mtk_aes_unmap(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_aes_base_ctx *ctx = aes->ctx;

	dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(ctx->info),
			 DMA_TO_DEVICE);

	if (aes->src.sg == aes->dst.sg) {
		dma_unmap_sg(cryp->dev, aes->src.sg, aes->src.nents,
			     DMA_BIDIRECTIONAL);

		if (aes->src.sg != &aes->aligned_sg)
			mtk_aes_restore_sg(&aes->src);
	} else {
		dma_unmap_sg(cryp->dev, aes->dst.sg, aes->dst.nents,
			     DMA_FROM_DEVICE);

		if (aes->dst.sg != &aes->aligned_sg)
			mtk_aes_restore_sg(&aes->dst);

		dma_unmap_sg(cryp->dev, aes->src.sg, aes->src.nents,
			     DMA_TO_DEVICE);

		if (aes->src.sg != &aes->aligned_sg)
			mtk_aes_restore_sg(&aes->src);
	}

	if (aes->dst.sg == &aes->aligned_sg)
		sg_copy_from_buffer(aes->real_dst, sg_nents(aes->real_dst),
				    aes->buf, aes->total);
}
static int mtk_aes_map(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_aes_base_ctx *ctx = aes->ctx;
	struct mtk_aes_info *info = &ctx->info;

	ctx->ct_dma = dma_map_single(cryp->dev, info, sizeof(*info),
				     DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(cryp->dev, ctx->ct_dma)))
		goto exit;

	ctx->tfm_dma = ctx->ct_dma + sizeof(info->cmd);

	if (aes->src.sg == aes->dst.sg) {
		aes->src.sg_len = dma_map_sg(cryp->dev, aes->src.sg,
					     aes->src.nents,
					     DMA_BIDIRECTIONAL);
		aes->dst.sg_len = aes->src.sg_len;
		if (unlikely(!aes->src.sg_len))
			goto sg_map_err;
	} else {
		aes->src.sg_len = dma_map_sg(cryp->dev, aes->src.sg,
					     aes->src.nents, DMA_TO_DEVICE);
		if (unlikely(!aes->src.sg_len))
			goto sg_map_err;

		aes->dst.sg_len = dma_map_sg(cryp->dev, aes->dst.sg,
					     aes->dst.nents, DMA_FROM_DEVICE);
		if (unlikely(!aes->dst.sg_len)) {
			dma_unmap_sg(cryp->dev, aes->src.sg, aes->src.nents,
				     DMA_TO_DEVICE);
			goto sg_map_err;
		}
	}

	return mtk_aes_xmit(cryp, aes);

sg_map_err:
	dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(*info), DMA_TO_DEVICE);
exit:
	return mtk_aes_complete(cryp, aes, -EINVAL);
}
/* Initialize transform information of CBC/ECB/CTR mode */
static void mtk_aes_info_init(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
			      size_t len)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq);
	struct mtk_aes_base_ctx *ctx = aes->ctx;
	struct mtk_aes_info *info = &ctx->info;
	u32 cnt = 0;

	ctx->ct_hdr = AES_CT_CTRL_HDR | cpu_to_le32(len);
	info->cmd[cnt++] = AES_CMD0 | cpu_to_le32(len);
	info->cmd[cnt++] = AES_CMD1;

	info->tfm[0] = AES_TFM_SIZE(ctx->keylen) | ctx->keymode;
	if (aes->flags & AES_FLAGS_ENCRYPT)
		info->tfm[0] |= AES_TFM_BASIC_OUT;
	else
		info->tfm[0] |= AES_TFM_BASIC_IN;

	switch (aes->flags & AES_FLAGS_CIPHER_MSK) {
	case AES_FLAGS_CBC:
		info->tfm[1] = AES_TFM_CBC;
		break;
	case AES_FLAGS_ECB:
		info->tfm[1] = AES_TFM_ECB;
		goto ecb;
	case AES_FLAGS_CTR:
		info->tfm[1] = AES_TFM_CTR_LOAD;
		goto ctr;
	default:
		/* Should not happen... */
		return;
	}

	mtk_aes_write_state_le(info->state + ctx->keylen, req->info,
			       AES_BLOCK_SIZE);
ctr:
	info->tfm[0] += AES_TFM_SIZE(SIZE_IN_WORDS(AES_BLOCK_SIZE));
	info->tfm[1] |= AES_TFM_FULL_IV;
	info->cmd[cnt++] = AES_CMD2;
ecb:
	ctx->ct_size = cnt;
}
static int mtk_aes_dma(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
		       struct scatterlist *src, struct scatterlist *dst,
		       size_t len)
{
	size_t padlen = 0;
	bool src_aligned, dst_aligned;

	aes->total = len;
	aes->src.sg = src;
	aes->dst.sg = dst;
	aes->real_dst = dst;

	src_aligned = mtk_aes_check_aligned(src, len, &aes->src);
	if (src == dst)
		dst_aligned = src_aligned;
	else
		dst_aligned = mtk_aes_check_aligned(dst, len, &aes->dst);

	if (!src_aligned || !dst_aligned) {
		padlen = mtk_aes_padlen(len);

		if (len + padlen > AES_BUF_SIZE)
			return mtk_aes_complete(cryp, aes, -ENOMEM);

		if (!src_aligned) {
			sg_copy_to_buffer(src, sg_nents(src), aes->buf, len);
			aes->src.sg = &aes->aligned_sg;
			aes->src.nents = 1;
			aes->src.remainder = 0;
		}

		if (!dst_aligned) {
			aes->dst.sg = &aes->aligned_sg;
			aes->dst.nents = 1;
			aes->dst.remainder = 0;
		}

		sg_init_table(&aes->aligned_sg, 1);
		sg_set_buf(&aes->aligned_sg, aes->buf, len + padlen);
	}

	mtk_aes_info_init(cryp, aes, len + padlen);

	return mtk_aes_map(cryp, aes);
}
static int mtk_aes_handle_queue(struct mtk_cryp *cryp, u8 id,
				struct crypto_async_request *new_areq)
{
	struct mtk_aes_rec *aes = cryp->aes[id];
	struct crypto_async_request *areq, *backlog;
	struct mtk_aes_base_ctx *ctx;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&aes->lock, flags);
	if (new_areq)
		ret = crypto_enqueue_request(&aes->queue, new_areq);
	if (aes->flags & AES_FLAGS_BUSY) {
		spin_unlock_irqrestore(&aes->lock, flags);
		return ret;
	}
	backlog = crypto_get_backlog(&aes->queue);
	areq = crypto_dequeue_request(&aes->queue);
	if (areq)
		aes->flags |= AES_FLAGS_BUSY;
	spin_unlock_irqrestore(&aes->lock, flags);

	if (!areq)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	ctx = crypto_tfm_ctx(areq->tfm);

	aes->areq = areq;
	aes->ctx = ctx;

	return ctx->start(cryp, aes);
}

static int mtk_aes_transfer_complete(struct mtk_cryp *cryp,
				     struct mtk_aes_rec *aes)
{
	return mtk_aes_complete(cryp, aes, 0);
}
static int mtk_aes_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq);
	struct mtk_aes_reqctx *rctx = ablkcipher_request_ctx(req);

	mtk_aes_set_mode(aes, rctx);
	aes->resume = mtk_aes_transfer_complete;

	return mtk_aes_dma(cryp, aes, req->src, req->dst, req->nbytes);
}
static inline struct mtk_aes_ctr_ctx *
mtk_aes_ctr_ctx_cast(struct mtk_aes_base_ctx *ctx)
{
	return container_of(ctx, struct mtk_aes_ctr_ctx, base);
}
static int mtk_aes_ctr_transfer(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_aes_base_ctx *ctx = aes->ctx;
	struct mtk_aes_ctr_ctx *cctx = mtk_aes_ctr_ctx_cast(ctx);
	struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq);
	struct scatterlist *src, *dst;
	u32 start, end, ctr, blocks;
	size_t datalen;
	bool fragmented = false;

	/* Check for transfer completion. */
	cctx->offset += aes->total;
	if (cctx->offset >= req->nbytes)
		return mtk_aes_transfer_complete(cryp, aes);

	/* Compute data length. */
	datalen = req->nbytes - cctx->offset;
	blocks = DIV_ROUND_UP(datalen, AES_BLOCK_SIZE);
	ctr = be32_to_cpu(cctx->iv[3]);

	/* Check 32bit counter overflow. */
	start = ctr;
	end = start + blocks - 1;
	if (end < start) {
		ctr = 0xffffffff;
		datalen = AES_BLOCK_SIZE * -start;
		fragmented = true;
	}

	/* Jump to offset. */
	src = scatterwalk_ffwd(cctx->src, req->src, cctx->offset);
	dst = ((req->src == req->dst) ? src :
	       scatterwalk_ffwd(cctx->dst, req->dst, cctx->offset));

	/* Write IVs into transform state buffer. */
	mtk_aes_write_state_le(ctx->info.state + ctx->keylen, cctx->iv,
			       AES_BLOCK_SIZE);

	if (unlikely(fragmented)) {
		/*
		 * Increment the counter manually to cope with the hardware
		 * counter overflow.
		 */
		cctx->iv[3] = cpu_to_be32(ctr);
		crypto_inc((u8 *)cctx->iv, AES_BLOCK_SIZE);
	}

	return mtk_aes_dma(cryp, aes, src, dst, datalen);
}
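
/*
 * Illustrative sketch, not part of the original driver: the overflow check
 * in mtk_aes_ctr_transfer() relies on 32-bit unsigned wrap-around. With
 * start = 0xfffffffe and blocks = 5, end = start + blocks - 1 wraps to 2,
 * which is smaller than start, so only -start = 2 blocks (32 bytes) are
 * handed to the hardware before the counter is reloaded for the next chunk.
 * The hypothetical helper below computes that per-chunk byte count.
 */
static inline size_t mtk_aes_ctr_chunk_len(u32 start, u32 blocks,
					   size_t datalen)
{
	u32 end = start + blocks - 1;

	if (end < start)
		return AES_BLOCK_SIZE * (size_t)(-start);

	return datalen;
}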
static int mtk_aes_ctr_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_aes_ctr_ctx *cctx = mtk_aes_ctr_ctx_cast(aes->ctx);
	struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq);
	struct mtk_aes_reqctx *rctx = ablkcipher_request_ctx(req);

	mtk_aes_set_mode(aes, rctx);

	memcpy(cctx->iv, req->info, AES_BLOCK_SIZE);
	cctx->offset = 0;
	aes->total = 0;
	aes->resume = mtk_aes_ctr_transfer;

	return mtk_aes_ctr_transfer(cryp, aes);
}
/* Check and set the AES key to transform state buffer */
static int mtk_aes_setkey(struct crypto_ablkcipher *tfm,
			  const u8 *key, u32 keylen)
{
	struct mtk_aes_base_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	switch (keylen) {
	case AES_KEYSIZE_128:
		ctx->keymode = AES_TFM_128BITS;
		break;
	case AES_KEYSIZE_192:
		ctx->keymode = AES_TFM_192BITS;
		break;
	case AES_KEYSIZE_256:
		ctx->keymode = AES_TFM_256BITS;
		break;
	default:
		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	ctx->keylen = SIZE_IN_WORDS(keylen);
	mtk_aes_write_state_le(ctx->info.state, (const u32 *)key, keylen);

	return 0;
}
static int mtk_aes_crypt(struct ablkcipher_request *req, u64 mode)
{
	struct mtk_aes_base_ctx *ctx;
	struct mtk_aes_reqctx *rctx;

	ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
	rctx = ablkcipher_request_ctx(req);
	rctx->mode = mode;

	return mtk_aes_handle_queue(ctx->cryp, !(mode & AES_FLAGS_ENCRYPT),
				    &req->base);
}
static int mtk_aes_ecb_encrypt(struct ablkcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_ECB);
}

static int mtk_aes_ecb_decrypt(struct ablkcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_ECB);
}

static int mtk_aes_cbc_encrypt(struct ablkcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CBC);
}

static int mtk_aes_cbc_decrypt(struct ablkcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_CBC);
}

static int mtk_aes_ctr_encrypt(struct ablkcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CTR);
}

static int mtk_aes_ctr_decrypt(struct ablkcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_CTR);
}
static int mtk_aes_cra_init(struct crypto_tfm *tfm)
{
	struct mtk_aes_ctx *ctx = crypto_tfm_ctx(tfm);
	struct mtk_cryp *cryp = NULL;

	cryp = mtk_aes_find_dev(&ctx->base);
	if (!cryp) {
		pr_err("can't find crypto device\n");
		return -ENODEV;
	}

	tfm->crt_ablkcipher.reqsize = sizeof(struct mtk_aes_reqctx);
	ctx->base.start = mtk_aes_start;
	return 0;
}

static int mtk_aes_ctr_cra_init(struct crypto_tfm *tfm)
{
	struct mtk_aes_ctx *ctx = crypto_tfm_ctx(tfm);
	struct mtk_cryp *cryp = NULL;

	cryp = mtk_aes_find_dev(&ctx->base);
	if (!cryp) {
		pr_err("can't find crypto device\n");
		return -ENODEV;
	}

	tfm->crt_ablkcipher.reqsize = sizeof(struct mtk_aes_reqctx);
	ctx->base.start = mtk_aes_ctr_start;
	return 0;
}
static struct crypto_alg aes_algs[] = {
{
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "cbc-aes-mtk",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
				  CRYPTO_ALG_ASYNC,
	.cra_init		= mtk_aes_cra_init,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct mtk_aes_ctx),
	.cra_alignmask		= 0xf,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u.ablkcipher	= {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.setkey		= mtk_aes_setkey,
		.encrypt	= mtk_aes_cbc_encrypt,
		.decrypt	= mtk_aes_cbc_decrypt,
		.ivsize		= AES_BLOCK_SIZE,
	}
},
{
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "ecb-aes-mtk",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
				  CRYPTO_ALG_ASYNC,
	.cra_init		= mtk_aes_cra_init,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct mtk_aes_ctx),
	.cra_alignmask		= 0xf,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u.ablkcipher	= {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.setkey		= mtk_aes_setkey,
		.encrypt	= mtk_aes_ecb_encrypt,
		.decrypt	= mtk_aes_ecb_decrypt,
	}
},
{
	.cra_name		= "ctr(aes)",
	.cra_driver_name	= "ctr-aes-mtk",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
				  CRYPTO_ALG_ASYNC,
	.cra_init		= mtk_aes_ctr_cra_init,
	.cra_blocksize		= 1,
	.cra_ctxsize		= sizeof(struct mtk_aes_ctr_ctx),
	.cra_alignmask		= 0xf,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u.ablkcipher	= {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= mtk_aes_setkey,
		.encrypt	= mtk_aes_ctr_encrypt,
		.decrypt	= mtk_aes_ctr_decrypt,
	}
},
};
static inline struct mtk_aes_gcm_ctx *
mtk_aes_gcm_ctx_cast(struct mtk_aes_base_ctx *ctx)
{
	return container_of(ctx, struct mtk_aes_gcm_ctx, base);
}
/*
 * The engine verifies and compares the tag automatically, so we only need
 * to check the returned status, which is stored in the result descriptor.
 */
static int mtk_aes_gcm_tag_verify(struct mtk_cryp *cryp,
				  struct mtk_aes_rec *aes)
{
	u32 status = cryp->ring[aes->id]->res_prev->ct;

	return mtk_aes_complete(cryp, aes, (status & AES_AUTH_TAG_ERR) ?
				-EBADMSG : 0);
}
/* Initialize transform information of GCM mode */
static void mtk_aes_gcm_info_init(struct mtk_cryp *cryp,
				  struct mtk_aes_rec *aes,
				  size_t len)
{
	struct aead_request *req = aead_request_cast(aes->areq);
	struct mtk_aes_base_ctx *ctx = aes->ctx;
	struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);
	struct mtk_aes_info *info = &ctx->info;
	u32 ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
	u32 cnt = 0;

	ctx->ct_hdr = AES_CT_CTRL_HDR | len;

	info->cmd[cnt++] = AES_GCM_CMD0 | cpu_to_le32(req->assoclen);
	info->cmd[cnt++] = AES_GCM_CMD1 | cpu_to_le32(req->assoclen);
	info->cmd[cnt++] = AES_GCM_CMD2;
	info->cmd[cnt++] = AES_GCM_CMD3 | cpu_to_le32(gctx->textlen);

	if (aes->flags & AES_FLAGS_ENCRYPT) {
		info->cmd[cnt++] = AES_GCM_CMD4 | cpu_to_le32(gctx->authsize);
		info->tfm[0] = AES_TFM_GCM_OUT;
	} else {
		info->cmd[cnt++] = AES_GCM_CMD5 | cpu_to_le32(gctx->authsize);
		info->cmd[cnt++] = AES_GCM_CMD6 | cpu_to_le32(gctx->authsize);
		info->tfm[0] = AES_TFM_GCM_IN;
	}
	ctx->ct_size = cnt;

	info->tfm[0] |= AES_TFM_GHASH_DIGEST | AES_TFM_GHASH | AES_TFM_SIZE(
			ctx->keylen + SIZE_IN_WORDS(AES_BLOCK_SIZE + ivsize)) |
			ctx->keymode;
	info->tfm[1] = AES_TFM_CTR_INIT | AES_TFM_IV_CTR_MODE | AES_TFM_3IV |
		       AES_TFM_ENC_HASH;

	mtk_aes_write_state_le(info->state + ctx->keylen + SIZE_IN_WORDS(
			       AES_BLOCK_SIZE), (const u32 *)req->iv, ivsize);
}
static int mtk_aes_gcm_dma(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
			   struct scatterlist *src, struct scatterlist *dst,
			   size_t len)
{
	bool src_aligned, dst_aligned;

	aes->src.sg = src;
	aes->dst.sg = dst;
	aes->real_dst = dst;

	src_aligned = mtk_aes_check_aligned(src, len, &aes->src);
	if (src == dst)
		dst_aligned = src_aligned;
	else
		dst_aligned = mtk_aes_check_aligned(dst, len, &aes->dst);

	if (!src_aligned || !dst_aligned) {
		if (aes->total > AES_BUF_SIZE)
			return mtk_aes_complete(cryp, aes, -ENOMEM);

		if (!src_aligned) {
			sg_copy_to_buffer(src, sg_nents(src), aes->buf, len);
			aes->src.sg = &aes->aligned_sg;
			aes->src.nents = 1;
			aes->src.remainder = 0;
		}

		if (!dst_aligned) {
			aes->dst.sg = &aes->aligned_sg;
			aes->dst.nents = 1;
			aes->dst.remainder = 0;
		}

		sg_init_table(&aes->aligned_sg, 1);
		sg_set_buf(&aes->aligned_sg, aes->buf, aes->total);
	}

	mtk_aes_gcm_info_init(cryp, aes, len);

	return mtk_aes_map(cryp, aes);
}
static int mtk_aes_gcm_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(aes->ctx);
	struct aead_request *req = aead_request_cast(aes->areq);
	struct mtk_aes_reqctx *rctx = aead_request_ctx(req);
	u32 len = req->assoclen + req->cryptlen;

	mtk_aes_set_mode(aes, rctx);

	if (aes->flags & AES_FLAGS_ENCRYPT) {
		u32 tag[4];

		aes->resume = mtk_aes_transfer_complete;
		/* Compute total process length. */
		aes->total = len + gctx->authsize;
		/* Compute text length. */
		gctx->textlen = req->cryptlen;
		/* Hardware will append authenticated tag to output buffer */
		scatterwalk_map_and_copy(tag, req->dst, len, gctx->authsize, 1);
	} else {
		aes->resume = mtk_aes_gcm_tag_verify;
		aes->total = len;
		gctx->textlen = req->cryptlen - gctx->authsize;
	}

	return mtk_aes_gcm_dma(cryp, aes, req->src, req->dst, len);
}
static int mtk_aes_gcm_crypt(struct aead_request *req, u64 mode)
{
	struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);
	struct mtk_aes_reqctx *rctx = aead_request_ctx(req);

	/* Empty messages are not supported yet */
	if (!gctx->textlen && !req->assoclen)
		return -EINVAL;

	rctx->mode = AES_FLAGS_GCM | mode;

	return mtk_aes_handle_queue(ctx->cryp, !!(mode & AES_FLAGS_ENCRYPT),
				    &req->base);
}
/*
 * Because of the hardware limitation, we need to pre-calculate key(H)
 * for the GHASH operation. The result of the encryption operation
 * needs to be stored in the transform state buffer.
 */
static int mtk_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key,
			      u32 keylen)
{
	struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(aead);
	struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);
	struct crypto_skcipher *ctr = gctx->ctr;
	struct {
		u32 hash[4];
		u8 iv[8];

		struct crypto_wait wait;

		struct scatterlist sg[1];
		struct skcipher_request req;
	} *data;
	int err;

	switch (keylen) {
	case AES_KEYSIZE_128:
		ctx->keymode = AES_TFM_128BITS;
		break;
	case AES_KEYSIZE_192:
		ctx->keymode = AES_TFM_192BITS;
		break;
	case AES_KEYSIZE_256:
		ctx->keymode = AES_TFM_256BITS;
		break;
	default:
		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	ctx->keylen = SIZE_IN_WORDS(keylen);

	/* Same as crypto_gcm_setkey() from crypto/gcm.c */
	crypto_skcipher_clear_flags(ctr, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(ctr, crypto_aead_get_flags(aead) &
				  CRYPTO_TFM_REQ_MASK);
	err = crypto_skcipher_setkey(ctr, key, keylen);
	crypto_aead_set_flags(aead, crypto_skcipher_get_flags(ctr) &
			      CRYPTO_TFM_RES_MASK);
	if (err)
		return err;

	data = kzalloc(sizeof(*data) + crypto_skcipher_reqsize(ctr),
		       GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	crypto_init_wait(&data->wait);
	sg_init_one(data->sg, &data->hash, AES_BLOCK_SIZE);
	skcipher_request_set_tfm(&data->req, ctr);
	skcipher_request_set_callback(&data->req, CRYPTO_TFM_REQ_MAY_SLEEP |
				      CRYPTO_TFM_REQ_MAY_BACKLOG,
				      crypto_req_done, &data->wait);
	skcipher_request_set_crypt(&data->req, data->sg, data->sg,
				   AES_BLOCK_SIZE, data->iv);

	err = crypto_wait_req(crypto_skcipher_encrypt(&data->req),
			      &data->wait);
	if (err)
		goto out;

	/* Write key into state buffer */
	mtk_aes_write_state_le(ctx->info.state, (const u32 *)key, keylen);
	/* Write key(H) into state buffer */
	mtk_aes_write_state_be(ctx->info.state + ctx->keylen, data->hash,
			       AES_BLOCK_SIZE);
out:
	kzfree(data);
	return err;
}
static int mtk_aes_gcm_setauthsize(struct crypto_aead *aead,
				   u32 authsize)
{
	struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(aead);
	struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);

	/* Same as crypto_gcm_authsize() from crypto/gcm.c */
	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	gctx->authsize = authsize;
	return 0;
}
*req
)
1041 return mtk_aes_gcm_crypt(req
, AES_FLAGS_ENCRYPT
);
1044 static int mtk_aes_gcm_decrypt(struct aead_request
*req
)
1046 return mtk_aes_gcm_crypt(req
, 0);
static int mtk_aes_gcm_init(struct crypto_aead *aead)
{
	struct mtk_aes_gcm_ctx *ctx = crypto_aead_ctx(aead);
	struct mtk_cryp *cryp = NULL;

	cryp = mtk_aes_find_dev(&ctx->base);
	if (!cryp) {
		pr_err("can't find crypto device\n");
		return -ENODEV;
	}

	ctx->ctr = crypto_alloc_skcipher("ctr(aes)", 0,
					 CRYPTO_ALG_ASYNC);
	if (IS_ERR(ctx->ctr)) {
		pr_err("Error allocating ctr(aes)\n");
		return PTR_ERR(ctx->ctr);
	}

	crypto_aead_set_reqsize(aead, sizeof(struct mtk_aes_reqctx));
	ctx->base.start = mtk_aes_gcm_start;
	return 0;
}

static void mtk_aes_gcm_exit(struct crypto_aead *aead)
{
	struct mtk_aes_gcm_ctx *ctx = crypto_aead_ctx(aead);

	crypto_free_skcipher(ctx->ctr);
}
static struct aead_alg aes_gcm_alg = {
	.setkey		= mtk_aes_gcm_setkey,
	.setauthsize	= mtk_aes_gcm_setauthsize,
	.encrypt	= mtk_aes_gcm_encrypt,
	.decrypt	= mtk_aes_gcm_decrypt,
	.init		= mtk_aes_gcm_init,
	.exit		= mtk_aes_gcm_exit,
	.ivsize		= GCM_AES_IV_SIZE,
	.maxauthsize	= AES_BLOCK_SIZE,

	.base = {
		.cra_name		= "gcm(aes)",
		.cra_driver_name	= "gcm-aes-mtk",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_ASYNC,
		.cra_blocksize		= 1,
		.cra_ctxsize		= sizeof(struct mtk_aes_gcm_ctx),
		.cra_alignmask		= 0xf,
		.cra_module		= THIS_MODULE,
	},
};
static void mtk_aes_queue_task(unsigned long data)
{
	struct mtk_aes_rec *aes = (struct mtk_aes_rec *)data;

	mtk_aes_handle_queue(aes->cryp, aes->id, NULL);
}

static void mtk_aes_done_task(unsigned long data)
{
	struct mtk_aes_rec *aes = (struct mtk_aes_rec *)data;
	struct mtk_cryp *cryp = aes->cryp;

	mtk_aes_unmap(cryp, aes);
	aes->resume(cryp, aes);
}
static irqreturn_t mtk_aes_irq(int irq, void *dev_id)
{
	struct mtk_aes_rec *aes = (struct mtk_aes_rec *)dev_id;
	struct mtk_cryp *cryp = aes->cryp;
	u32 val = mtk_aes_read(cryp, RDR_STAT(aes->id));

	mtk_aes_write(cryp, RDR_STAT(aes->id), val);

	if (likely(AES_FLAGS_BUSY & aes->flags)) {
		mtk_aes_write(cryp, RDR_PROC_COUNT(aes->id), MTK_CNT_RST);
		mtk_aes_write(cryp, RDR_THRESH(aes->id),
			      MTK_RDR_PROC_THRESH | MTK_RDR_PROC_MODE);

		tasklet_schedule(&aes->done_task);
	} else {
		dev_warn(cryp->dev, "AES interrupt when no active requests.\n");
	}
	return IRQ_HANDLED;
}
/*
 * The purpose of creating encryption and decryption records is
 * to process outbound/inbound data in parallel. This can improve
 * performance in most use cases, such as IPSec VPN, especially
 * under heavy network traffic.
 */
static int mtk_aes_record_init(struct mtk_cryp *cryp)
{
	struct mtk_aes_rec **aes = cryp->aes;
	int i, err = -ENOMEM;

	for (i = 0; i < MTK_REC_NUM; i++) {
		aes[i] = kzalloc(sizeof(**aes), GFP_KERNEL);
		if (!aes[i])
			goto err_cleanup;

		aes[i]->buf = (void *)__get_free_pages(GFP_KERNEL,
						       AES_BUF_ORDER);
		if (!aes[i]->buf)
			goto err_cleanup;

		aes[i]->cryp = cryp;

		spin_lock_init(&aes[i]->lock);
		crypto_init_queue(&aes[i]->queue, AES_QUEUE_SIZE);

		tasklet_init(&aes[i]->queue_task, mtk_aes_queue_task,
			     (unsigned long)aes[i]);
		tasklet_init(&aes[i]->done_task, mtk_aes_done_task,
			     (unsigned long)aes[i]);
	}

	/* Link to ring0 and ring1 respectively */
	aes[0]->id = MTK_RING0;
	aes[1]->id = MTK_RING1;

	return 0;

err_cleanup:
	for (; i--; ) {
		free_page((unsigned long)aes[i]->buf);
		kfree(aes[i]);
	}

	return err;
}
static void mtk_aes_record_free(struct mtk_cryp *cryp)
{
	int i;

	for (i = 0; i < MTK_REC_NUM; i++) {
		tasklet_kill(&cryp->aes[i]->done_task);
		tasklet_kill(&cryp->aes[i]->queue_task);

		free_page((unsigned long)cryp->aes[i]->buf);
		kfree(cryp->aes[i]);
	}
}
static void mtk_aes_unregister_algs(void)
{
	int i;

	crypto_unregister_aead(&aes_gcm_alg);

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
		crypto_unregister_alg(&aes_algs[i]);
}

static int mtk_aes_register_algs(void)
{
	int err, i;

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
		err = crypto_register_alg(&aes_algs[i]);
		if (err)
			goto err_aes_algs;
	}

	err = crypto_register_aead(&aes_gcm_alg);
	if (err)
		goto err_aes_algs;

	return 0;

err_aes_algs:
	for (; i--; )
		crypto_unregister_alg(&aes_algs[i]);

	return err;
}
int mtk_cipher_alg_register(struct mtk_cryp *cryp)
{
	int ret;

	INIT_LIST_HEAD(&cryp->aes_list);

	/* Initialize two cipher records */
	ret = mtk_aes_record_init(cryp);
	if (ret)
		goto err_record;

	ret = devm_request_irq(cryp->dev, cryp->irq[MTK_RING0], mtk_aes_irq,
			       0, "mtk-aes", cryp->aes[0]);
	if (ret) {
		dev_err(cryp->dev, "unable to request AES irq.\n");
		goto err_res;
	}

	ret = devm_request_irq(cryp->dev, cryp->irq[MTK_RING1], mtk_aes_irq,
			       0, "mtk-aes", cryp->aes[1]);
	if (ret) {
		dev_err(cryp->dev, "unable to request AES irq.\n");
		goto err_res;
	}

	/* Enable ring0 and ring1 interrupt */
	mtk_aes_write(cryp, AIC_ENABLE_SET(MTK_RING0), MTK_IRQ_RDR0);
	mtk_aes_write(cryp, AIC_ENABLE_SET(MTK_RING1), MTK_IRQ_RDR1);

	spin_lock(&mtk_aes.lock);
	list_add_tail(&cryp->aes_list, &mtk_aes.dev_list);
	spin_unlock(&mtk_aes.lock);

	ret = mtk_aes_register_algs();
	if (ret)
		goto err_algs;

	return 0;

err_algs:
	spin_lock(&mtk_aes.lock);
	list_del(&cryp->aes_list);
	spin_unlock(&mtk_aes.lock);
err_res:
	mtk_aes_record_free(cryp);
err_record:
	dev_err(cryp->dev, "mtk-aes initialization failed.\n");
	return ret;
}
void mtk_cipher_alg_release(struct mtk_cryp *cryp)
{
	spin_lock(&mtk_aes.lock);
	list_del(&cryp->aes_list);
	spin_unlock(&mtk_aes.lock);

	mtk_aes_unregister_algs();
	mtk_aes_record_free(cryp);
}