// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for EIP97 AES acceleration.
 *
 * Copyright (c) 2016 Ryder Lee <ryder.lee@mediatek.com>
 *
 * Some ideas are from atmel-aes.c drivers.
 */

#include <crypto/aes.h>
#include <crypto/gcm.h>
#include <crypto/internal/skcipher.h>
#include "mtk-platform.h"

#define AES_QUEUE_SIZE 512
#define AES_BUF_ORDER 2
#define AES_BUF_SIZE ((PAGE_SIZE << AES_BUF_ORDER) \
                      & ~(AES_BLOCK_SIZE - 1))
#define AES_MAX_STATE_BUF_SIZE SIZE_IN_WORDS(AES_KEYSIZE_256 + \
                                             AES_BLOCK_SIZE * 2)
#define AES_MAX_CT_SIZE 6

#define AES_CT_CTRL_HDR cpu_to_le32(0x00220000)

/* AES-CBC/ECB/CTR/OFB/CFB command token */
#define AES_CMD0 cpu_to_le32(0x05000000)
#define AES_CMD1 cpu_to_le32(0x2d060000)
#define AES_CMD2 cpu_to_le32(0xe4a63806)
/* AES-GCM command token */
#define AES_GCM_CMD0 cpu_to_le32(0x0b000000)
#define AES_GCM_CMD1 cpu_to_le32(0xa0800000)
#define AES_GCM_CMD2 cpu_to_le32(0x25000010)
#define AES_GCM_CMD3 cpu_to_le32(0x0f020000)
#define AES_GCM_CMD4 cpu_to_le32(0x21e60000)
#define AES_GCM_CMD5 cpu_to_le32(0x40e60000)
#define AES_GCM_CMD6 cpu_to_le32(0xd0070000)

/* AES transform information word 0 fields */
#define AES_TFM_BASIC_OUT cpu_to_le32(0x4 << 0)
#define AES_TFM_BASIC_IN cpu_to_le32(0x5 << 0)
#define AES_TFM_GCM_OUT cpu_to_le32(0x6 << 0)
#define AES_TFM_GCM_IN cpu_to_le32(0xf << 0)
#define AES_TFM_SIZE(x) cpu_to_le32((x) << 8)
#define AES_TFM_128BITS cpu_to_le32(0xb << 16)
#define AES_TFM_192BITS cpu_to_le32(0xd << 16)
#define AES_TFM_256BITS cpu_to_le32(0xf << 16)
#define AES_TFM_GHASH_DIGEST cpu_to_le32(0x2 << 21)
#define AES_TFM_GHASH cpu_to_le32(0x4 << 23)
/* AES transform information word 1 fields */
#define AES_TFM_ECB cpu_to_le32(0x0 << 0)
#define AES_TFM_CBC cpu_to_le32(0x1 << 0)
#define AES_TFM_OFB cpu_to_le32(0x4 << 0)
#define AES_TFM_CFB128 cpu_to_le32(0x5 << 0)
#define AES_TFM_CTR_INIT cpu_to_le32(0x2 << 0) /* init counter to 1 */
#define AES_TFM_CTR_LOAD cpu_to_le32(0x6 << 0) /* load/reuse counter */
#define AES_TFM_3IV cpu_to_le32(0x7 << 5) /* using IV 0-2 */
#define AES_TFM_FULL_IV cpu_to_le32(0xf << 5) /* using IV 0-3 */
#define AES_TFM_IV_CTR_MODE cpu_to_le32(0x1 << 10)
#define AES_TFM_ENC_HASH cpu_to_le32(0x1 << 17)

#define AES_FLAGS_CIPHER_MSK GENMASK(4, 0)
#define AES_FLAGS_ECB BIT(0)
#define AES_FLAGS_CBC BIT(1)
#define AES_FLAGS_CTR BIT(2)
#define AES_FLAGS_OFB BIT(3)
#define AES_FLAGS_CFB128 BIT(4)
#define AES_FLAGS_GCM BIT(5)
#define AES_FLAGS_ENCRYPT BIT(6)
#define AES_FLAGS_BUSY BIT(7)

#define AES_AUTH_TAG_ERR cpu_to_le32(BIT(26))

/**
 * mtk_aes_info - hardware information of AES
 * @cmd:   command token, hardware instruction
 * @tfm:   transform state of cipher algorithm.
 * @state: contains keys and initial vectors.
 *
 * Memory layout of GCM buffer:
 * /-----------\
 * |  AES KEY  | 128/196/256 bits
 * |-----------|
 * |  HASH KEY | a string of 128 zero bits encrypted using the block cipher
 * |-----------|
 * |    IVs    | 4 * 4 bytes
 * \-----------/
 *
 * The engine requires all of this information to:
 * - Decode commands and control the engine's data path.
 * - Coordinate hardware data fetch and store operations.
 * - Construct and output the result token.
 */
struct mtk_aes_info {
        __le32 cmd[AES_MAX_CT_SIZE];
        __le32 tfm[2];
        __le32 state[AES_MAX_STATE_BUF_SIZE];
};

struct mtk_aes_reqctx {
        u64 mode;
};

struct mtk_aes_base_ctx {
        struct mtk_cryp *cryp;
        u32 keylen;
        __le32 key[12];
        __le32 keymode;

        mtk_aes_fn start;

        struct mtk_aes_info info;
        dma_addr_t ct_dma;
        dma_addr_t tfm_dma;

        __le32 ct_hdr;
        u32 ct_size;
};

struct mtk_aes_ctx {
        struct mtk_aes_base_ctx base;
};

struct mtk_aes_ctr_ctx {
        struct mtk_aes_base_ctx base;

        u32 iv[AES_BLOCK_SIZE / sizeof(u32)];
        size_t offset;
        struct scatterlist src[2];
        struct scatterlist dst[2];
};

struct mtk_aes_gcm_ctx {
        struct mtk_aes_base_ctx base;

        u32 authsize;
        size_t textlen;

        struct crypto_skcipher *ctr;
};

struct mtk_aes_drv {
        struct list_head dev_list;
        /* Device list lock */
        spinlock_t lock;
};

static struct mtk_aes_drv mtk_aes = {
        .dev_list = LIST_HEAD_INIT(mtk_aes.dev_list),
        .lock = __SPIN_LOCK_UNLOCKED(mtk_aes.lock),
};

static inline u32 mtk_aes_read(struct mtk_cryp *cryp, u32 offset)
{
        return readl_relaxed(cryp->base + offset);
}

static inline void mtk_aes_write(struct mtk_cryp *cryp,
                                 u32 offset, u32 value)
{
        writel_relaxed(value, cryp->base + offset);
}

static struct mtk_cryp *mtk_aes_find_dev(struct mtk_aes_base_ctx *ctx)
{
        struct mtk_cryp *cryp = NULL;
        struct mtk_cryp *tmp;

        spin_lock_bh(&mtk_aes.lock);
        if (!ctx->cryp) {
                list_for_each_entry(tmp, &mtk_aes.dev_list, aes_list) {
                        cryp = tmp;
                        break;
                }
                ctx->cryp = cryp;
        } else {
                cryp = ctx->cryp;
        }
        spin_unlock_bh(&mtk_aes.lock);

        return cryp;
}

static inline size_t mtk_aes_padlen(size_t len)
{
        len &= AES_BLOCK_SIZE - 1;
        return len ? AES_BLOCK_SIZE - len : 0;
}
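
/*
 * Check whether a request scatterlist can be fed to the engine as-is: the
 * requested length must be a multiple of AES_BLOCK_SIZE and every entry must
 * be word-aligned. On success the number of entries used and the unused
 * remainder of the last entry are recorded in @dma, and the last entry is
 * truncated to the requested length.
 */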

static bool mtk_aes_check_aligned(struct scatterlist *sg, size_t len,
                                  struct mtk_aes_dma *dma)
{
        int nents;

        if (!IS_ALIGNED(len, AES_BLOCK_SIZE))
                return false;

        for (nents = 0; sg; sg = sg_next(sg), ++nents) {
                if (!IS_ALIGNED(sg->offset, sizeof(u32)))
                        return false;

                if (len <= sg->length) {
                        if (!IS_ALIGNED(len, AES_BLOCK_SIZE))
                                return false;

                        dma->nents = nents + 1;
                        dma->remainder = sg->length - len;
                        sg->length = len;
                        return true;
                }

                if (!IS_ALIGNED(sg->length, AES_BLOCK_SIZE))
                        return false;

                len -= sg->length;
        }

        return false;
}

static inline void mtk_aes_set_mode(struct mtk_aes_rec *aes,
                                    const struct mtk_aes_reqctx *rctx)
{
        /* Clear all but persistent flags and set request flags. */
        aes->flags = (aes->flags & AES_FLAGS_BUSY) | rctx->mode;
}
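
/*
 * Undo the scatterlist truncation done by mtk_aes_check_aligned(): walk to
 * the last entry that was used for DMA and add the saved remainder back to
 * its length.
 */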

static inline void mtk_aes_restore_sg(const struct mtk_aes_dma *dma)
{
        struct scatterlist *sg = dma->sg;
        int nents = dma->nents;

        if (!dma->remainder)
                return;

        while (--nents > 0 && sg)
                sg = sg_next(sg);

        if (!sg)
                return;

        sg->length += dma->remainder;
}

static inline void mtk_aes_write_state_le(__le32 *dst, const u32 *src, u32 size)
{
        u32 i;

        for (i = 0; i < SIZE_IN_WORDS(size); i++)
                dst[i] = cpu_to_le32(src[i]);
}

static inline void mtk_aes_write_state_be(__be32 *dst, const u32 *src, u32 size)
{
        u32 i;

        for (i = 0; i < SIZE_IN_WORDS(size); i++)
                dst[i] = cpu_to_be32(src[i]);
}

static inline int mtk_aes_complete(struct mtk_cryp *cryp,
                                   struct mtk_aes_rec *aes,
                                   int err)
{
        aes->flags &= ~AES_FLAGS_BUSY;
        aes->areq->complete(aes->areq, err);
        /* Handle new request */
        tasklet_schedule(&aes->queue_task);
        return err;
}

/*
 * Write descriptors for processing. This will configure the engine, load
 * the transform information and then start the packet processing.
 */
static int mtk_aes_xmit(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
        struct mtk_ring *ring = cryp->ring[aes->id];
        struct mtk_desc *cmd = NULL, *res = NULL;
        struct scatterlist *ssg = aes->src.sg, *dsg = aes->dst.sg;
        u32 slen = aes->src.sg_len, dlen = aes->dst.sg_len;
        int nents;

        /* Write command descriptors */
        for (nents = 0; nents < slen; ++nents, ssg = sg_next(ssg)) {
                cmd = ring->cmd_next;
                cmd->hdr = MTK_DESC_BUF_LEN(ssg->length);
                cmd->buf = cpu_to_le32(sg_dma_address(ssg));

                if (nents == 0) {
                        cmd->hdr |= MTK_DESC_FIRST |
                                    MTK_DESC_CT_LEN(aes->ctx->ct_size);
                        cmd->ct = cpu_to_le32(aes->ctx->ct_dma);
                        cmd->ct_hdr = aes->ctx->ct_hdr;
                        cmd->tfm = cpu_to_le32(aes->ctx->tfm_dma);
                }

                /* Shift ring buffer and check boundary */
                if (++ring->cmd_next == ring->cmd_base + MTK_DESC_NUM)
                        ring->cmd_next = ring->cmd_base;
        }
        cmd->hdr |= MTK_DESC_LAST;

        /* Prepare result descriptors */
        for (nents = 0; nents < dlen; ++nents, dsg = sg_next(dsg)) {
                res = ring->res_next;
                res->hdr = MTK_DESC_BUF_LEN(dsg->length);
                res->buf = cpu_to_le32(sg_dma_address(dsg));

                if (nents == 0)
                        res->hdr |= MTK_DESC_FIRST;

                /* Shift ring buffer and check boundary */
                if (++ring->res_next == ring->res_base + MTK_DESC_NUM)
                        ring->res_next = ring->res_base;
        }
        res->hdr |= MTK_DESC_LAST;

        /* Pointer to current result descriptor */
        ring->res_prev = res;

        /* Prepare enough space for authenticated tag */
        if (aes->flags & AES_FLAGS_GCM)
                res->hdr += AES_BLOCK_SIZE;

        /*
         * Make sure that all changes to the DMA ring are done before we
         * start engine.
         */
        wmb();
        /* Start DMA transfer */
        mtk_aes_write(cryp, RDR_PREP_COUNT(aes->id), MTK_DESC_CNT(dlen));
        mtk_aes_write(cryp, CDR_PREP_COUNT(aes->id), MTK_DESC_CNT(slen));

        return -EINPROGRESS;
}
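
/*
 * Unmap the transform info and the source/destination scatterlists after a
 * transfer. If a bounce buffer was used for the destination, copy the result
 * back into the caller's scatterlist.
 */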

static void mtk_aes_unmap(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
        struct mtk_aes_base_ctx *ctx = aes->ctx;

        dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(ctx->info),
                         DMA_TO_DEVICE);

        if (aes->src.sg == aes->dst.sg) {
                dma_unmap_sg(cryp->dev, aes->src.sg, aes->src.nents,
                             DMA_BIDIRECTIONAL);

                if (aes->src.sg != &aes->aligned_sg)
                        mtk_aes_restore_sg(&aes->src);
        } else {
                dma_unmap_sg(cryp->dev, aes->dst.sg, aes->dst.nents,
                             DMA_FROM_DEVICE);

                if (aes->dst.sg != &aes->aligned_sg)
                        mtk_aes_restore_sg(&aes->dst);

                dma_unmap_sg(cryp->dev, aes->src.sg, aes->src.nents,
                             DMA_TO_DEVICE);

                if (aes->src.sg != &aes->aligned_sg)
                        mtk_aes_restore_sg(&aes->src);
        }

        if (aes->dst.sg == &aes->aligned_sg)
                sg_copy_from_buffer(aes->real_dst, sg_nents(aes->real_dst),
                                    aes->buf, aes->total);
}
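
/*
 * Map the transform info and the source/destination scatterlists for DMA,
 * then hand the descriptors to the engine via mtk_aes_xmit().
 */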

static int mtk_aes_map(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
        struct mtk_aes_base_ctx *ctx = aes->ctx;
        struct mtk_aes_info *info = &ctx->info;

        ctx->ct_dma = dma_map_single(cryp->dev, info, sizeof(*info),
                                     DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(cryp->dev, ctx->ct_dma)))
                goto exit;

        ctx->tfm_dma = ctx->ct_dma + sizeof(info->cmd);

        if (aes->src.sg == aes->dst.sg) {
                aes->src.sg_len = dma_map_sg(cryp->dev, aes->src.sg,
                                             aes->src.nents,
                                             DMA_BIDIRECTIONAL);
                aes->dst.sg_len = aes->src.sg_len;
                if (unlikely(!aes->src.sg_len))
                        goto sg_map_err;
        } else {
                aes->src.sg_len = dma_map_sg(cryp->dev, aes->src.sg,
                                             aes->src.nents, DMA_TO_DEVICE);
                if (unlikely(!aes->src.sg_len))
                        goto sg_map_err;

                aes->dst.sg_len = dma_map_sg(cryp->dev, aes->dst.sg,
                                             aes->dst.nents, DMA_FROM_DEVICE);
                if (unlikely(!aes->dst.sg_len)) {
                        dma_unmap_sg(cryp->dev, aes->src.sg, aes->src.nents,
                                     DMA_TO_DEVICE);
                        goto sg_map_err;
                }
        }

        return mtk_aes_xmit(cryp, aes);

sg_map_err:
        dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(*info), DMA_TO_DEVICE);
exit:
        return mtk_aes_complete(cryp, aes, -EINVAL);
}

/* Initialize transform information of CBC/ECB/CTR/OFB/CFB mode */
static void mtk_aes_info_init(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
                              size_t len)
{
        struct skcipher_request *req = skcipher_request_cast(aes->areq);
        struct mtk_aes_base_ctx *ctx = aes->ctx;
        struct mtk_aes_info *info = &ctx->info;
        u32 cnt = 0;

        ctx->ct_hdr = AES_CT_CTRL_HDR | cpu_to_le32(len);
        info->cmd[cnt++] = AES_CMD0 | cpu_to_le32(len);
        info->cmd[cnt++] = AES_CMD1;

        info->tfm[0] = AES_TFM_SIZE(ctx->keylen) | ctx->keymode;
        if (aes->flags & AES_FLAGS_ENCRYPT)
                info->tfm[0] |= AES_TFM_BASIC_OUT;
        else
                info->tfm[0] |= AES_TFM_BASIC_IN;

        switch (aes->flags & AES_FLAGS_CIPHER_MSK) {
        case AES_FLAGS_CBC:
                info->tfm[1] = AES_TFM_CBC;
                break;
        case AES_FLAGS_ECB:
                info->tfm[1] = AES_TFM_ECB;
                goto ecb;
        case AES_FLAGS_CTR:
                info->tfm[1] = AES_TFM_CTR_LOAD;
                goto ctr;
        case AES_FLAGS_OFB:
                info->tfm[1] = AES_TFM_OFB;
                break;
        case AES_FLAGS_CFB128:
                info->tfm[1] = AES_TFM_CFB128;
                break;
        default:
                /* Should not happen... */
                return;
        }

        mtk_aes_write_state_le(info->state + ctx->keylen, (void *)req->iv,
                               AES_BLOCK_SIZE);
ctr:
        info->tfm[0] += AES_TFM_SIZE(SIZE_IN_WORDS(AES_BLOCK_SIZE));
        info->tfm[1] |= AES_TFM_FULL_IV;
        info->cmd[cnt++] = AES_CMD2;
ecb:
        ctx->ct_size = cnt;
}
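
/*
 * Set up the source/destination scatterlists for a transfer. Data that is
 * not block-aligned or word-aligned is copied into the pre-allocated bounce
 * buffer and padded to a multiple of AES_BLOCK_SIZE before mapping.
 */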

static int mtk_aes_dma(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
                       struct scatterlist *src, struct scatterlist *dst,
                       size_t len)
{
        size_t padlen = 0;
        bool src_aligned, dst_aligned;

        aes->total = len;
        aes->src.sg = src;
        aes->dst.sg = dst;
        aes->real_dst = dst;

        src_aligned = mtk_aes_check_aligned(src, len, &aes->src);
        if (src == dst)
                dst_aligned = src_aligned;
        else
                dst_aligned = mtk_aes_check_aligned(dst, len, &aes->dst);

        if (!src_aligned || !dst_aligned) {
                padlen = mtk_aes_padlen(len);

                if (len + padlen > AES_BUF_SIZE)
                        return mtk_aes_complete(cryp, aes, -ENOMEM);

                if (!src_aligned) {
                        sg_copy_to_buffer(src, sg_nents(src), aes->buf, len);
                        aes->src.sg = &aes->aligned_sg;
                        aes->src.nents = 1;
                        aes->src.remainder = 0;
                }

                if (!dst_aligned) {
                        aes->dst.sg = &aes->aligned_sg;
                        aes->dst.nents = 1;
                        aes->dst.remainder = 0;
                }

                sg_init_table(&aes->aligned_sg, 1);
                sg_set_buf(&aes->aligned_sg, aes->buf, len + padlen);
        }

        mtk_aes_info_init(cryp, aes, len + padlen);

        return mtk_aes_map(cryp, aes);
}
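
/*
 * Enqueue a new request (if any) and, when the record is idle, dequeue the
 * next request, copy its key into the transform state buffer and start the
 * transfer.
 */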

static int mtk_aes_handle_queue(struct mtk_cryp *cryp, u8 id,
                                struct crypto_async_request *new_areq)
{
        struct mtk_aes_rec *aes = cryp->aes[id];
        struct crypto_async_request *areq, *backlog;
        struct mtk_aes_base_ctx *ctx;
        unsigned long flags;
        int ret = 0;

        spin_lock_irqsave(&aes->lock, flags);
        if (new_areq)
                ret = crypto_enqueue_request(&aes->queue, new_areq);
        if (aes->flags & AES_FLAGS_BUSY) {
                spin_unlock_irqrestore(&aes->lock, flags);
                return ret;
        }
        backlog = crypto_get_backlog(&aes->queue);
        areq = crypto_dequeue_request(&aes->queue);
        if (areq)
                aes->flags |= AES_FLAGS_BUSY;
        spin_unlock_irqrestore(&aes->lock, flags);

        if (!areq)
                return ret;

        if (backlog)
                backlog->complete(backlog, -EINPROGRESS);

        ctx = crypto_tfm_ctx(areq->tfm);
        /* Write key into state buffer */
        memcpy(ctx->info.state, ctx->key, sizeof(ctx->key));

        aes->areq = areq;
        aes->ctx = ctx;

        return ctx->start(cryp, aes);
}

static int mtk_aes_transfer_complete(struct mtk_cryp *cryp,
                                     struct mtk_aes_rec *aes)
{
        return mtk_aes_complete(cryp, aes, 0);
}

static int mtk_aes_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
        struct skcipher_request *req = skcipher_request_cast(aes->areq);
        struct mtk_aes_reqctx *rctx = skcipher_request_ctx(req);

        mtk_aes_set_mode(aes, rctx);
        aes->resume = mtk_aes_transfer_complete;

        return mtk_aes_dma(cryp, aes, req->src, req->dst, req->cryptlen);
}

static inline struct mtk_aes_ctr_ctx *
mtk_aes_ctr_ctx_cast(struct mtk_aes_base_ctx *ctx)
{
        return container_of(ctx, struct mtk_aes_ctr_ctx, base);
}
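
/*
 * Process the next chunk of a CTR request. The hardware uses a 32-bit block
 * counter, so when the counter would wrap within a chunk the request is
 * split at the wrap point and the upper IV words are incremented by hand.
 */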

static int mtk_aes_ctr_transfer(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
        struct mtk_aes_base_ctx *ctx = aes->ctx;
        struct mtk_aes_ctr_ctx *cctx = mtk_aes_ctr_ctx_cast(ctx);
        struct skcipher_request *req = skcipher_request_cast(aes->areq);
        struct scatterlist *src, *dst;
        u32 start, end, ctr, blocks;
        size_t datalen;
        bool fragmented = false;

        /* Check for transfer completion. */
        cctx->offset += aes->total;
        if (cctx->offset >= req->cryptlen)
                return mtk_aes_transfer_complete(cryp, aes);

        /* Compute data length. */
        datalen = req->cryptlen - cctx->offset;
        blocks = DIV_ROUND_UP(datalen, AES_BLOCK_SIZE);
        ctr = be32_to_cpu(cctx->iv[3]);

        /* Check 32bit counter overflow. */
        start = ctr;
        end = start + blocks - 1;
        if (end < start) {
                ctr = 0xffffffff;
                datalen = AES_BLOCK_SIZE * -start;
                fragmented = true;
        }

        /* Jump to offset. */
        src = scatterwalk_ffwd(cctx->src, req->src, cctx->offset);
        dst = ((req->src == req->dst) ? src :
               scatterwalk_ffwd(cctx->dst, req->dst, cctx->offset));

        /* Write IVs into transform state buffer. */
        mtk_aes_write_state_le(ctx->info.state + ctx->keylen, cctx->iv,
                               AES_BLOCK_SIZE);

        if (unlikely(fragmented)) {
                /*
                 * Increment the counter manually to cope with the hardware
                 * counter overflow.
                 */
                cctx->iv[3] = cpu_to_be32(ctr);
                crypto_inc((u8 *)cctx->iv, AES_BLOCK_SIZE);
        }

        return mtk_aes_dma(cryp, aes, src, dst, datalen);
}

static int mtk_aes_ctr_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
        struct mtk_aes_ctr_ctx *cctx = mtk_aes_ctr_ctx_cast(aes->ctx);
        struct skcipher_request *req = skcipher_request_cast(aes->areq);
        struct mtk_aes_reqctx *rctx = skcipher_request_ctx(req);

        mtk_aes_set_mode(aes, rctx);

        memcpy(cctx->iv, req->iv, AES_BLOCK_SIZE);
        cctx->offset = 0;
        aes->total = 0;
        aes->resume = mtk_aes_ctr_transfer;

        return mtk_aes_ctr_transfer(cryp, aes);
}

/* Check and set the AES key to transform state buffer */
static int mtk_aes_setkey(struct crypto_skcipher *tfm,
                          const u8 *key, u32 keylen)
{
        struct mtk_aes_base_ctx *ctx = crypto_skcipher_ctx(tfm);

        switch (keylen) {
        case AES_KEYSIZE_128:
                ctx->keymode = AES_TFM_128BITS;
                break;
        case AES_KEYSIZE_192:
                ctx->keymode = AES_TFM_192BITS;
                break;
        case AES_KEYSIZE_256:
                ctx->keymode = AES_TFM_256BITS;
                break;
        default:
                return -EINVAL;
        }

        ctx->keylen = SIZE_IN_WORDS(keylen);
        mtk_aes_write_state_le(ctx->key, (const u32 *)key, keylen);

        return 0;
}

static int mtk_aes_crypt(struct skcipher_request *req, u64 mode)
{
        struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
        struct mtk_aes_base_ctx *ctx = crypto_skcipher_ctx(skcipher);
        struct mtk_aes_reqctx *rctx;
        struct mtk_cryp *cryp;

        cryp = mtk_aes_find_dev(ctx);
        if (!cryp)
                return -ENODEV;

        rctx = skcipher_request_ctx(req);
        rctx->mode = mode;

        return mtk_aes_handle_queue(cryp, !(mode & AES_FLAGS_ENCRYPT),
                                    &req->base);
}

static int mtk_aes_ecb_encrypt(struct skcipher_request *req)
{
        return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_ECB);
}

static int mtk_aes_ecb_decrypt(struct skcipher_request *req)
{
        return mtk_aes_crypt(req, AES_FLAGS_ECB);
}

static int mtk_aes_cbc_encrypt(struct skcipher_request *req)
{
        return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CBC);
}

static int mtk_aes_cbc_decrypt(struct skcipher_request *req)
{
        return mtk_aes_crypt(req, AES_FLAGS_CBC);
}

static int mtk_aes_ctr_encrypt(struct skcipher_request *req)
{
        return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CTR);
}

static int mtk_aes_ctr_decrypt(struct skcipher_request *req)
{
        return mtk_aes_crypt(req, AES_FLAGS_CTR);
}

static int mtk_aes_ofb_encrypt(struct skcipher_request *req)
{
        return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_OFB);
}

static int mtk_aes_ofb_decrypt(struct skcipher_request *req)
{
        return mtk_aes_crypt(req, AES_FLAGS_OFB);
}

static int mtk_aes_cfb_encrypt(struct skcipher_request *req)
{
        return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CFB128);
}

static int mtk_aes_cfb_decrypt(struct skcipher_request *req)
{
        return mtk_aes_crypt(req, AES_FLAGS_CFB128);
}

static int mtk_aes_init_tfm(struct crypto_skcipher *tfm)
{
        struct mtk_aes_ctx *ctx = crypto_skcipher_ctx(tfm);

        crypto_skcipher_set_reqsize(tfm, sizeof(struct mtk_aes_reqctx));
        ctx->base.start = mtk_aes_start;

        return 0;
}

static int mtk_aes_ctr_init_tfm(struct crypto_skcipher *tfm)
{
        struct mtk_aes_ctx *ctx = crypto_skcipher_ctx(tfm);

        crypto_skcipher_set_reqsize(tfm, sizeof(struct mtk_aes_reqctx));
        ctx->base.start = mtk_aes_ctr_start;

        return 0;
}

static struct skcipher_alg aes_algs[] = {
{
        .base.cra_name = "cbc(aes)",
        .base.cra_driver_name = "cbc-aes-mtk",
        .base.cra_priority = 400,
        .base.cra_flags = CRYPTO_ALG_ASYNC,
        .base.cra_blocksize = AES_BLOCK_SIZE,
        .base.cra_ctxsize = sizeof(struct mtk_aes_ctx),
        .base.cra_alignmask = 0xf,
        .base.cra_module = THIS_MODULE,

        .min_keysize = AES_MIN_KEY_SIZE,
        .max_keysize = AES_MAX_KEY_SIZE,
        .setkey = mtk_aes_setkey,
        .encrypt = mtk_aes_cbc_encrypt,
        .decrypt = mtk_aes_cbc_decrypt,
        .ivsize = AES_BLOCK_SIZE,
        .init = mtk_aes_init_tfm,
},
{
        .base.cra_name = "ecb(aes)",
        .base.cra_driver_name = "ecb-aes-mtk",
        .base.cra_priority = 400,
        .base.cra_flags = CRYPTO_ALG_ASYNC,
        .base.cra_blocksize = AES_BLOCK_SIZE,
        .base.cra_ctxsize = sizeof(struct mtk_aes_ctx),
        .base.cra_alignmask = 0xf,
        .base.cra_module = THIS_MODULE,

        .min_keysize = AES_MIN_KEY_SIZE,
        .max_keysize = AES_MAX_KEY_SIZE,
        .setkey = mtk_aes_setkey,
        .encrypt = mtk_aes_ecb_encrypt,
        .decrypt = mtk_aes_ecb_decrypt,
        .init = mtk_aes_init_tfm,
},
{
        .base.cra_name = "ctr(aes)",
        .base.cra_driver_name = "ctr-aes-mtk",
        .base.cra_priority = 400,
        .base.cra_flags = CRYPTO_ALG_ASYNC,
        .base.cra_blocksize = 1,
        .base.cra_ctxsize = sizeof(struct mtk_aes_ctx),
        .base.cra_alignmask = 0xf,
        .base.cra_module = THIS_MODULE,

        .min_keysize = AES_MIN_KEY_SIZE,
        .max_keysize = AES_MAX_KEY_SIZE,
        .ivsize = AES_BLOCK_SIZE,
        .setkey = mtk_aes_setkey,
        .encrypt = mtk_aes_ctr_encrypt,
        .decrypt = mtk_aes_ctr_decrypt,
        .init = mtk_aes_ctr_init_tfm,
},
{
        .base.cra_name = "ofb(aes)",
        .base.cra_driver_name = "ofb-aes-mtk",
        .base.cra_priority = 400,
        .base.cra_flags = CRYPTO_ALG_ASYNC,
        .base.cra_blocksize = AES_BLOCK_SIZE,
        .base.cra_ctxsize = sizeof(struct mtk_aes_ctx),
        .base.cra_alignmask = 0xf,
        .base.cra_module = THIS_MODULE,

        .min_keysize = AES_MIN_KEY_SIZE,
        .max_keysize = AES_MAX_KEY_SIZE,
        .ivsize = AES_BLOCK_SIZE,
        .setkey = mtk_aes_setkey,
        .encrypt = mtk_aes_ofb_encrypt,
        .decrypt = mtk_aes_ofb_decrypt,
        .init = mtk_aes_init_tfm,
},
{
        .base.cra_name = "cfb(aes)",
        .base.cra_driver_name = "cfb-aes-mtk",
        .base.cra_priority = 400,
        .base.cra_flags = CRYPTO_ALG_ASYNC,
        .base.cra_blocksize = 1,
        .base.cra_ctxsize = sizeof(struct mtk_aes_ctx),
        .base.cra_alignmask = 0xf,
        .base.cra_module = THIS_MODULE,

        .min_keysize = AES_MIN_KEY_SIZE,
        .max_keysize = AES_MAX_KEY_SIZE,
        .ivsize = AES_BLOCK_SIZE,
        .setkey = mtk_aes_setkey,
        .encrypt = mtk_aes_cfb_encrypt,
        .decrypt = mtk_aes_cfb_decrypt,
        .init = mtk_aes_init_tfm,
},
};

static inline struct mtk_aes_gcm_ctx *
mtk_aes_gcm_ctx_cast(struct mtk_aes_base_ctx *ctx)
{
        return container_of(ctx, struct mtk_aes_gcm_ctx, base);
}

/*
 * The engine verifies and compares the tag automatically, so we only need
 * to check the returned status, which is stored in the result descriptor.
 */
static int mtk_aes_gcm_tag_verify(struct mtk_cryp *cryp,
                                  struct mtk_aes_rec *aes)
{
        u32 status = cryp->ring[aes->id]->res_prev->ct;

        return mtk_aes_complete(cryp, aes, (status & AES_AUTH_TAG_ERR) ?
                                -EBADMSG : 0);
}

/* Initialize transform information of GCM mode */
static void mtk_aes_gcm_info_init(struct mtk_cryp *cryp,
                                  struct mtk_aes_rec *aes,
                                  size_t len)
{
        struct aead_request *req = aead_request_cast(aes->areq);
        struct mtk_aes_base_ctx *ctx = aes->ctx;
        struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);
        struct mtk_aes_info *info = &ctx->info;
        u32 ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
        u32 cnt = 0;

        ctx->ct_hdr = AES_CT_CTRL_HDR | len;

        info->cmd[cnt++] = AES_GCM_CMD0 | cpu_to_le32(req->assoclen);
        info->cmd[cnt++] = AES_GCM_CMD1 | cpu_to_le32(req->assoclen);
        info->cmd[cnt++] = AES_GCM_CMD2;
        info->cmd[cnt++] = AES_GCM_CMD3 | cpu_to_le32(gctx->textlen);

        if (aes->flags & AES_FLAGS_ENCRYPT) {
                info->cmd[cnt++] = AES_GCM_CMD4 | cpu_to_le32(gctx->authsize);
                info->tfm[0] = AES_TFM_GCM_OUT;
        } else {
                info->cmd[cnt++] = AES_GCM_CMD5 | cpu_to_le32(gctx->authsize);
                info->cmd[cnt++] = AES_GCM_CMD6 | cpu_to_le32(gctx->authsize);
                info->tfm[0] = AES_TFM_GCM_IN;
        }
        ctx->ct_size = cnt;

        info->tfm[0] |= AES_TFM_GHASH_DIGEST | AES_TFM_GHASH | AES_TFM_SIZE(
                        ctx->keylen + SIZE_IN_WORDS(AES_BLOCK_SIZE + ivsize)) |
                        ctx->keymode;
        info->tfm[1] = AES_TFM_CTR_INIT | AES_TFM_IV_CTR_MODE | AES_TFM_3IV |
                       AES_TFM_ENC_HASH;

        mtk_aes_write_state_le(info->state + ctx->keylen + SIZE_IN_WORDS(
                               AES_BLOCK_SIZE), (const u32 *)req->iv, ivsize);
}
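
/*
 * GCM variant of mtk_aes_dma(): stage unaligned data in the bounce buffer
 * (sized for the payload plus the authentication tag) and map it for DMA.
 */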

static int mtk_aes_gcm_dma(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
                           struct scatterlist *src, struct scatterlist *dst,
                           size_t len)
{
        bool src_aligned, dst_aligned;

        aes->src.sg = src;
        aes->dst.sg = dst;
        aes->real_dst = dst;

        src_aligned = mtk_aes_check_aligned(src, len, &aes->src);
        if (src == dst)
                dst_aligned = src_aligned;
        else
                dst_aligned = mtk_aes_check_aligned(dst, len, &aes->dst);

        if (!src_aligned || !dst_aligned) {
                if (aes->total > AES_BUF_SIZE)
                        return mtk_aes_complete(cryp, aes, -ENOMEM);

                if (!src_aligned) {
                        sg_copy_to_buffer(src, sg_nents(src), aes->buf, len);
                        aes->src.sg = &aes->aligned_sg;
                        aes->src.nents = 1;
                        aes->src.remainder = 0;
                }

                if (!dst_aligned) {
                        aes->dst.sg = &aes->aligned_sg;
                        aes->dst.nents = 1;
                        aes->dst.remainder = 0;
                }

                sg_init_table(&aes->aligned_sg, 1);
                sg_set_buf(&aes->aligned_sg, aes->buf, aes->total);
        }

        mtk_aes_gcm_info_init(cryp, aes, len);

        return mtk_aes_map(cryp, aes);
}
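
/*
 * Kick off a GCM transfer. On encryption the transfer length is extended so
 * the engine can append the authentication tag; on decryption the resume
 * handler verifies the tag reported in the result descriptor.
 */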

static int mtk_aes_gcm_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
        struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(aes->ctx);
        struct aead_request *req = aead_request_cast(aes->areq);
        struct mtk_aes_reqctx *rctx = aead_request_ctx(req);
        u32 len = req->assoclen + req->cryptlen;

        mtk_aes_set_mode(aes, rctx);

        if (aes->flags & AES_FLAGS_ENCRYPT) {
                u32 tag[4];

                aes->resume = mtk_aes_transfer_complete;
                /* Compute total process length. */
                aes->total = len + gctx->authsize;
                /* Hardware will append authenticated tag to output buffer */
                scatterwalk_map_and_copy(tag, req->dst, len, gctx->authsize, 1);
        } else {
                aes->resume = mtk_aes_gcm_tag_verify;
                aes->total = len;
        }

        return mtk_aes_gcm_dma(cryp, aes, req->src, req->dst, len);
}

static int mtk_aes_gcm_crypt(struct aead_request *req, u64 mode)
{
        struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
        struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);
        struct mtk_aes_reqctx *rctx = aead_request_ctx(req);
        struct mtk_cryp *cryp;
        bool enc = !!(mode & AES_FLAGS_ENCRYPT);

        cryp = mtk_aes_find_dev(ctx);
        if (!cryp)
                return -ENODEV;

        /* Compute text length. */
        gctx->textlen = req->cryptlen - (enc ? 0 : gctx->authsize);

        /* Empty messages are not supported yet */
        if (!gctx->textlen && !req->assoclen)
                return -EINVAL;

        rctx->mode = AES_FLAGS_GCM | mode;

        return mtk_aes_handle_queue(cryp, enc, &req->base);
}

/*
 * Because of a hardware limitation, we need to pre-calculate the key(H)
 * for the GHASH operation. The result of that encryption operation
 * needs to be stored in the transform state buffer.
 */
static int mtk_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key,
                              u32 keylen)
{
        struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(aead);
        struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);
        struct crypto_skcipher *ctr = gctx->ctr;
        struct {
                u32 hash[4];
                u8 iv[16];

                struct crypto_wait wait;

                struct scatterlist sg[1];
                struct skcipher_request req;
        } *data;
        int err;

        switch (keylen) {
        case AES_KEYSIZE_128:
                ctx->keymode = AES_TFM_128BITS;
                break;
        case AES_KEYSIZE_192:
                ctx->keymode = AES_TFM_192BITS;
                break;
        case AES_KEYSIZE_256:
                ctx->keymode = AES_TFM_256BITS;
                break;
        default:
                return -EINVAL;
        }

        ctx->keylen = SIZE_IN_WORDS(keylen);

        /* Same as crypto_gcm_setkey() from crypto/gcm.c */
        crypto_skcipher_clear_flags(ctr, CRYPTO_TFM_REQ_MASK);
        crypto_skcipher_set_flags(ctr, crypto_aead_get_flags(aead) &
                                  CRYPTO_TFM_REQ_MASK);
        err = crypto_skcipher_setkey(ctr, key, keylen);
        if (err)
                return err;

        data = kzalloc(sizeof(*data) + crypto_skcipher_reqsize(ctr),
                       GFP_KERNEL);
        if (!data)
                return -ENOMEM;

        crypto_init_wait(&data->wait);
        sg_init_one(data->sg, &data->hash, AES_BLOCK_SIZE);
        skcipher_request_set_tfm(&data->req, ctr);
        skcipher_request_set_callback(&data->req, CRYPTO_TFM_REQ_MAY_SLEEP |
                                      CRYPTO_TFM_REQ_MAY_BACKLOG,
                                      crypto_req_done, &data->wait);
        skcipher_request_set_crypt(&data->req, data->sg, data->sg,
                                   AES_BLOCK_SIZE, data->iv);

        /* Encrypt one all-zero block to derive the GHASH key H */
        err = crypto_wait_req(crypto_skcipher_encrypt(&data->req),
                              &data->wait);
        if (err)
                goto out;

        mtk_aes_write_state_le(ctx->key, (const u32 *)key, keylen);
        mtk_aes_write_state_be(ctx->key + ctx->keylen, data->hash,
                               AES_BLOCK_SIZE);
out:
        kfree_sensitive(data);
        return err;
}

static int mtk_aes_gcm_setauthsize(struct crypto_aead *aead,
                                   u32 authsize)
{
        struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(aead);
        struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);

        /* Same as crypto_gcm_authsize() from crypto/gcm.c */
        switch (authsize) {
        case 8:
        case 12:
        case 16:
                break;
        default:
                return -EINVAL;
        }

        gctx->authsize = authsize;
        return 0;
}

static int mtk_aes_gcm_encrypt(struct aead_request *req)
{
        return mtk_aes_gcm_crypt(req, AES_FLAGS_ENCRYPT);
}

static int mtk_aes_gcm_decrypt(struct aead_request *req)
{
        return mtk_aes_gcm_crypt(req, 0);
}

static int mtk_aes_gcm_init(struct crypto_aead *aead)
{
        struct mtk_aes_gcm_ctx *ctx = crypto_aead_ctx(aead);

        ctx->ctr = crypto_alloc_skcipher("ctr(aes)", 0,
                                         CRYPTO_ALG_ASYNC);
        if (IS_ERR(ctx->ctr)) {
                pr_err("Error allocating ctr(aes)\n");
                return PTR_ERR(ctx->ctr);
        }

        crypto_aead_set_reqsize(aead, sizeof(struct mtk_aes_reqctx));
        ctx->base.start = mtk_aes_gcm_start;
        return 0;
}

static void mtk_aes_gcm_exit(struct crypto_aead *aead)
{
        struct mtk_aes_gcm_ctx *ctx = crypto_aead_ctx(aead);

        crypto_free_skcipher(ctx->ctr);
}

static struct aead_alg aes_gcm_alg = {
        .setkey = mtk_aes_gcm_setkey,
        .setauthsize = mtk_aes_gcm_setauthsize,
        .encrypt = mtk_aes_gcm_encrypt,
        .decrypt = mtk_aes_gcm_decrypt,
        .init = mtk_aes_gcm_init,
        .exit = mtk_aes_gcm_exit,
        .ivsize = GCM_AES_IV_SIZE,
        .maxauthsize = AES_BLOCK_SIZE,

        .base = {
                .cra_name = "gcm(aes)",
                .cra_driver_name = "gcm-aes-mtk",
                .cra_priority = 400,
                .cra_flags = CRYPTO_ALG_ASYNC,
                .cra_blocksize = 1,
                .cra_ctxsize = sizeof(struct mtk_aes_gcm_ctx),
                .cra_alignmask = 0xf,
                .cra_module = THIS_MODULE,
        },
};

static void mtk_aes_queue_task(unsigned long data)
{
        struct mtk_aes_rec *aes = (struct mtk_aes_rec *)data;

        mtk_aes_handle_queue(aes->cryp, aes->id, NULL);
}

static void mtk_aes_done_task(unsigned long data)
{
        struct mtk_aes_rec *aes = (struct mtk_aes_rec *)data;
        struct mtk_cryp *cryp = aes->cryp;

        mtk_aes_unmap(cryp, aes);
        aes->resume(cryp, aes);
}
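
/*
 * Ring interrupt handler: acknowledge the ring status, reset the processed
 * descriptor count and defer completion handling to the done tasklet.
 */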

static irqreturn_t mtk_aes_irq(int irq, void *dev_id)
{
        struct mtk_aes_rec *aes = (struct mtk_aes_rec *)dev_id;
        struct mtk_cryp *cryp = aes->cryp;
        u32 val = mtk_aes_read(cryp, RDR_STAT(aes->id));

        mtk_aes_write(cryp, RDR_STAT(aes->id), val);

        if (likely(AES_FLAGS_BUSY & aes->flags)) {
                mtk_aes_write(cryp, RDR_PROC_COUNT(aes->id), MTK_CNT_RST);
                mtk_aes_write(cryp, RDR_THRESH(aes->id),
                              MTK_RDR_PROC_THRESH | MTK_RDR_PROC_MODE);

                tasklet_schedule(&aes->done_task);
        } else {
                dev_warn(cryp->dev, "AES interrupt when no active requests.\n");
        }
        return IRQ_HANDLED;
}

/*
 * The purpose of creating encryption and decryption records is to process
 * outbound/inbound data in parallel, which can improve performance in most
 * use cases, such as IPsec VPN, especially under heavy network traffic.
 */
static int mtk_aes_record_init(struct mtk_cryp *cryp)
{
        struct mtk_aes_rec **aes = cryp->aes;
        int i, err = -ENOMEM;

        for (i = 0; i < MTK_REC_NUM; i++) {
                aes[i] = kzalloc(sizeof(**aes), GFP_KERNEL);
                if (!aes[i])
                        goto err_cleanup;

                aes[i]->buf = (void *)__get_free_pages(GFP_KERNEL,
                                                       AES_BUF_ORDER);
                if (!aes[i]->buf)
                        goto err_cleanup;

                aes[i]->cryp = cryp;

                spin_lock_init(&aes[i]->lock);
                crypto_init_queue(&aes[i]->queue, AES_QUEUE_SIZE);

                tasklet_init(&aes[i]->queue_task, mtk_aes_queue_task,
                             (unsigned long)aes[i]);
                tasklet_init(&aes[i]->done_task, mtk_aes_done_task,
                             (unsigned long)aes[i]);
        }

        /* Link to ring0 and ring1 respectively */
        aes[0]->id = MTK_RING0;
        aes[1]->id = MTK_RING1;

        return 0;

err_cleanup:
        for (; i--; ) {
                free_page((unsigned long)aes[i]->buf);
                kfree(aes[i]);
        }

        return err;
}

static void mtk_aes_record_free(struct mtk_cryp *cryp)
{
        int i;

        for (i = 0; i < MTK_REC_NUM; i++) {
                tasklet_kill(&cryp->aes[i]->done_task);
                tasklet_kill(&cryp->aes[i]->queue_task);

                free_page((unsigned long)cryp->aes[i]->buf);
                kfree(cryp->aes[i]);
        }
}

static void mtk_aes_unregister_algs(void)
{
        int i;

        crypto_unregister_aead(&aes_gcm_alg);

        for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
                crypto_unregister_skcipher(&aes_algs[i]);
}

static int mtk_aes_register_algs(void)
{
        int err, i;

        for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
                err = crypto_register_skcipher(&aes_algs[i]);
                if (err)
                        goto err_aes_algs;
        }

        err = crypto_register_aead(&aes_gcm_alg);
        if (err)
                goto err_aes_algs;

        return 0;

err_aes_algs:
        for (; i--; )
                crypto_unregister_skcipher(&aes_algs[i]);

        return err;
}

int mtk_cipher_alg_register(struct mtk_cryp *cryp)
{
        int ret;

        INIT_LIST_HEAD(&cryp->aes_list);

        /* Initialize two cipher records */
        ret = mtk_aes_record_init(cryp);
        if (ret)
                goto err_record;

        ret = devm_request_irq(cryp->dev, cryp->irq[MTK_RING0], mtk_aes_irq,
                               0, "mtk-aes", cryp->aes[0]);
        if (ret) {
                dev_err(cryp->dev, "unable to request AES irq.\n");
                goto err_res;
        }

        ret = devm_request_irq(cryp->dev, cryp->irq[MTK_RING1], mtk_aes_irq,
                               0, "mtk-aes", cryp->aes[1]);
        if (ret) {
                dev_err(cryp->dev, "unable to request AES irq.\n");
                goto err_res;
        }

        /* Enable ring0 and ring1 interrupt */
        mtk_aes_write(cryp, AIC_ENABLE_SET(MTK_RING0), MTK_IRQ_RDR0);
        mtk_aes_write(cryp, AIC_ENABLE_SET(MTK_RING1), MTK_IRQ_RDR1);

        spin_lock(&mtk_aes.lock);
        list_add_tail(&cryp->aes_list, &mtk_aes.dev_list);
        spin_unlock(&mtk_aes.lock);

        ret = mtk_aes_register_algs();
        if (ret)
                goto err_algs;

        return 0;

err_algs:
        spin_lock(&mtk_aes.lock);
        list_del(&cryp->aes_list);
        spin_unlock(&mtk_aes.lock);
err_res:
        mtk_aes_record_free(cryp);
err_record:
        dev_err(cryp->dev, "mtk-aes initialization failed.\n");
        return ret;
}

void mtk_cipher_alg_release(struct mtk_cryp *cryp)
{
        spin_lock(&mtk_aes.lock);
        list_del(&cryp->aes_list);
        spin_unlock(&mtk_aes.lock);

        mtk_aes_unregister_algs();
        mtk_aes_record_free(cryp);
}