// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for EIP97 AES acceleration.
 *
 * Copyright (c) 2016 Ryder Lee <ryder.lee@mediatek.com>
 *
 * Some ideas are from the atmel-aes.c driver.
 */

#include <crypto/aes.h>
#include <crypto/gcm.h>
#include <crypto/internal/skcipher.h>
#include "mtk-platform.h"
#define AES_QUEUE_SIZE		512
#define AES_BUF_ORDER		2
#define AES_BUF_SIZE		((PAGE_SIZE << AES_BUF_ORDER) \
				& ~(AES_BLOCK_SIZE - 1))
#define AES_MAX_STATE_BUF_SIZE	SIZE_IN_WORDS(AES_KEYSIZE_256 + \
				AES_BLOCK_SIZE * 2)
#define AES_MAX_CT_SIZE		6

#define AES_CT_CTRL_HDR		cpu_to_le32(0x00220000)
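
/*
 * Note (inferred from how these constants are used later in this file):
 * the values below are opaque EIP97 instruction and transform words. The
 * command header and AES_CMD0/AES_GCM_CMD0 are OR'ed with the request
 * byte length (e.g. ctx->ct_hdr = AES_CT_CTRL_HDR | cpu_to_le32(len)),
 * so they are best read as opcodes with a length field in the low bits.
 */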
/* AES-CBC/ECB/CTR/OFB/CFB command token */
#define AES_CMD0		cpu_to_le32(0x05000000)
#define AES_CMD1		cpu_to_le32(0x2d060000)
#define AES_CMD2		cpu_to_le32(0xe4a63806)
/* AES-GCM command token */
#define AES_GCM_CMD0		cpu_to_le32(0x0b000000)
#define AES_GCM_CMD1		cpu_to_le32(0xa0800000)
#define AES_GCM_CMD2		cpu_to_le32(0x25000010)
#define AES_GCM_CMD3		cpu_to_le32(0x0f020000)
#define AES_GCM_CMD4		cpu_to_le32(0x21e60000)
#define AES_GCM_CMD5		cpu_to_le32(0x40e60000)
#define AES_GCM_CMD6		cpu_to_le32(0xd0070000)
/* AES transform information word 0 fields */
#define AES_TFM_BASIC_OUT	cpu_to_le32(0x4 << 0)
#define AES_TFM_BASIC_IN	cpu_to_le32(0x5 << 0)
#define AES_TFM_GCM_OUT		cpu_to_le32(0x6 << 0)
#define AES_TFM_GCM_IN		cpu_to_le32(0xf << 0)
#define AES_TFM_SIZE(x)		cpu_to_le32((x) << 8)
#define AES_TFM_128BITS		cpu_to_le32(0xb << 16)
#define AES_TFM_192BITS		cpu_to_le32(0xd << 16)
#define AES_TFM_256BITS		cpu_to_le32(0xf << 16)
#define AES_TFM_GHASH_DIGEST	cpu_to_le32(0x2 << 21)
#define AES_TFM_GHASH		cpu_to_le32(0x4 << 23)
/* AES transform information word 1 fields */
#define AES_TFM_ECB		cpu_to_le32(0x0 << 0)
#define AES_TFM_CBC		cpu_to_le32(0x1 << 0)
#define AES_TFM_OFB		cpu_to_le32(0x4 << 0)
#define AES_TFM_CFB128		cpu_to_le32(0x5 << 0)
#define AES_TFM_CTR_INIT	cpu_to_le32(0x2 << 0)	/* init counter to 1 */
#define AES_TFM_CTR_LOAD	cpu_to_le32(0x6 << 0)	/* load/reuse counter */
#define AES_TFM_3IV		cpu_to_le32(0x7 << 5)	/* using IV 0-2 */
#define AES_TFM_FULL_IV		cpu_to_le32(0xf << 5)	/* using IV 0-3 */
#define AES_TFM_IV_CTR_MODE	cpu_to_le32(0x1 << 10)
#define AES_TFM_ENC_HASH	cpu_to_le32(0x1 << 17)
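
/*
 * As used below: transform word 0 selects the operation (basic or GCM,
 * inbound or outbound), the key length and the transform record size,
 * while word 1 selects the block mode and how the IV/counter words are
 * consumed. This summary is inferred from how the fields are combined in
 * mtk_aes_info_init() and mtk_aes_gcm_info_init().
 */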
#define AES_FLAGS_CIPHER_MSK	GENMASK(4, 0)
#define AES_FLAGS_ECB		BIT(0)
#define AES_FLAGS_CBC		BIT(1)
#define AES_FLAGS_CTR		BIT(2)
#define AES_FLAGS_OFB		BIT(3)
#define AES_FLAGS_CFB128	BIT(4)
#define AES_FLAGS_GCM		BIT(5)
#define AES_FLAGS_ENCRYPT	BIT(6)
#define AES_FLAGS_BUSY		BIT(7)

#define AES_AUTH_TAG_ERR	cpu_to_le32(BIT(26))
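
/*
 * AES_AUTH_TAG_ERR is the authentication-failure bit in the result token
 * word written back by the engine; mtk_aes_gcm_tag_verify() below tests
 * it against ring->res_prev->ct after a GCM decryption.
 */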
/**
 * struct mtk_aes_info - hardware information of AES
 * @cmd:	command token, hardware instruction
 * @tfm:	transform state of cipher algorithm.
 * @state:	contains keys and initial vectors.
 *
 * Memory layout of GCM buffer:
 * /-----------\
 * | AES KEY   | 128/192/256 bits
 * |-----------|
 * | HASH KEY  | a string of 128 zero bits encrypted using the block cipher
 * |-----------|
 * | IVs       | 4 * 4 bytes
 * \-----------/
 *
 * The engine requires all of this information to do:
 * - Command decoding and control of the engine's data path.
 * - Coordinating hardware data fetch and store operations.
 * - Result token construction and output.
 */
struct mtk_aes_info {
	__le32 cmd[AES_MAX_CT_SIZE];
	__le32 tfm[2];
	__le32 state[AES_MAX_STATE_BUF_SIZE];
};
struct mtk_aes_reqctx {
	u64 mode;
};

struct mtk_aes_base_ctx {
	struct mtk_cryp *cryp;
	u32 keylen;
	/* AES key (up to 256 bits) followed by the GCM hash key */
	__le32 key[12];
	__le32 keymode;

	mtk_aes_fn start;

	struct mtk_aes_info info;
	dma_addr_t ct_dma;
	dma_addr_t tfm_dma;

	__le32 ct_hdr;
	u32 ct_size;
};

struct mtk_aes_ctx {
	struct mtk_aes_base_ctx	base;
};
struct mtk_aes_ctr_ctx {
	struct mtk_aes_base_ctx base;

	__be32 iv[AES_BLOCK_SIZE / sizeof(u32)];
	size_t offset;
	struct scatterlist src[2];
	struct scatterlist dst[2];
};
struct mtk_aes_gcm_ctx {
	struct mtk_aes_base_ctx base;

	u32 authsize;
	size_t textlen;
};

struct mtk_aes_drv {
	struct list_head dev_list;
	/* Device list lock */
	spinlock_t lock;
};
static struct mtk_aes_drv mtk_aes = {
	.dev_list = LIST_HEAD_INIT(mtk_aes.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(mtk_aes.lock),
};
static inline u32 mtk_aes_read(struct mtk_cryp *cryp, u32 offset)
{
	return readl_relaxed(cryp->base + offset);
}

static inline void mtk_aes_write(struct mtk_cryp *cryp,
				 u32 offset, u32 value)
{
	writel_relaxed(value, cryp->base + offset);
}
static struct mtk_cryp *mtk_aes_find_dev(struct mtk_aes_base_ctx *ctx)
{
	struct mtk_cryp *cryp = NULL;
	struct mtk_cryp *tmp;

	spin_lock_bh(&mtk_aes.lock);
	if (!ctx->cryp) {
		/* Bind the context to the first device on the list. */
		list_for_each_entry(tmp, &mtk_aes.dev_list, aes_list) {
			cryp = tmp;
			break;
		}
		ctx->cryp = cryp;
	} else {
		cryp = ctx->cryp;
	}
	spin_unlock_bh(&mtk_aes.lock);

	return cryp;
}
static inline size_t mtk_aes_padlen(size_t len)
{
	len &= AES_BLOCK_SIZE - 1;
	return len ? AES_BLOCK_SIZE - len : 0;
}
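
/*
 * For example, with the 16-byte AES block size: a 30-byte request needs
 * 2 bytes of padding, while a 32-byte request needs none. The pad is
 * added to the bounce-buffer length and to the length programmed into
 * the command token in mtk_aes_dma() below.
 */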
static bool mtk_aes_check_aligned(struct scatterlist *sg, size_t len,
				  struct mtk_aes_dma *dma)
{
	int nents;

	if (!IS_ALIGNED(len, AES_BLOCK_SIZE))
		return false;

	for (nents = 0; sg; sg = sg_next(sg), ++nents) {
		if (!IS_ALIGNED(sg->offset, sizeof(u32)))
			return false;

		if (len <= sg->length) {
			if (!IS_ALIGNED(len, AES_BLOCK_SIZE))
				return false;

			dma->nents = nents + 1;
			dma->remainder = sg->length - len;
			sg->length = len;
			return true;
		}

		if (!IS_ALIGNED(sg->length, AES_BLOCK_SIZE))
			return false;

		len -= sg->length;
	}

	return false;
}
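
/*
 * Besides checking alignment, the walk above clips the length of the last
 * scatterlist entry to the requested size and remembers the clipped bytes
 * in dma->remainder, so that mtk_aes_restore_sg() can undo the change once
 * the transfer has been unmapped.
 */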
static inline void mtk_aes_set_mode(struct mtk_aes_rec *aes,
				    const struct mtk_aes_reqctx *rctx)
{
	/* Clear all but persistent flags and set request flags. */
	aes->flags = (aes->flags & AES_FLAGS_BUSY) | rctx->mode;
}
static inline void mtk_aes_restore_sg(const struct mtk_aes_dma *dma)
{
	struct scatterlist *sg = dma->sg;
	int nents = dma->nents;

	if (!dma->remainder)
		return;

	while (--nents > 0 && sg)
		sg = sg_next(sg);

	if (!sg)
		return;

	sg->length += dma->remainder;
}
static inline int mtk_aes_complete(struct mtk_cryp *cryp,
				   struct mtk_aes_rec *aes,
				   int err)
{
	aes->flags &= ~AES_FLAGS_BUSY;
	aes->areq->complete(aes->areq, err);
	/* Handle new request */
	tasklet_schedule(&aes->queue_task);
	return err;
}
/*
 * Write descriptors for processing. This will configure the engine, load
 * the transform information and then start the packet processing.
 */
static int mtk_aes_xmit(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_ring *ring = cryp->ring[aes->id];
	struct mtk_desc *cmd = NULL, *res = NULL;
	struct scatterlist *ssg = aes->src.sg, *dsg = aes->dst.sg;
	u32 slen = aes->src.sg_len, dlen = aes->dst.sg_len;
	int nents;

	/* Write command descriptors */
	for (nents = 0; nents < slen; ++nents, ssg = sg_next(ssg)) {
		cmd = ring->cmd_next;
		cmd->hdr = MTK_DESC_BUF_LEN(ssg->length);
		cmd->buf = cpu_to_le32(sg_dma_address(ssg));

		if (nents == 0) {
			cmd->hdr |= MTK_DESC_FIRST |
				    MTK_DESC_CT_LEN(aes->ctx->ct_size);
			cmd->ct = cpu_to_le32(aes->ctx->ct_dma);
			cmd->ct_hdr = aes->ctx->ct_hdr;
			cmd->tfm = cpu_to_le32(aes->ctx->tfm_dma);
		}

		/* Shift ring buffer and check boundary */
		if (++ring->cmd_next == ring->cmd_base + MTK_DESC_NUM)
			ring->cmd_next = ring->cmd_base;
	}
	cmd->hdr |= MTK_DESC_LAST;

	/* Prepare result descriptors */
	for (nents = 0; nents < dlen; ++nents, dsg = sg_next(dsg)) {
		res = ring->res_next;
		res->hdr = MTK_DESC_BUF_LEN(dsg->length);
		res->buf = cpu_to_le32(sg_dma_address(dsg));

		if (nents == 0)
			res->hdr |= MTK_DESC_FIRST;

		/* Shift ring buffer and check boundary */
		if (++ring->res_next == ring->res_base + MTK_DESC_NUM)
			ring->res_next = ring->res_base;
	}
	res->hdr |= MTK_DESC_LAST;

	/* Pointer to current result descriptor */
	ring->res_prev = res;

	/* Prepare enough space for authenticated tag */
	if (aes->flags & AES_FLAGS_GCM)
		le32_add_cpu(&res->hdr, AES_BLOCK_SIZE);

	/*
	 * Make sure that all changes to the DMA ring are done before we
	 * start the engine.
	 */
	wmb();
	/* Start DMA transfer */
	mtk_aes_write(cryp, RDR_PREP_COUNT(aes->id), MTK_DESC_CNT(dlen));
	mtk_aes_write(cryp, CDR_PREP_COUNT(aes->id), MTK_DESC_CNT(slen));

	return -EINPROGRESS;
}
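
/*
 * Design note (inferred from the register writes above): the engine is
 * kicked by telling each ring how many freshly prepared descriptors it may
 * consume, CDR_PREP_COUNT for command descriptors and RDR_PREP_COUNT for
 * result descriptors; completion is then reported through the ring
 * interrupt serviced in mtk_aes_irq().
 */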
static void mtk_aes_unmap(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_aes_base_ctx *ctx = aes->ctx;

	dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(ctx->info),
			 DMA_TO_DEVICE);

	if (aes->src.sg == aes->dst.sg) {
		dma_unmap_sg(cryp->dev, aes->src.sg, aes->src.nents,
			     DMA_BIDIRECTIONAL);

		if (aes->src.sg != &aes->aligned_sg)
			mtk_aes_restore_sg(&aes->src);
	} else {
		dma_unmap_sg(cryp->dev, aes->dst.sg, aes->dst.nents,
			     DMA_FROM_DEVICE);

		if (aes->dst.sg != &aes->aligned_sg)
			mtk_aes_restore_sg(&aes->dst);

		dma_unmap_sg(cryp->dev, aes->src.sg, aes->src.nents,
			     DMA_TO_DEVICE);

		if (aes->src.sg != &aes->aligned_sg)
			mtk_aes_restore_sg(&aes->src);
	}

	if (aes->dst.sg == &aes->aligned_sg)
		sg_copy_from_buffer(aes->real_dst, sg_nents(aes->real_dst),
				    aes->buf, aes->total);
}
static int mtk_aes_map(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_aes_base_ctx *ctx = aes->ctx;
	struct mtk_aes_info *info = &ctx->info;

	ctx->ct_dma = dma_map_single(cryp->dev, info, sizeof(*info),
				     DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(cryp->dev, ctx->ct_dma)))
		goto exit;

	ctx->tfm_dma = ctx->ct_dma + sizeof(info->cmd);

	if (aes->src.sg == aes->dst.sg) {
		aes->src.sg_len = dma_map_sg(cryp->dev, aes->src.sg,
					     aes->src.nents,
					     DMA_BIDIRECTIONAL);
		aes->dst.sg_len = aes->src.sg_len;
		if (unlikely(!aes->src.sg_len))
			goto sg_map_err;
	} else {
		aes->src.sg_len = dma_map_sg(cryp->dev, aes->src.sg,
					     aes->src.nents, DMA_TO_DEVICE);
		if (unlikely(!aes->src.sg_len))
			goto sg_map_err;

		aes->dst.sg_len = dma_map_sg(cryp->dev, aes->dst.sg,
					     aes->dst.nents, DMA_FROM_DEVICE);
		if (unlikely(!aes->dst.sg_len)) {
			dma_unmap_sg(cryp->dev, aes->src.sg, aes->src.nents,
				     DMA_TO_DEVICE);
			goto sg_map_err;
		}
	}

	return mtk_aes_xmit(cryp, aes);

sg_map_err:
	dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(*info), DMA_TO_DEVICE);
exit:
	return mtk_aes_complete(cryp, aes, -EINVAL);
}
/* Initialize transform information of CBC/ECB/CTR/OFB/CFB mode */
static void mtk_aes_info_init(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
			      size_t len)
{
	struct skcipher_request *req = skcipher_request_cast(aes->areq);
	struct mtk_aes_base_ctx *ctx = aes->ctx;
	struct mtk_aes_info *info = &ctx->info;
	u32 cnt = 0;

	ctx->ct_hdr = AES_CT_CTRL_HDR | cpu_to_le32(len);
	info->cmd[cnt++] = AES_CMD0 | cpu_to_le32(len);
	info->cmd[cnt++] = AES_CMD1;

	info->tfm[0] = AES_TFM_SIZE(ctx->keylen) | ctx->keymode;
	if (aes->flags & AES_FLAGS_ENCRYPT)
		info->tfm[0] |= AES_TFM_BASIC_OUT;
	else
		info->tfm[0] |= AES_TFM_BASIC_IN;

	switch (aes->flags & AES_FLAGS_CIPHER_MSK) {
	case AES_FLAGS_CBC:
		info->tfm[1] = AES_TFM_CBC;
		break;
	case AES_FLAGS_ECB:
		info->tfm[1] = AES_TFM_ECB;
		goto ecb;
	case AES_FLAGS_CTR:
		info->tfm[1] = AES_TFM_CTR_LOAD;
		goto ctr;
	case AES_FLAGS_OFB:
		info->tfm[1] = AES_TFM_OFB;
		break;
	case AES_FLAGS_CFB128:
		info->tfm[1] = AES_TFM_CFB128;
		break;
	default:
		/* Should not happen... */
		return;
	}

	memcpy(info->state + ctx->keylen, req->iv, AES_BLOCK_SIZE);
ctr:
	le32_add_cpu(&info->tfm[0],
		     le32_to_cpu(AES_TFM_SIZE(SIZE_IN_WORDS(AES_BLOCK_SIZE))));
	info->tfm[1] |= AES_TFM_FULL_IV;
	info->cmd[cnt++] = AES_CMD2;
ecb:
	ctx->ct_size = cnt;
}
static int mtk_aes_dma(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
		       struct scatterlist *src, struct scatterlist *dst,
		       size_t len)
{
	size_t padlen = 0;
	bool src_aligned, dst_aligned;

	aes->total = len;
	aes->src.sg = src;
	aes->dst.sg = dst;
	aes->real_dst = dst;

	src_aligned = mtk_aes_check_aligned(src, len, &aes->src);
	if (src == dst)
		dst_aligned = src_aligned;
	else
		dst_aligned = mtk_aes_check_aligned(dst, len, &aes->dst);

	if (!src_aligned || !dst_aligned) {
		padlen = mtk_aes_padlen(len);

		if (len + padlen > AES_BUF_SIZE)
			return mtk_aes_complete(cryp, aes, -ENOMEM);

		if (!src_aligned) {
			sg_copy_to_buffer(src, sg_nents(src), aes->buf, len);
			aes->src.sg = &aes->aligned_sg;
			aes->src.nents = 1;
			aes->src.remainder = 0;
		}

		if (!dst_aligned) {
			aes->dst.sg = &aes->aligned_sg;
			aes->dst.nents = 1;
			aes->dst.remainder = 0;
		}

		sg_init_table(&aes->aligned_sg, 1);
		sg_set_buf(&aes->aligned_sg, aes->buf, len + padlen);
	}

	mtk_aes_info_init(cryp, aes, len + padlen);

	return mtk_aes_map(cryp, aes);
}
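
/*
 * Design note: when either scatterlist is unsuitable for the engine, the
 * request is linearized into the per-record bounce buffer (aes->buf, at
 * most AES_BUF_SIZE bytes) and padded to a block multiple; whenever the
 * destination was redirected to that bounce buffer, the result is copied
 * back to the caller's scatterlist in mtk_aes_unmap().
 */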
static int mtk_aes_handle_queue(struct mtk_cryp *cryp, u8 id,
				struct crypto_async_request *new_areq)
{
	struct mtk_aes_rec *aes = cryp->aes[id];
	struct crypto_async_request *areq, *backlog;
	struct mtk_aes_base_ctx *ctx;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&aes->lock, flags);
	if (new_areq)
		ret = crypto_enqueue_request(&aes->queue, new_areq);
	if (aes->flags & AES_FLAGS_BUSY) {
		spin_unlock_irqrestore(&aes->lock, flags);
		return ret;
	}
	backlog = crypto_get_backlog(&aes->queue);
	areq = crypto_dequeue_request(&aes->queue);
	if (areq)
		aes->flags |= AES_FLAGS_BUSY;
	spin_unlock_irqrestore(&aes->lock, flags);

	if (!areq)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	ctx = crypto_tfm_ctx(areq->tfm);
	/* Write key into state buffer */
	memcpy(ctx->info.state, ctx->key, sizeof(ctx->key));

	aes->areq = areq;
	aes->ctx = ctx;

	return ctx->start(cryp, aes);
}
static int mtk_aes_transfer_complete(struct mtk_cryp *cryp,
				     struct mtk_aes_rec *aes)
{
	return mtk_aes_complete(cryp, aes, 0);
}
static int mtk_aes_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct skcipher_request *req = skcipher_request_cast(aes->areq);
	struct mtk_aes_reqctx *rctx = skcipher_request_ctx(req);

	mtk_aes_set_mode(aes, rctx);
	aes->resume = mtk_aes_transfer_complete;

	return mtk_aes_dma(cryp, aes, req->src, req->dst, req->cryptlen);
}
static inline struct mtk_aes_ctr_ctx *
mtk_aes_ctr_ctx_cast(struct mtk_aes_base_ctx *ctx)
{
	return container_of(ctx, struct mtk_aes_ctr_ctx, base);
}
static int mtk_aes_ctr_transfer(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_aes_base_ctx *ctx = aes->ctx;
	struct mtk_aes_ctr_ctx *cctx = mtk_aes_ctr_ctx_cast(ctx);
	struct skcipher_request *req = skcipher_request_cast(aes->areq);
	struct scatterlist *src, *dst;
	u32 start, end, ctr, blocks;
	size_t datalen;
	bool fragmented = false;

	/* Check for transfer completion. */
	cctx->offset += aes->total;
	if (cctx->offset >= req->cryptlen)
		return mtk_aes_transfer_complete(cryp, aes);

	/* Compute data length. */
	datalen = req->cryptlen - cctx->offset;
	blocks = DIV_ROUND_UP(datalen, AES_BLOCK_SIZE);
	ctr = be32_to_cpu(cctx->iv[3]);

	/* Check 32bit counter overflow. */
	start = ctr;
	end = start + blocks - 1;
	if (end < start) {
		ctr = 0xffffffff;
		datalen = AES_BLOCK_SIZE * -start;
		fragmented = true;
	}

	/* Jump to offset. */
	src = scatterwalk_ffwd(cctx->src, req->src, cctx->offset);
	dst = ((req->src == req->dst) ? src :
	       scatterwalk_ffwd(cctx->dst, req->dst, cctx->offset));

	/* Write IVs into transform state buffer. */
	memcpy(ctx->info.state + ctx->keylen, cctx->iv, AES_BLOCK_SIZE);

	if (unlikely(fragmented)) {
		/*
		 * Increment the counter manually to cope with the hardware
		 * counter overflow.
		 */
		cctx->iv[3] = cpu_to_be32(ctr);
		crypto_inc((u8 *)cctx->iv, AES_BLOCK_SIZE);
	}

	return mtk_aes_dma(cryp, aes, src, dst, datalen);
}
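
/*
 * Worked example of the overflow handling above: with iv[3] = 0xfffffffe
 * and a request spanning four blocks, -start == 2, so only two blocks are
 * submitted first; the software then carries the increment into the upper
 * IV words with crypto_inc(), and the remaining blocks go out on the next
 * mtk_aes_ctr_transfer() pass (the resume callback set below).
 */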
static int mtk_aes_ctr_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_aes_ctr_ctx *cctx = mtk_aes_ctr_ctx_cast(aes->ctx);
	struct skcipher_request *req = skcipher_request_cast(aes->areq);
	struct mtk_aes_reqctx *rctx = skcipher_request_ctx(req);

	mtk_aes_set_mode(aes, rctx);

	memcpy(cctx->iv, req->iv, AES_BLOCK_SIZE);
	cctx->offset = 0;
	aes->total = 0;
	aes->resume = mtk_aes_ctr_transfer;

	return mtk_aes_ctr_transfer(cryp, aes);
}
/* Check and set the AES key to transform state buffer */
static int mtk_aes_setkey(struct crypto_skcipher *tfm,
			  const u8 *key, u32 keylen)
{
	struct mtk_aes_base_ctx *ctx = crypto_skcipher_ctx(tfm);

	switch (keylen) {
	case AES_KEYSIZE_128:
		ctx->keymode = AES_TFM_128BITS;
		break;
	case AES_KEYSIZE_192:
		ctx->keymode = AES_TFM_192BITS;
		break;
	case AES_KEYSIZE_256:
		ctx->keymode = AES_TFM_256BITS;
		break;
	default:
		return -EINVAL;
	}

	ctx->keylen = SIZE_IN_WORDS(keylen);
	memcpy(ctx->key, key, keylen);

	return 0;
}
static int mtk_aes_crypt(struct skcipher_request *req, u64 mode)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct mtk_aes_base_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct mtk_aes_reqctx *rctx;
	struct mtk_cryp *cryp;

	cryp = mtk_aes_find_dev(ctx);
	if (!cryp)
		return -ENODEV;

	rctx = skcipher_request_ctx(req);
	rctx->mode = mode;

	return mtk_aes_handle_queue(cryp, !(mode & AES_FLAGS_ENCRYPT),
				    &req->base);
}

static int mtk_aes_ecb_encrypt(struct skcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_ECB);
}

static int mtk_aes_ecb_decrypt(struct skcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_ECB);
}

static int mtk_aes_cbc_encrypt(struct skcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CBC);
}

static int mtk_aes_cbc_decrypt(struct skcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_CBC);
}

static int mtk_aes_ctr_encrypt(struct skcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CTR);
}

static int mtk_aes_ctr_decrypt(struct skcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_CTR);
}

static int mtk_aes_ofb_encrypt(struct skcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_OFB);
}

static int mtk_aes_ofb_decrypt(struct skcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_OFB);
}

static int mtk_aes_cfb_encrypt(struct skcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CFB128);
}

static int mtk_aes_cfb_decrypt(struct skcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_CFB128);
}
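
/*
 * Note on record selection: mtk_aes_crypt() above queues encryption
 * requests on record/ring 0 and decryption requests on record/ring 1 (the
 * second argument of mtk_aes_handle_queue() is the record index), which
 * matches the comment further down about processing outbound and inbound
 * traffic in parallel.
 */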
static int mtk_aes_init_tfm(struct crypto_skcipher *tfm)
{
	struct mtk_aes_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_skcipher_set_reqsize(tfm, sizeof(struct mtk_aes_reqctx));
	ctx->base.start = mtk_aes_start;

	return 0;
}

static int mtk_aes_ctr_init_tfm(struct crypto_skcipher *tfm)
{
	struct mtk_aes_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_skcipher_set_reqsize(tfm, sizeof(struct mtk_aes_reqctx));
	ctx->base.start = mtk_aes_ctr_start;

	return 0;
}
static struct skcipher_alg aes_algs[] = {
{
	.base.cra_name		= "cbc(aes)",
	.base.cra_driver_name	= "cbc-aes-mtk",
	.base.cra_priority	= 400,
	.base.cra_flags		= CRYPTO_ALG_ASYNC,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct mtk_aes_ctx),
	.base.cra_alignmask	= 0xf,
	.base.cra_module	= THIS_MODULE,

	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.setkey			= mtk_aes_setkey,
	.encrypt		= mtk_aes_cbc_encrypt,
	.decrypt		= mtk_aes_cbc_decrypt,
	.ivsize			= AES_BLOCK_SIZE,
	.init			= mtk_aes_init_tfm,
},
{
	.base.cra_name		= "ecb(aes)",
	.base.cra_driver_name	= "ecb-aes-mtk",
	.base.cra_priority	= 400,
	.base.cra_flags		= CRYPTO_ALG_ASYNC,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct mtk_aes_ctx),
	.base.cra_alignmask	= 0xf,
	.base.cra_module	= THIS_MODULE,

	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.setkey			= mtk_aes_setkey,
	.encrypt		= mtk_aes_ecb_encrypt,
	.decrypt		= mtk_aes_ecb_decrypt,
	.init			= mtk_aes_init_tfm,
},
{
	.base.cra_name		= "ctr(aes)",
	.base.cra_driver_name	= "ctr-aes-mtk",
	.base.cra_priority	= 400,
	.base.cra_flags		= CRYPTO_ALG_ASYNC,
	.base.cra_blocksize	= 1,
	.base.cra_ctxsize	= sizeof(struct mtk_aes_ctr_ctx),
	.base.cra_alignmask	= 0xf,
	.base.cra_module	= THIS_MODULE,

	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= mtk_aes_setkey,
	.encrypt		= mtk_aes_ctr_encrypt,
	.decrypt		= mtk_aes_ctr_decrypt,
	.init			= mtk_aes_ctr_init_tfm,
},
{
	.base.cra_name		= "ofb(aes)",
	.base.cra_driver_name	= "ofb-aes-mtk",
	.base.cra_priority	= 400,
	.base.cra_flags		= CRYPTO_ALG_ASYNC,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct mtk_aes_ctx),
	.base.cra_alignmask	= 0xf,
	.base.cra_module	= THIS_MODULE,

	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= mtk_aes_setkey,
	.encrypt		= mtk_aes_ofb_encrypt,
	.decrypt		= mtk_aes_ofb_decrypt,
	.init			= mtk_aes_init_tfm,
},
{
	.base.cra_name		= "cfb(aes)",
	.base.cra_driver_name	= "cfb-aes-mtk",
	.base.cra_priority	= 400,
	.base.cra_flags		= CRYPTO_ALG_ASYNC,
	.base.cra_blocksize	= 1,
	.base.cra_ctxsize	= sizeof(struct mtk_aes_ctx),
	.base.cra_alignmask	= 0xf,
	.base.cra_module	= THIS_MODULE,

	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= mtk_aes_setkey,
	.encrypt		= mtk_aes_cfb_encrypt,
	.decrypt		= mtk_aes_cfb_decrypt,
	.init			= mtk_aes_init_tfm,
},
};
static inline struct mtk_aes_gcm_ctx *
mtk_aes_gcm_ctx_cast(struct mtk_aes_base_ctx *ctx)
{
	return container_of(ctx, struct mtk_aes_gcm_ctx, base);
}

/*
 * The engine verifies and compares the tag automatically, so we just need
 * to check the returned status that is stored in the result descriptor.
 */
static int mtk_aes_gcm_tag_verify(struct mtk_cryp *cryp,
				  struct mtk_aes_rec *aes)
{
	__le32 status = cryp->ring[aes->id]->res_prev->ct;

	return mtk_aes_complete(cryp, aes, (status & AES_AUTH_TAG_ERR) ?
				-EBADMSG : 0);
}
/* Initialize transform information of GCM mode */
static void mtk_aes_gcm_info_init(struct mtk_cryp *cryp,
				  struct mtk_aes_rec *aes,
				  size_t len)
{
	struct aead_request *req = aead_request_cast(aes->areq);
	struct mtk_aes_base_ctx *ctx = aes->ctx;
	struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);
	struct mtk_aes_info *info = &ctx->info;
	u32 ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
	u32 cnt = 0;

	ctx->ct_hdr = AES_CT_CTRL_HDR | cpu_to_le32(len);

	info->cmd[cnt++] = AES_GCM_CMD0 | cpu_to_le32(req->assoclen);
	info->cmd[cnt++] = AES_GCM_CMD1 | cpu_to_le32(req->assoclen);
	info->cmd[cnt++] = AES_GCM_CMD2;
	info->cmd[cnt++] = AES_GCM_CMD3 | cpu_to_le32(gctx->textlen);

	if (aes->flags & AES_FLAGS_ENCRYPT) {
		info->cmd[cnt++] = AES_GCM_CMD4 | cpu_to_le32(gctx->authsize);
		info->tfm[0] = AES_TFM_GCM_OUT;
	} else {
		info->cmd[cnt++] = AES_GCM_CMD5 | cpu_to_le32(gctx->authsize);
		info->cmd[cnt++] = AES_GCM_CMD6 | cpu_to_le32(gctx->authsize);
		info->tfm[0] = AES_TFM_GCM_IN;
	}
	ctx->ct_size = cnt;

	info->tfm[0] |= AES_TFM_GHASH_DIGEST | AES_TFM_GHASH | AES_TFM_SIZE(
			ctx->keylen + SIZE_IN_WORDS(AES_BLOCK_SIZE + ivsize)) |
			ctx->keymode;
	info->tfm[1] = AES_TFM_CTR_INIT | AES_TFM_IV_CTR_MODE | AES_TFM_3IV |
		       AES_TFM_ENC_HASH;

	memcpy(info->state + ctx->keylen + SIZE_IN_WORDS(AES_BLOCK_SIZE),
	       req->iv, ivsize);
}
*cryp
, struct mtk_aes_rec
*aes
,
880 struct scatterlist
*src
, struct scatterlist
*dst
,
883 bool src_aligned
, dst_aligned
;
889 src_aligned
= mtk_aes_check_aligned(src
, len
, &aes
->src
);
891 dst_aligned
= src_aligned
;
893 dst_aligned
= mtk_aes_check_aligned(dst
, len
, &aes
->dst
);
895 if (!src_aligned
|| !dst_aligned
) {
896 if (aes
->total
> AES_BUF_SIZE
)
897 return mtk_aes_complete(cryp
, aes
, -ENOMEM
);
900 sg_copy_to_buffer(src
, sg_nents(src
), aes
->buf
, len
);
901 aes
->src
.sg
= &aes
->aligned_sg
;
903 aes
->src
.remainder
= 0;
907 aes
->dst
.sg
= &aes
->aligned_sg
;
909 aes
->dst
.remainder
= 0;
912 sg_init_table(&aes
->aligned_sg
, 1);
913 sg_set_buf(&aes
->aligned_sg
, aes
->buf
, aes
->total
);
916 mtk_aes_gcm_info_init(cryp
, aes
, len
);
918 return mtk_aes_map(cryp
, aes
);
static int mtk_aes_gcm_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(aes->ctx);
	struct aead_request *req = aead_request_cast(aes->areq);
	struct mtk_aes_reqctx *rctx = aead_request_ctx(req);
	u32 len = req->assoclen + req->cryptlen;

	mtk_aes_set_mode(aes, rctx);

	if (aes->flags & AES_FLAGS_ENCRYPT) {
		u32 tag[4];

		aes->resume = mtk_aes_transfer_complete;
		/* Compute total process length. */
		aes->total = len + gctx->authsize;
		/* Hardware will append authenticated tag to output buffer */
		scatterwalk_map_and_copy(tag, req->dst, len, gctx->authsize, 1);
	} else {
		aes->resume = mtk_aes_gcm_tag_verify;
		aes->total = len;
	}

	return mtk_aes_gcm_dma(cryp, aes, req->src, req->dst, len);
}
static int mtk_aes_gcm_crypt(struct aead_request *req, u64 mode)
{
	struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);
	struct mtk_aes_reqctx *rctx = aead_request_ctx(req);
	struct mtk_cryp *cryp;
	bool enc = !!(mode & AES_FLAGS_ENCRYPT);

	cryp = mtk_aes_find_dev(ctx);
	if (!cryp)
		return -ENODEV;

	/* Compute text length. */
	gctx->textlen = req->cryptlen - (enc ? 0 : gctx->authsize);

	/* Empty messages are not supported yet */
	if (!gctx->textlen && !req->assoclen)
		return -EINVAL;

	rctx->mode = AES_FLAGS_GCM | mode;

	return mtk_aes_handle_queue(cryp, enc, &req->base);
}
/*
 * Because of a hardware limitation, we need to pre-calculate the hash
 * key H for the GHASH operation. The result of that encryption needs to
 * be stored in the transform state buffer.
 */
static int mtk_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key,
			      u32 keylen)
{
	struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(aead);
	union {
		u32 x32[SIZE_IN_WORDS(AES_BLOCK_SIZE)];
		u8 x8[AES_BLOCK_SIZE];
	} hash = {};
	struct crypto_aes_ctx aes_ctx;
	int err;
	int i;

	switch (keylen) {
	case AES_KEYSIZE_128:
		ctx->keymode = AES_TFM_128BITS;
		break;
	case AES_KEYSIZE_192:
		ctx->keymode = AES_TFM_192BITS;
		break;
	case AES_KEYSIZE_256:
		ctx->keymode = AES_TFM_256BITS;
		break;
	default:
		return -EINVAL;
	}

	ctx->keylen = SIZE_IN_WORDS(keylen);

	err = aes_expandkey(&aes_ctx, key, keylen);
	if (err)
		return err;

	/* Compute H = E_K(0^128) using the generic AES library. */
	aes_encrypt(&aes_ctx, hash.x8, hash.x8);
	memzero_explicit(&aes_ctx, sizeof(aes_ctx));

	memcpy(ctx->key, key, keylen);

	/* The engine expects the GHASH key with its 32-bit words byte-swapped. */
	for (i = 0; i < SIZE_IN_WORDS(AES_BLOCK_SIZE); i++)
		hash.x32[i] = swab32(hash.x32[i]);

	memcpy(ctx->key + ctx->keylen, &hash, AES_BLOCK_SIZE);

	return 0;
}
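
/*
 * After a successful setkey, ctx->key therefore holds the raw AES key
 * followed by the byte-swapped hash key H; together with the IV this
 * matches the GCM state-buffer layout documented for struct mtk_aes_info
 * above, and mtk_aes_handle_queue() copies the whole block into
 * info.state before a transfer starts.
 */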
static int mtk_aes_gcm_setauthsize(struct crypto_aead *aead,
				   u32 authsize)
{
	struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(aead);
	struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);

	/* Same as crypto_gcm_authsize() from crypto/gcm.c */
	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	gctx->authsize = authsize;
	return 0;
}
*req
)
1045 return mtk_aes_gcm_crypt(req
, AES_FLAGS_ENCRYPT
);
1048 static int mtk_aes_gcm_decrypt(struct aead_request
*req
)
1050 return mtk_aes_gcm_crypt(req
, 0);
1053 static int mtk_aes_gcm_init(struct crypto_aead
*aead
)
1055 struct mtk_aes_gcm_ctx
*ctx
= crypto_aead_ctx(aead
);
1057 crypto_aead_set_reqsize(aead
, sizeof(struct mtk_aes_reqctx
));
1058 ctx
->base
.start
= mtk_aes_gcm_start
;
1062 static struct aead_alg aes_gcm_alg
= {
1063 .setkey
= mtk_aes_gcm_setkey
,
1064 .setauthsize
= mtk_aes_gcm_setauthsize
,
1065 .encrypt
= mtk_aes_gcm_encrypt
,
1066 .decrypt
= mtk_aes_gcm_decrypt
,
1067 .init
= mtk_aes_gcm_init
,
1068 .ivsize
= GCM_AES_IV_SIZE
,
1069 .maxauthsize
= AES_BLOCK_SIZE
,
1072 .cra_name
= "gcm(aes)",
1073 .cra_driver_name
= "gcm-aes-mtk",
1074 .cra_priority
= 400,
1075 .cra_flags
= CRYPTO_ALG_ASYNC
,
1077 .cra_ctxsize
= sizeof(struct mtk_aes_gcm_ctx
),
1078 .cra_alignmask
= 0xf,
1079 .cra_module
= THIS_MODULE
,
static void mtk_aes_queue_task(unsigned long data)
{
	struct mtk_aes_rec *aes = (struct mtk_aes_rec *)data;

	mtk_aes_handle_queue(aes->cryp, aes->id, NULL);
}

static void mtk_aes_done_task(unsigned long data)
{
	struct mtk_aes_rec *aes = (struct mtk_aes_rec *)data;
	struct mtk_cryp *cryp = aes->cryp;

	mtk_aes_unmap(cryp, aes);
	aes->resume(cryp, aes);
}
static irqreturn_t mtk_aes_irq(int irq, void *dev_id)
{
	struct mtk_aes_rec *aes = (struct mtk_aes_rec *)dev_id;
	struct mtk_cryp *cryp = aes->cryp;
	u32 val = mtk_aes_read(cryp, RDR_STAT(aes->id));

	mtk_aes_write(cryp, RDR_STAT(aes->id), val);

	if (likely(AES_FLAGS_BUSY & aes->flags)) {
		mtk_aes_write(cryp, RDR_PROC_COUNT(aes->id), MTK_CNT_RST);
		mtk_aes_write(cryp, RDR_THRESH(aes->id),
			      MTK_RDR_PROC_THRESH | MTK_RDR_PROC_MODE);

		tasklet_schedule(&aes->done_task);
	} else {
		dev_warn(cryp->dev, "AES interrupt when no active requests.\n");
	}
	return IRQ_HANDLED;
}
/*
 * The purpose of creating encryption and decryption records is to process
 * outbound and inbound data in parallel. This can improve performance in
 * most use cases, such as IPsec VPN, especially under heavy network traffic.
 */
static int mtk_aes_record_init(struct mtk_cryp *cryp)
{
	struct mtk_aes_rec **aes = cryp->aes;
	int i, err = -ENOMEM;

	for (i = 0; i < MTK_REC_NUM; i++) {
		aes[i] = kzalloc(sizeof(**aes), GFP_KERNEL);
		if (!aes[i])
			goto err_cleanup;

		aes[i]->buf = (void *)__get_free_pages(GFP_KERNEL,
						       AES_BUF_ORDER);
		if (!aes[i]->buf)
			goto err_cleanup;

		aes[i]->cryp = cryp;

		spin_lock_init(&aes[i]->lock);
		crypto_init_queue(&aes[i]->queue, AES_QUEUE_SIZE);

		tasklet_init(&aes[i]->queue_task, mtk_aes_queue_task,
			     (unsigned long)aes[i]);
		tasklet_init(&aes[i]->done_task, mtk_aes_done_task,
			     (unsigned long)aes[i]);
	}

	/* Link to ring0 and ring1 respectively */
	aes[0]->id = MTK_RING0;
	aes[1]->id = MTK_RING1;

	return 0;

err_cleanup:
	for (; i--; ) {
		free_pages((unsigned long)aes[i]->buf, AES_BUF_ORDER);
		kfree(aes[i]);
	}

	return err;
}
static void mtk_aes_record_free(struct mtk_cryp *cryp)
{
	int i;

	for (i = 0; i < MTK_REC_NUM; i++) {
		tasklet_kill(&cryp->aes[i]->done_task);
		tasklet_kill(&cryp->aes[i]->queue_task);

		free_pages((unsigned long)cryp->aes[i]->buf, AES_BUF_ORDER);
		kfree(cryp->aes[i]);
	}
}
static void mtk_aes_unregister_algs(void)
{
	int i;

	crypto_unregister_aead(&aes_gcm_alg);

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
		crypto_unregister_skcipher(&aes_algs[i]);
}

static int mtk_aes_register_algs(void)
{
	int err, i;

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
		err = crypto_register_skcipher(&aes_algs[i]);
		if (err)
			goto err_aes_algs;
	}

	err = crypto_register_aead(&aes_gcm_alg);
	if (err)
		goto err_aes_algs;

	return 0;

err_aes_algs:
	while (i--)
		crypto_unregister_skcipher(&aes_algs[i]);

	return err;
}
int mtk_cipher_alg_register(struct mtk_cryp *cryp)
{
	int ret;

	INIT_LIST_HEAD(&cryp->aes_list);

	/* Initialize two cipher records */
	ret = mtk_aes_record_init(cryp);
	if (ret)
		goto err_record;

	ret = devm_request_irq(cryp->dev, cryp->irq[MTK_RING0], mtk_aes_irq,
			       0, "mtk-aes", cryp->aes[0]);
	if (ret) {
		dev_err(cryp->dev, "unable to request AES irq.\n");
		goto err_res;
	}

	ret = devm_request_irq(cryp->dev, cryp->irq[MTK_RING1], mtk_aes_irq,
			       0, "mtk-aes", cryp->aes[1]);
	if (ret) {
		dev_err(cryp->dev, "unable to request AES irq.\n");
		goto err_res;
	}

	/* Enable ring0 and ring1 interrupt */
	mtk_aes_write(cryp, AIC_ENABLE_SET(MTK_RING0), MTK_IRQ_RDR0);
	mtk_aes_write(cryp, AIC_ENABLE_SET(MTK_RING1), MTK_IRQ_RDR1);

	spin_lock(&mtk_aes.lock);
	list_add_tail(&cryp->aes_list, &mtk_aes.dev_list);
	spin_unlock(&mtk_aes.lock);

	ret = mtk_aes_register_algs();
	if (ret)
		goto err_algs;

	return 0;

err_algs:
	spin_lock(&mtk_aes.lock);
	list_del(&cryp->aes_list);
	spin_unlock(&mtk_aes.lock);
err_res:
	mtk_aes_record_free(cryp);
err_record:
	dev_err(cryp->dev, "mtk-aes initialization failed.\n");
	return ret;
}
void mtk_cipher_alg_release(struct mtk_cryp *cryp)
{
	spin_lock(&mtk_aes.lock);
	list_del(&cryp->aes_list);
	spin_unlock(&mtk_aes.lock);

	mtk_aes_unregister_algs();
	mtk_aes_record_free(cryp);
}