// SPDX-License-Identifier: GPL-2.0-only
// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
/*
 * Crypto driver to handle block cipher algorithms using NVIDIA Security Engine.
 */

#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

#include <crypto/aead.h>
#include <crypto/aes.h>
#include <crypto/engine.h>
#include <crypto/gcm.h>
#include <crypto/scatterwalk.h>
#include <crypto/xts.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>

#include "tegra-se.h"

struct tegra_aes_ctx {
        struct tegra_se *se;
        u32 alg;
        u32 ivsize;
        u32 key1_id;
        u32 key2_id;
};

struct tegra_aes_reqctx {
        struct tegra_se_datbuf datbuf;
        bool encrypt;
        u32 config;
        u32 crypto_config;
        u32 len;
        u32 *iv;
};

struct tegra_aead_ctx {
        struct tegra_se *se;
        unsigned int authsize;
        u32 alg;
        u32 key_id;
};

struct tegra_aead_reqctx {
        struct tegra_se_datbuf inbuf;
        struct tegra_se_datbuf outbuf;
        struct scatterlist *src_sg;
        struct scatterlist *dst_sg;
        unsigned int assoclen;
        unsigned int cryptlen;
        unsigned int authsize;
        bool encrypt;
        u32 config;
        u32 crypto_config;
        u32 iv[4];
        u8 authdata[16];
};

struct tegra_cmac_ctx {
        struct tegra_se *se;
        unsigned int alg;
        u32 key_id;
        struct crypto_shash *fallback_tfm;
};

struct tegra_cmac_reqctx {
        struct scatterlist *src_sg;
        struct tegra_se_datbuf datbuf;
        struct tegra_se_datbuf residue;
        unsigned int total_len;
        unsigned int blk_size;
        unsigned int task;
        u32 config;
        u32 crypto_config;
        u32 result[CMAC_RESULT_REG_COUNT];
};

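/*
 * The helper below advances the big-endian CTR counter in place by 'nums'
 * blocks, propagating carries from the last byte upwards; e.g. with nums = 2
 * an IV ending in ...f8 ff becomes ...f9 01.
 */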
/* increment counter (128-bit int) */
static void ctr_iv_inc(__u8 *counter, __u8 bits, __u32 nums)
{
        do {
                --bits;
                nums += counter[bits];
                counter[bits] = nums & 0xff;
                nums >>= 8;
        } while (bits && nums);
}

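/*
 * For CBC the IV of a follow-up request is the last ciphertext block: on
 * encryption it is taken from the local bounce buffer, on decryption from
 * the still-unmodified source scatterlist.
 */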
static void tegra_cbc_iv_copyback(struct skcipher_request *req, struct tegra_aes_ctx *ctx)
{
        struct tegra_aes_reqctx *rctx = skcipher_request_ctx(req);
        unsigned int offset;

        offset = req->cryptlen - ctx->ivsize;

        if (rctx->encrypt)
                memcpy(req->iv, rctx->datbuf.buf + offset, ctx->ivsize);
        else
                scatterwalk_map_and_copy(req->iv, req->src, offset, ctx->ivsize, 0);
}

static void tegra_aes_update_iv(struct skcipher_request *req, struct tegra_aes_ctx *ctx)
{
        int num;

        if (ctx->alg == SE_ALG_CBC) {
                tegra_cbc_iv_copyback(req, ctx);
        } else if (ctx->alg == SE_ALG_CTR) {
                num = req->cryptlen / ctx->ivsize;
                if (req->cryptlen % ctx->ivsize)
                        num++;

                ctr_iv_inc(req->iv, ctx->ivsize, num);
        }
}

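/*
 * The next two helpers map an algorithm ID to the Tegra234 CRYPTO_CONFIG and
 * CONFIG register values for the requested direction.
 */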
static int tegra234_aes_crypto_cfg(u32 alg, bool encrypt)
{
        switch (alg) {
        case SE_ALG_CMAC:
        case SE_ALG_GMAC:
        case SE_ALG_GCM:
        case SE_ALG_GCM_FINAL:
                return 0;
        case SE_ALG_CBC:
                if (encrypt)
                        return SE_CRYPTO_CFG_CBC_ENCRYPT;
                else
                        return SE_CRYPTO_CFG_CBC_DECRYPT;
        case SE_ALG_ECB:
                if (encrypt)
                        return SE_CRYPTO_CFG_ECB_ENCRYPT;
                else
                        return SE_CRYPTO_CFG_ECB_DECRYPT;
        case SE_ALG_XTS:
                if (encrypt)
                        return SE_CRYPTO_CFG_XTS_ENCRYPT;
                else
                        return SE_CRYPTO_CFG_XTS_DECRYPT;
        case SE_ALG_CTR:
                return SE_CRYPTO_CFG_CTR;
        case SE_ALG_CBC_MAC:
                return SE_CRYPTO_CFG_CBC_MAC;
        }

        return -EINVAL;
}

static int tegra234_aes_cfg(u32 alg, bool encrypt)
{
        switch (alg) {
        case SE_ALG_CBC:
        case SE_ALG_ECB:
        case SE_ALG_XTS:
        case SE_ALG_CTR:
                if (encrypt)
                        return SE_CFG_AES_ENCRYPT;
                else
                        return SE_CFG_AES_DECRYPT;

        case SE_ALG_GMAC:
                if (encrypt)
                        return SE_CFG_GMAC_ENCRYPT;
                else
                        return SE_CFG_GMAC_DECRYPT;

        case SE_ALG_GCM:
                if (encrypt)
                        return SE_CFG_GCM_ENCRYPT;
                else
                        return SE_CFG_GCM_DECRYPT;

        case SE_ALG_GCM_FINAL:
                if (encrypt)
                        return SE_CFG_GCM_FINAL_ENCRYPT;
                else
                        return SE_CFG_GCM_FINAL_DECRYPT;

        case SE_ALG_CMAC:
                return SE_CFG_CMAC;

        case SE_ALG_CBC_MAC:
                return SE_AES_ENC_ALG_AES_ENC |
                       SE_AES_DST_HASH_REG;
        }

        return -EINVAL;
}

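/*
 * Build the host1x command stream for a block-cipher operation: IV (linear
 * counter) registers, last-block/residual-bits, CONFIG/CRYPTO_CONFIG, source
 * and destination DMA addresses (both pointing at the same bounce buffer),
 * the operation trigger and a syncpoint increment for completion signalling.
 */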
static unsigned int tegra_aes_prep_cmd(struct tegra_aes_ctx *ctx,
                                       struct tegra_aes_reqctx *rctx)
{
        unsigned int data_count, res_bits, i = 0, j;
        struct tegra_se *se = ctx->se;
        u32 *cpuvaddr = se->cmdbuf->addr;
        dma_addr_t addr = rctx->datbuf.addr;

        data_count = rctx->len / AES_BLOCK_SIZE;
        res_bits = (rctx->len % AES_BLOCK_SIZE) * 8;

        /*
         * Hardware processes data_count + 1 blocks.
         * Reduce 1 block if there is no residue
         */
        if (!res_bits)
                data_count--;

        if (rctx->iv) {
                cpuvaddr[i++] = host1x_opcode_setpayload(SE_CRYPTO_CTR_REG_COUNT);
                cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->linear_ctr);
                for (j = 0; j < SE_CRYPTO_CTR_REG_COUNT; j++)
                        cpuvaddr[i++] = rctx->iv[j];
        }

        cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1);
        cpuvaddr[i++] = SE_LAST_BLOCK_VAL(data_count) |
                        SE_LAST_BLOCK_RES_BITS(res_bits);

        cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6);
        cpuvaddr[i++] = rctx->config;
        cpuvaddr[i++] = rctx->crypto_config;

        /* Source address setting */
        cpuvaddr[i++] = lower_32_bits(addr);
        cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(addr)) | SE_ADDR_HI_SZ(rctx->len);

        /* Destination address setting */
        cpuvaddr[i++] = lower_32_bits(addr);
        cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(addr)) |
                        SE_ADDR_HI_SZ(rctx->len);

        cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
        cpuvaddr[i++] = SE_AES_OP_WRSTALL | SE_AES_OP_LASTBUF |
                        SE_AES_OP_START;

        cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
        cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
                        host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);

        dev_dbg(se->dev, "cfg %#x crypto cfg %#x\n", rctx->config, rctx->crypto_config);

        return i;
}

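/*
 * Crypto-engine callback for skcipher requests: stage the data in a
 * DMA-coherent bounce buffer, submit the prepared command to the SE, copy
 * the result back to the destination scatterlist and finalize the request
 * on the engine.
 */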
static int tegra_aes_do_one_req(struct crypto_engine *engine, void *areq)
{
        struct skcipher_request *req = container_of(areq, struct skcipher_request, base);
        struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
        struct tegra_aes_reqctx *rctx = skcipher_request_ctx(req);
        struct tegra_se *se = ctx->se;
        unsigned int cmdlen;
        int ret;

        rctx->datbuf.buf = dma_alloc_coherent(se->dev, SE_AES_BUFLEN,
                                              &rctx->datbuf.addr, GFP_KERNEL);
        if (!rctx->datbuf.buf)
                return -ENOMEM;

        rctx->datbuf.size = SE_AES_BUFLEN;
        rctx->iv = (u32 *)req->iv;
        rctx->len = req->cryptlen;

        /* Pad input to AES Block size */
        if (ctx->alg != SE_ALG_XTS) {
                if (rctx->len % AES_BLOCK_SIZE)
                        rctx->len += AES_BLOCK_SIZE - (rctx->len % AES_BLOCK_SIZE);
        }

        scatterwalk_map_and_copy(rctx->datbuf.buf, req->src, 0, req->cryptlen, 0);

        /* Prepare the command and submit for execution */
        cmdlen = tegra_aes_prep_cmd(ctx, rctx);
        ret = tegra_se_host1x_submit(se, cmdlen);

        /* Copy the result */
        tegra_aes_update_iv(req, ctx);
        scatterwalk_map_and_copy(rctx->datbuf.buf, req->dst, 0, req->cryptlen, 1);

        /* Free the buffer */
        dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN,
                          rctx->datbuf.buf, rctx->datbuf.addr);

        crypto_finalize_skcipher_request(se->engine, req, ret);

        return 0;
}

static int tegra_aes_cra_init(struct crypto_skcipher *tfm)
{
        struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
        struct tegra_se_alg *se_alg;
        const char *algname;
        int ret;

        se_alg = container_of(alg, struct tegra_se_alg, alg.skcipher.base);

        crypto_skcipher_set_reqsize(tfm, sizeof(struct tegra_aes_reqctx));

        ctx->ivsize = crypto_skcipher_ivsize(tfm);
        ctx->se = se_alg->se_dev;
        ctx->key1_id = 0;
        ctx->key2_id = 0;

        algname = crypto_tfm_alg_name(&tfm->base);
        ret = se_algname_to_algid(algname);
        if (ret < 0) {
                dev_err(ctx->se->dev, "invalid algorithm\n");
                return ret;
        }

        ctx->alg = ret;

        return 0;
}

static void tegra_aes_cra_exit(struct crypto_skcipher *tfm)
{
        struct tegra_aes_ctx *ctx = crypto_tfm_ctx(&tfm->base);

        if (ctx->key1_id)
                tegra_key_invalidate(ctx->se, ctx->key1_id, ctx->alg);

        if (ctx->key2_id)
                tegra_key_invalidate(ctx->se, ctx->key2_id, ctx->alg);
}

static int tegra_aes_setkey(struct crypto_skcipher *tfm,
                            const u8 *key, u32 keylen)
{
        struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(tfm);

        if (aes_check_keylen(keylen)) {
                dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen);
                return -EINVAL;
        }

        return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key1_id);
}

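/*
 * XTS carries two AES keys in one blob; after verification the halves are
 * loaded into two separate SE keyslots (key2 conventionally holding the
 * tweak key).
 */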
static int tegra_xts_setkey(struct crypto_skcipher *tfm,
                            const u8 *key, u32 keylen)
{
        struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
        u32 len = keylen / 2;
        int ret;

        ret = xts_verify_key(tfm, key, keylen);
        if (ret || aes_check_keylen(len)) {
                dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen);
                return -EINVAL;
        }

        ret = tegra_key_submit(ctx->se, key, len,
                               ctx->alg, &ctx->key1_id);
        if (ret)
                return ret;

        return tegra_key_submit(ctx->se, key + len, len,
                                ctx->alg, &ctx->key2_id);
}

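/*
 * Build the key access control (KAC) manifest word describing how a key
 * placed in an SE keyslot may be used: owner namespace, purpose and size.
 */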
static int tegra_aes_kac_manifest(u32 user, u32 alg, u32 keylen)
{
        int manifest;

        manifest = SE_KAC_USER_NS;

        switch (alg) {
        case SE_ALG_CBC:
        case SE_ALG_ECB:
        case SE_ALG_CTR:
                manifest |= SE_KAC_ENC;
                break;
        case SE_ALG_XTS:
                manifest |= SE_KAC_XTS;
                break;
        case SE_ALG_GCM:
                manifest |= SE_KAC_GCM;
                break;
        case SE_ALG_CMAC:
                manifest |= SE_KAC_CMAC;
                break;
        case SE_ALG_CBC_MAC:
                manifest |= SE_KAC_ENC;
                break;
        default:
                return -EINVAL;
        }

        switch (keylen) {
        case AES_KEYSIZE_128:
                manifest |= SE_KAC_SIZE_128;
                break;
        case AES_KEYSIZE_192:
                manifest |= SE_KAC_SIZE_192;
                break;
        case AES_KEYSIZE_256:
                manifest |= SE_KAC_SIZE_256;
                break;
        default:
                return -EINVAL;
        }

        return manifest;
}

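/* Validate the request length for the selected mode and queue it on the engine. */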
static int tegra_aes_crypt(struct skcipher_request *req, bool encrypt)
{
        struct crypto_skcipher *tfm;
        struct tegra_aes_ctx *ctx;
        struct tegra_aes_reqctx *rctx;

        tfm = crypto_skcipher_reqtfm(req);
        ctx = crypto_skcipher_ctx(tfm);
        rctx = skcipher_request_ctx(req);

        if (ctx->alg != SE_ALG_XTS) {
                if (!IS_ALIGNED(req->cryptlen, crypto_skcipher_blocksize(tfm))) {
                        dev_dbg(ctx->se->dev, "invalid length (%d)", req->cryptlen);
                        return -EINVAL;
                }
        } else if (req->cryptlen < XTS_BLOCK_SIZE) {
                dev_dbg(ctx->se->dev, "invalid length (%d)", req->cryptlen);
                return -EINVAL;
        }

        rctx->encrypt = encrypt;
        rctx->config = tegra234_aes_cfg(ctx->alg, encrypt);
        rctx->crypto_config = tegra234_aes_crypto_cfg(ctx->alg, encrypt);
        rctx->crypto_config |= SE_AES_KEY_INDEX(ctx->key1_id);

        if (ctx->key2_id)
                rctx->crypto_config |= SE_AES_KEY2_INDEX(ctx->key2_id);

        return crypto_transfer_skcipher_request_to_engine(ctx->se->engine, req);
}

static int tegra_aes_encrypt(struct skcipher_request *req)
{
        return tegra_aes_crypt(req, true);
}

static int tegra_aes_decrypt(struct skcipher_request *req)
{
        return tegra_aes_crypt(req, false);
}

static struct tegra_se_alg tegra_aes_algs[] = {
        {
                .alg.skcipher.op.do_one_request = tegra_aes_do_one_req,
                .alg.skcipher.base = {
                        .init = tegra_aes_cra_init,
                        .exit = tegra_aes_cra_exit,
                        .setkey = tegra_aes_setkey,
                        .encrypt = tegra_aes_encrypt,
                        .decrypt = tegra_aes_decrypt,
                        .min_keysize = AES_MIN_KEY_SIZE,
                        .max_keysize = AES_MAX_KEY_SIZE,
                        .ivsize = AES_BLOCK_SIZE,
                        .base = {
                                .cra_name = "cbc(aes)",
                                .cra_driver_name = "cbc-aes-tegra",
                                .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC,
                                .cra_blocksize = AES_BLOCK_SIZE,
                                .cra_ctxsize = sizeof(struct tegra_aes_ctx),
                                .cra_alignmask = 0xf,
                                .cra_module = THIS_MODULE,
                        },
                }
        }, {
                .alg.skcipher.op.do_one_request = tegra_aes_do_one_req,
                .alg.skcipher.base = {
                        .init = tegra_aes_cra_init,
                        .exit = tegra_aes_cra_exit,
                        .setkey = tegra_aes_setkey,
                        .encrypt = tegra_aes_encrypt,
                        .decrypt = tegra_aes_decrypt,
                        .min_keysize = AES_MIN_KEY_SIZE,
                        .max_keysize = AES_MAX_KEY_SIZE,
                        .base = {
                                .cra_name = "ecb(aes)",
                                .cra_driver_name = "ecb-aes-tegra",
                                .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC,
                                .cra_blocksize = AES_BLOCK_SIZE,
                                .cra_ctxsize = sizeof(struct tegra_aes_ctx),
                                .cra_alignmask = 0xf,
                                .cra_module = THIS_MODULE,
                        },
                }
        }, {
                .alg.skcipher.op.do_one_request = tegra_aes_do_one_req,
                .alg.skcipher.base = {
                        .init = tegra_aes_cra_init,
                        .exit = tegra_aes_cra_exit,
                        .setkey = tegra_aes_setkey,
                        .encrypt = tegra_aes_encrypt,
                        .decrypt = tegra_aes_decrypt,
                        .min_keysize = AES_MIN_KEY_SIZE,
                        .max_keysize = AES_MAX_KEY_SIZE,
                        .ivsize = AES_BLOCK_SIZE,
                        .base = {
                                .cra_name = "ctr(aes)",
                                .cra_driver_name = "ctr-aes-tegra",
                                .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC,
                                .cra_ctxsize = sizeof(struct tegra_aes_ctx),
                                .cra_alignmask = 0xf,
                                .cra_module = THIS_MODULE,
                        },
                }
        }, {
                .alg.skcipher.op.do_one_request = tegra_aes_do_one_req,
                .alg.skcipher.base = {
                        .init = tegra_aes_cra_init,
                        .exit = tegra_aes_cra_exit,
                        .setkey = tegra_xts_setkey,
                        .encrypt = tegra_aes_encrypt,
                        .decrypt = tegra_aes_decrypt,
                        .min_keysize = 2 * AES_MIN_KEY_SIZE,
                        .max_keysize = 2 * AES_MAX_KEY_SIZE,
                        .ivsize = AES_BLOCK_SIZE,
                        .base = {
                                .cra_name = "xts(aes)",
                                .cra_driver_name = "xts-aes-tegra",
                                .cra_blocksize = AES_BLOCK_SIZE,
                                .cra_ctxsize = sizeof(struct tegra_aes_ctx),
                                .cra_alignmask = (__alignof__(u64) - 1),
                                .cra_module = THIS_MODULE,
                        },
                }
        },
};

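/*
 * GCM is handled in up to three SE passes: a GMAC pass over the associated
 * data, a GCM pass that encrypts/decrypts the payload, and a GCM_FINAL pass
 * that produces the authentication tag.
 */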
static unsigned int tegra_gmac_prep_cmd(struct tegra_aead_ctx *ctx,
                                        struct tegra_aead_reqctx *rctx)
{
        unsigned int data_count, res_bits, i = 0;
        struct tegra_se *se = ctx->se;
        u32 *cpuvaddr = se->cmdbuf->addr;

        data_count = (rctx->assoclen / AES_BLOCK_SIZE);
        res_bits = (rctx->assoclen % AES_BLOCK_SIZE) * 8;

        /*
         * Hardware processes data_count + 1 blocks.
         * Reduce 1 block if there is no residue
         */
        if (!res_bits)
                data_count--;

        cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1);
        cpuvaddr[i++] = SE_LAST_BLOCK_VAL(data_count) |
                        SE_LAST_BLOCK_RES_BITS(res_bits);

        cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 4);
        cpuvaddr[i++] = rctx->config;
        cpuvaddr[i++] = rctx->crypto_config;
        cpuvaddr[i++] = lower_32_bits(rctx->inbuf.addr);
        cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->inbuf.addr)) |
                        SE_ADDR_HI_SZ(rctx->assoclen);

        cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
        cpuvaddr[i++] = SE_AES_OP_WRSTALL | SE_AES_OP_FINAL |
                        SE_AES_OP_INIT | SE_AES_OP_LASTBUF |
                        SE_AES_OP_START;

        cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
        cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
                        host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);

        return i;
}

static unsigned int tegra_gcm_crypt_prep_cmd(struct tegra_aead_ctx *ctx,
                                             struct tegra_aead_reqctx *rctx)
{
        unsigned int data_count, res_bits, i = 0, j;
        struct tegra_se *se = ctx->se;
        u32 *cpuvaddr = se->cmdbuf->addr, op;

        data_count = (rctx->cryptlen / AES_BLOCK_SIZE);
        res_bits = (rctx->cryptlen % AES_BLOCK_SIZE) * 8;
        op = SE_AES_OP_WRSTALL | SE_AES_OP_FINAL |
             SE_AES_OP_LASTBUF | SE_AES_OP_START;

        /*
         * If there is no assoc data,
         * this will be the init command
         */
        if (!rctx->assoclen)
                op |= SE_AES_OP_INIT;

        /*
         * Hardware processes data_count + 1 blocks.
         * Reduce 1 block if there is no residue
         */
        if (!res_bits)
                data_count--;

        cpuvaddr[i++] = host1x_opcode_setpayload(SE_CRYPTO_CTR_REG_COUNT);
        cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->linear_ctr);
        for (j = 0; j < SE_CRYPTO_CTR_REG_COUNT; j++)
                cpuvaddr[i++] = rctx->iv[j];

        cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1);
        cpuvaddr[i++] = SE_LAST_BLOCK_VAL(data_count) |
                        SE_LAST_BLOCK_RES_BITS(res_bits);

        cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6);
        cpuvaddr[i++] = rctx->config;
        cpuvaddr[i++] = rctx->crypto_config;

        /* Source Address */
        cpuvaddr[i++] = lower_32_bits(rctx->inbuf.addr);
        cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->inbuf.addr)) |
                        SE_ADDR_HI_SZ(rctx->cryptlen);

        /* Destination Address */
        cpuvaddr[i++] = lower_32_bits(rctx->outbuf.addr);
        cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->outbuf.addr)) |
                        SE_ADDR_HI_SZ(rctx->cryptlen);

        cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
        cpuvaddr[i++] = op;

        cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
        cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
                        host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);

        dev_dbg(se->dev, "cfg %#x crypto cfg %#x\n", rctx->config, rctx->crypto_config);

        return i;
}

static int tegra_gcm_prep_final_cmd(struct tegra_se *se, u32 *cpuvaddr,
                                    struct tegra_aead_reqctx *rctx)
{
        unsigned int i = 0, j;
        u32 op;

        op = SE_AES_OP_WRSTALL | SE_AES_OP_FINAL |
             SE_AES_OP_LASTBUF | SE_AES_OP_START;

        /*
         * Set init for zero sized vector
         */
        if (!rctx->assoclen && !rctx->cryptlen)
                op |= SE_AES_OP_INIT;

        cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->aad_len, 2);
        cpuvaddr[i++] = rctx->assoclen * 8;
        cpuvaddr[i++] = 0;

        cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->cryp_msg_len, 2);
        cpuvaddr[i++] = rctx->cryptlen * 8;
        cpuvaddr[i++] = 0;

        cpuvaddr[i++] = host1x_opcode_setpayload(SE_CRYPTO_CTR_REG_COUNT);
        cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->linear_ctr);
        for (j = 0; j < SE_CRYPTO_CTR_REG_COUNT; j++)
                cpuvaddr[i++] = rctx->iv[j];

        cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6);
        cpuvaddr[i++] = rctx->config;
        cpuvaddr[i++] = rctx->crypto_config;
        cpuvaddr[i++] = 0;
        cpuvaddr[i++] = 0;

        /* Destination Address */
        cpuvaddr[i++] = lower_32_bits(rctx->outbuf.addr);
        cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->outbuf.addr)) |
                        SE_ADDR_HI_SZ(0x10); /* HW always generates 128-bit tag */

        cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
        cpuvaddr[i++] = op;

        cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
        cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
                        host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);

        dev_dbg(se->dev, "cfg %#x crypto cfg %#x\n", rctx->config, rctx->crypto_config);

        return i;
}

static int tegra_gcm_do_gmac(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
{
        struct tegra_se *se = ctx->se;
        unsigned int cmdlen;

        scatterwalk_map_and_copy(rctx->inbuf.buf,
                                 rctx->src_sg, 0, rctx->assoclen, 0);

        rctx->config = tegra234_aes_cfg(SE_ALG_GMAC, rctx->encrypt);
        rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_GMAC, rctx->encrypt) |
                              SE_AES_KEY_INDEX(ctx->key_id);

        cmdlen = tegra_gmac_prep_cmd(ctx, rctx);

        return tegra_se_host1x_submit(se, cmdlen);
}

static int tegra_gcm_do_crypt(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
{
        struct tegra_se *se = ctx->se;
        int cmdlen, ret;

        scatterwalk_map_and_copy(rctx->inbuf.buf, rctx->src_sg,
                                 rctx->assoclen, rctx->cryptlen, 0);

        rctx->config = tegra234_aes_cfg(SE_ALG_GCM, rctx->encrypt);
        rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_GCM, rctx->encrypt) |
                              SE_AES_KEY_INDEX(ctx->key_id);

        /* Prepare command and submit */
        cmdlen = tegra_gcm_crypt_prep_cmd(ctx, rctx);
        ret = tegra_se_host1x_submit(se, cmdlen);
        if (ret)
                return ret;

        /* Copy the result */
        scatterwalk_map_and_copy(rctx->outbuf.buf, rctx->dst_sg,
                                 rctx->assoclen, rctx->cryptlen, 1);

        return 0;
}

static int tegra_gcm_do_final(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
{
        struct tegra_se *se = ctx->se;
        u32 *cpuvaddr = se->cmdbuf->addr;
        int cmdlen, ret, offset;

        rctx->config = tegra234_aes_cfg(SE_ALG_GCM_FINAL, rctx->encrypt);
        rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_GCM_FINAL, rctx->encrypt) |
                              SE_AES_KEY_INDEX(ctx->key_id);

        /* Prepare command and submit */
        cmdlen = tegra_gcm_prep_final_cmd(se, cpuvaddr, rctx);
        ret = tegra_se_host1x_submit(se, cmdlen);
        if (ret)
                return ret;

        if (rctx->encrypt) {
                /* Copy the result */
                offset = rctx->assoclen + rctx->cryptlen;
                scatterwalk_map_and_copy(rctx->outbuf.buf, rctx->dst_sg,
                                         offset, rctx->authsize, 1);
        }

        return 0;
}

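/*
 * On decryption, compare the tag computed by GCM_FINAL (left in the output
 * bounce buffer) against the tag carried at the end of the source
 * scatterlist.
 */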
static int tegra_gcm_do_verify(struct tegra_se *se, struct tegra_aead_reqctx *rctx)
{
        unsigned int offset;
        u8 mac[16];

        offset = rctx->assoclen + rctx->cryptlen;
        scatterwalk_map_and_copy(mac, rctx->src_sg, offset, rctx->authsize, 0);

        if (crypto_memneq(rctx->outbuf.buf, mac, rctx->authsize))
                return -EBADMSG;

        return 0;
}

static inline int tegra_ccm_check_iv(const u8 *iv)
{
        /* iv[0] gives value of q-1
         * 2 <= q <= 8 as per NIST 800-38C notation
         * 2 <= L <= 8, so 1 <= L' <= 7. as per rfc 3610 notation
         */
        if (iv[0] < 1 || iv[0] > 7) {
                pr_debug("ccm_check_iv failed %d\n", iv[0]);
                return -EINVAL;
        }

        return 0;
}

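/* Build the command stream for a CBC-MAC pass over the formatted CCM input. */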
static unsigned int tegra_cbcmac_prep_cmd(struct tegra_aead_ctx *ctx,
                                          struct tegra_aead_reqctx *rctx)
{
        unsigned int data_count, i = 0;
        struct tegra_se *se = ctx->se;
        u32 *cpuvaddr = se->cmdbuf->addr;

        data_count = (rctx->inbuf.size / AES_BLOCK_SIZE) - 1;

        cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1);
        cpuvaddr[i++] = SE_LAST_BLOCK_VAL(data_count);

        cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6);
        cpuvaddr[i++] = rctx->config;
        cpuvaddr[i++] = rctx->crypto_config;

        cpuvaddr[i++] = lower_32_bits(rctx->inbuf.addr);
        cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->inbuf.addr)) |
                        SE_ADDR_HI_SZ(rctx->inbuf.size);

        cpuvaddr[i++] = lower_32_bits(rctx->outbuf.addr);
        cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->outbuf.addr)) |
                        SE_ADDR_HI_SZ(0x10); /* HW always generates 128 bit tag */

        cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
        cpuvaddr[i++] = SE_AES_OP_WRSTALL |
                        SE_AES_OP_LASTBUF | SE_AES_OP_START;

        cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
        cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
                        host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);

        return i;
}

static unsigned int tegra_ctr_prep_cmd(struct tegra_aead_ctx *ctx,
                                       struct tegra_aead_reqctx *rctx)
{
        unsigned int i = 0, j;
        struct tegra_se *se = ctx->se;
        u32 *cpuvaddr = se->cmdbuf->addr;

        cpuvaddr[i++] = host1x_opcode_setpayload(SE_CRYPTO_CTR_REG_COUNT);
        cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->linear_ctr);
        for (j = 0; j < SE_CRYPTO_CTR_REG_COUNT; j++)
                cpuvaddr[i++] = rctx->iv[j];

        cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1);
        cpuvaddr[i++] = (rctx->inbuf.size / AES_BLOCK_SIZE) - 1;
        cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6);
        cpuvaddr[i++] = rctx->config;
        cpuvaddr[i++] = rctx->crypto_config;

        /* Source address setting */
        cpuvaddr[i++] = lower_32_bits(rctx->inbuf.addr);
        cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->inbuf.addr)) |
                        SE_ADDR_HI_SZ(rctx->inbuf.size);

        /* Destination address setting */
        cpuvaddr[i++] = lower_32_bits(rctx->outbuf.addr);
        cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->outbuf.addr)) |
                        SE_ADDR_HI_SZ(rctx->inbuf.size);

        cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
        cpuvaddr[i++] = SE_AES_OP_WRSTALL | SE_AES_OP_LASTBUF |
                        SE_AES_OP_START;

        cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
        cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
                        host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);

        dev_dbg(se->dev, "cfg %#x crypto cfg %#x\n",
                rctx->config, rctx->crypto_config);

        return i;
}

static int tegra_ccm_do_cbcmac(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
{
        struct tegra_se *se = ctx->se;
        unsigned int cmdlen;

        rctx->config = tegra234_aes_cfg(SE_ALG_CBC_MAC, rctx->encrypt);
        rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_CBC_MAC,
                                                      rctx->encrypt) |
                              SE_AES_KEY_INDEX(ctx->key_id);

        /* Prepare command and submit */
        cmdlen = tegra_cbcmac_prep_cmd(ctx, rctx);

        return tegra_se_host1x_submit(se, cmdlen);
}

static int tegra_ccm_set_msg_len(u8 *block, unsigned int msglen, int csize)
{
        __be32 data;

        memset(block, 0, csize);
        block += csize;

        if (csize >= 4)
                csize = 4;
        else if (msglen > (1 << (8 * csize)))
                return -EOVERFLOW;

        data = cpu_to_be32(msglen);
        memcpy(block - csize, (u8 *)&data + 4 - csize, csize);

        return 0;
}

static int tegra_ccm_format_nonce(struct tegra_aead_reqctx *rctx, u8 *nonce)
{
        unsigned int q, t;
        u8 *q_ptr, *iv = (u8 *)rctx->iv;

        memcpy(nonce, rctx->iv, 16);

        /*** 1. Prepare Flags Octet ***/

        /* Encode t (mac length) */
        t = rctx->authsize;
        nonce[0] |= (((t - 2) / 2) << 3);

        /* Adata */
        if (rctx->assoclen)
                nonce[0] |= (1 << 6);

        /*** Encode Q - message length ***/
        q = iv[0] + 1;
        q_ptr = nonce + 16 - q;

        return tegra_ccm_set_msg_len(q_ptr, rctx->cryptlen, q);
}

static int tegra_ccm_format_adata(u8 *adata, unsigned int a)
{
        int len = 0;

        /* add control info for associated data
         * RFC 3610 and NIST Special Publication 800-38C
         */
        if (a < 65280) {
                *(__be16 *)adata = cpu_to_be16(a);
                len = 2;
        } else {
                *(__be16 *)adata = cpu_to_be16(0xfffe);
                *(__be32 *)&adata[2] = cpu_to_be32(a);
                len = 6;
        }

        return len;
}

static int tegra_ccm_add_padding(u8 *buf, unsigned int len)
{
        unsigned int padlen = 16 - (len % 16);
        u8 padding[16] = {0};

        if (padlen == 16)
                return 0;

        memcpy(buf, padding, padlen);

        return padlen;
}

static int tegra_ccm_format_blocks(struct tegra_aead_reqctx *rctx)
{
        unsigned int alen = 0, offset = 0;
        u8 nonce[16], adata[16];
        int ret;

        ret = tegra_ccm_format_nonce(rctx, nonce);
        if (ret)
                return ret;

        memcpy(rctx->inbuf.buf, nonce, 16);
        offset += 16;

        if (rctx->assoclen) {
                alen = tegra_ccm_format_adata(adata, rctx->assoclen);
                memcpy(rctx->inbuf.buf + offset, adata, alen);
                offset += alen;

                scatterwalk_map_and_copy(rctx->inbuf.buf + offset,
                                         rctx->src_sg, 0, rctx->assoclen, 0);

                offset += rctx->assoclen;
                offset += tegra_ccm_add_padding(rctx->inbuf.buf + offset,
                                                rctx->assoclen + alen);
        }

        return offset;
}

static int tegra_ccm_mac_result(struct tegra_se *se, struct tegra_aead_reqctx *rctx)
{
        u32 result[CMAC_RESULT_REG_COUNT];
        int i, ret;

        /* Read and clear Result */
        for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
                result[i] = readl(se->base + se->hw->regs->result + (i * 4));

        for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
                writel(0, se->base + se->hw->regs->result + (i * 4));

        if (rctx->encrypt) {
                memcpy(rctx->authdata, result, rctx->authsize);
        } else {
                ret = crypto_memneq(rctx->authdata, result, rctx->authsize);
                if (ret)
                        return -EBADMSG;
        }

        return 0;
}

static int tegra_ccm_ctr_result(struct tegra_se *se, struct tegra_aead_reqctx *rctx)
{
        /* Copy result */
        scatterwalk_map_and_copy(rctx->outbuf.buf + 16, rctx->dst_sg,
                                 rctx->assoclen, rctx->cryptlen, 1);

        if (rctx->encrypt)
                scatterwalk_map_and_copy(rctx->outbuf.buf, rctx->dst_sg,
                                         rctx->assoclen + rctx->cryptlen,
                                         rctx->authsize, 1);
        else
                memcpy(rctx->authdata, rctx->outbuf.buf, rctx->authsize);

        return 0;
}

static int tegra_ccm_compute_auth(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
{
        struct tegra_se *se = ctx->se;
        struct scatterlist *sg;
        int offset, ret;

        offset = tegra_ccm_format_blocks(rctx);
        if (offset < 0)
                return -EINVAL;

        /* Copy plain text to the buffer */
        sg = rctx->encrypt ? rctx->src_sg : rctx->dst_sg;

        scatterwalk_map_and_copy(rctx->inbuf.buf + offset,
                                 sg, rctx->assoclen,
                                 rctx->cryptlen, 0);
        offset += rctx->cryptlen;
        offset += tegra_ccm_add_padding(rctx->inbuf.buf + offset, rctx->cryptlen);

        rctx->inbuf.size = offset;

        ret = tegra_ccm_do_cbcmac(ctx, rctx);
        if (ret)
                return ret;

        return tegra_ccm_mac_result(se, rctx);
}

static int tegra_ccm_do_ctr(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
{
        struct tegra_se *se = ctx->se;
        unsigned int cmdlen, offset = 0;
        struct scatterlist *sg = rctx->src_sg;
        int ret;

        rctx->config = tegra234_aes_cfg(SE_ALG_CTR, rctx->encrypt);
        rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_CTR, rctx->encrypt) |
                              SE_AES_KEY_INDEX(ctx->key_id);

        /* Copy authdata in the top of buffer for encryption/decryption */
        if (rctx->encrypt)
                memcpy(rctx->inbuf.buf, rctx->authdata, rctx->authsize);
        else
                scatterwalk_map_and_copy(rctx->inbuf.buf, sg,
                                         rctx->assoclen + rctx->cryptlen,
                                         rctx->authsize, 0);

        offset += rctx->authsize;
        offset += tegra_ccm_add_padding(rctx->inbuf.buf + offset, rctx->authsize);

        /* If there is no cryptlen, proceed to submit the task */
        if (rctx->cryptlen) {
                scatterwalk_map_and_copy(rctx->inbuf.buf + offset, sg,
                                         rctx->assoclen, rctx->cryptlen, 0);
                offset += rctx->cryptlen;
                offset += tegra_ccm_add_padding(rctx->inbuf.buf + offset, rctx->cryptlen);
        }

        rctx->inbuf.size = offset;

        /* Prepare command and submit */
        cmdlen = tegra_ctr_prep_cmd(ctx, rctx);
        ret = tegra_se_host1x_submit(se, cmdlen);
        if (ret)
                return ret;

        return tegra_ccm_ctr_result(se, rctx);
}

static int tegra_ccm_crypt_init(struct aead_request *req, struct tegra_se *se,
                                struct tegra_aead_reqctx *rctx)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        u8 *iv = (u8 *)rctx->iv;
        int ret, i;

        rctx->src_sg = req->src;
        rctx->dst_sg = req->dst;
        rctx->assoclen = req->assoclen;
        rctx->authsize = crypto_aead_authsize(tfm);

        memcpy(iv, req->iv, 16);

        ret = tegra_ccm_check_iv(iv);
        if (ret)
                return ret;

        /* Note: rfc 3610 and NIST 800-38C require counter (ctr_0) of
         * zero to encrypt auth tag.
         * req->iv has the formatted ctr_0 (i.e. Flags || N || 0).
         */
        memset(iv + 15 - iv[0], 0, iv[0] + 1);

        /* Clear any previous result */
        for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
                writel(0, se->base + se->hw->regs->result + (i * 4));

        return 0;
}

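/*
 * CCM combines CBC-MAC (authentication) with CTR (confidentiality). On
 * encryption the MAC over the formatted input is computed first and then
 * encrypted along with the payload; on decryption the CTR pass runs first
 * and the recovered MAC is checked against a freshly computed CBC-MAC.
 */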
static int tegra_ccm_do_one_req(struct crypto_engine *engine, void *areq)
{
        struct aead_request *req = container_of(areq, struct aead_request, base);
        struct tegra_aead_reqctx *rctx = aead_request_ctx(req);
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct tegra_se *se = ctx->se;
        int ret;

        /* Allocate buffers required */
        rctx->inbuf.buf = dma_alloc_coherent(ctx->se->dev, SE_AES_BUFLEN,
                                             &rctx->inbuf.addr, GFP_KERNEL);
        if (!rctx->inbuf.buf)
                return -ENOMEM;

        rctx->inbuf.size = SE_AES_BUFLEN;

        rctx->outbuf.buf = dma_alloc_coherent(ctx->se->dev, SE_AES_BUFLEN,
                                              &rctx->outbuf.addr, GFP_KERNEL);
        if (!rctx->outbuf.buf) {
                ret = -ENOMEM;
                goto outbuf_err;
        }

        rctx->outbuf.size = SE_AES_BUFLEN;

        ret = tegra_ccm_crypt_init(req, se, rctx);
        if (ret)
                goto out;

        if (rctx->encrypt) {
                rctx->cryptlen = req->cryptlen;

                /* CBC MAC Operation */
                ret = tegra_ccm_compute_auth(ctx, rctx);
                if (ret)
                        goto out;

                /* CTR operation */
                ret = tegra_ccm_do_ctr(ctx, rctx);
                if (ret)
                        goto out;
        } else {
                rctx->cryptlen = req->cryptlen - ctx->authsize;

                /* CTR operation */
                ret = tegra_ccm_do_ctr(ctx, rctx);
                if (ret)
                        goto out;

                /* CBC MAC Operation */
                ret = tegra_ccm_compute_auth(ctx, rctx);
                if (ret)
                        goto out;
        }

out:
        dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN,
                          rctx->outbuf.buf, rctx->outbuf.addr);

outbuf_err:
        dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN,
                          rctx->inbuf.buf, rctx->inbuf.addr);

        crypto_finalize_aead_request(ctx->se->engine, req, ret);

        return 0;
}

static int tegra_gcm_do_one_req(struct crypto_engine *engine, void *areq)
{
        struct aead_request *req = container_of(areq, struct aead_request, base);
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct tegra_aead_reqctx *rctx = aead_request_ctx(req);
        int ret;

        /* Allocate buffers required */
        rctx->inbuf.buf = dma_alloc_coherent(ctx->se->dev, SE_AES_BUFLEN,
                                             &rctx->inbuf.addr, GFP_KERNEL);
        if (!rctx->inbuf.buf)
                return -ENOMEM;

        rctx->inbuf.size = SE_AES_BUFLEN;

        rctx->outbuf.buf = dma_alloc_coherent(ctx->se->dev, SE_AES_BUFLEN,
                                              &rctx->outbuf.addr, GFP_KERNEL);
        if (!rctx->outbuf.buf) {
                ret = -ENOMEM;
                goto outbuf_err;
        }

        rctx->outbuf.size = SE_AES_BUFLEN;

        rctx->src_sg = req->src;
        rctx->dst_sg = req->dst;
        rctx->assoclen = req->assoclen;
        rctx->authsize = crypto_aead_authsize(tfm);

        if (rctx->encrypt)
                rctx->cryptlen = req->cryptlen;
        else
                rctx->cryptlen = req->cryptlen - ctx->authsize;

        memcpy(rctx->iv, req->iv, GCM_AES_IV_SIZE);
        rctx->iv[3] = (1 << 24);

        /* If there is associated data perform GMAC operation */
        if (rctx->assoclen) {
                ret = tegra_gcm_do_gmac(ctx, rctx);
                if (ret)
                        goto out;
        }

        /* GCM Encryption/Decryption operation */
        if (rctx->cryptlen) {
                ret = tegra_gcm_do_crypt(ctx, rctx);
                if (ret)
                        goto out;
        }

        /* GCM_FINAL operation */
        ret = tegra_gcm_do_final(ctx, rctx);
        if (ret)
                goto out;

        if (!rctx->encrypt)
                ret = tegra_gcm_do_verify(ctx->se, rctx);

out:
        dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN,
                          rctx->outbuf.buf, rctx->outbuf.addr);

outbuf_err:
        dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN,
                          rctx->inbuf.buf, rctx->inbuf.addr);

        /* Finalize the request if there are no errors */
        crypto_finalize_aead_request(ctx->se->engine, req, ret);

        return 0;
}

static int tegra_aead_cra_init(struct crypto_aead *tfm)
{
        struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct aead_alg *alg = crypto_aead_alg(tfm);
        struct tegra_se_alg *se_alg;
        const char *algname;
        int ret;

        algname = crypto_tfm_alg_name(&tfm->base);

        se_alg = container_of(alg, struct tegra_se_alg, alg.aead.base);

        crypto_aead_set_reqsize(tfm, sizeof(struct tegra_aead_reqctx));

        ctx->se = se_alg->se_dev;
        ctx->key_id = 0;

        ret = se_algname_to_algid(algname);
        if (ret < 0) {
                dev_err(ctx->se->dev, "invalid algorithm\n");
                return ret;
        }

        ctx->alg = ret;

        return 0;
}

static int tegra_ccm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
        struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);

        switch (authsize) {
        case 4:
        case 6:
        case 8:
        case 10:
        case 12:
        case 14:
        case 16:
                break;
        default:
                return -EINVAL;
        }

        ctx->authsize = authsize;

        return 0;
}

static int tegra_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
        struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
        int ret;

        ret = crypto_gcm_check_authsize(authsize);
        if (ret)
                return ret;

        ctx->authsize = authsize;

        return 0;
}

static void tegra_aead_cra_exit(struct crypto_aead *tfm)
{
        struct tegra_aead_ctx *ctx = crypto_tfm_ctx(&tfm->base);

        if (ctx->key_id)
                tegra_key_invalidate(ctx->se, ctx->key_id, ctx->alg);
}

static int tegra_aead_crypt(struct aead_request *req, bool encrypt)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct tegra_aead_reqctx *rctx = aead_request_ctx(req);

        rctx->encrypt = encrypt;

        return crypto_transfer_aead_request_to_engine(ctx->se->engine, req);
}

static int tegra_aead_encrypt(struct aead_request *req)
{
        return tegra_aead_crypt(req, true);
}

static int tegra_aead_decrypt(struct aead_request *req)
{
        return tegra_aead_crypt(req, false);
}

static int tegra_aead_setkey(struct crypto_aead *tfm,
                             const u8 *key, u32 keylen)
{
        struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);

        if (aes_check_keylen(keylen)) {
                dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen);
                return -EINVAL;
        }

        return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id);
}

static unsigned int tegra_cmac_prep_cmd(struct tegra_cmac_ctx *ctx,
                                        struct tegra_cmac_reqctx *rctx)
{
        unsigned int data_count, res_bits = 0, i = 0, j;
        struct tegra_se *se = ctx->se;
        u32 *cpuvaddr = se->cmdbuf->addr, op;

        data_count = (rctx->datbuf.size / AES_BLOCK_SIZE);

        op = SE_AES_OP_WRSTALL | SE_AES_OP_START | SE_AES_OP_LASTBUF;

        if (!(rctx->task & SHA_UPDATE)) {
                op |= SE_AES_OP_FINAL;
                res_bits = (rctx->datbuf.size % AES_BLOCK_SIZE) * 8;
        }

        if (!res_bits && data_count)
                data_count--;

        if (rctx->task & SHA_FIRST) {
                rctx->task &= ~SHA_FIRST;

                cpuvaddr[i++] = host1x_opcode_setpayload(SE_CRYPTO_CTR_REG_COUNT);
                cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->linear_ctr);
                /* Load zero IV */
                for (j = 0; j < SE_CRYPTO_CTR_REG_COUNT; j++)
                        cpuvaddr[i++] = 0;
        }

        cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1);
        cpuvaddr[i++] = SE_LAST_BLOCK_VAL(data_count) |
                        SE_LAST_BLOCK_RES_BITS(res_bits);

        cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6);
        cpuvaddr[i++] = rctx->config;
        cpuvaddr[i++] = rctx->crypto_config;

        /* Source Address */
        cpuvaddr[i++] = lower_32_bits(rctx->datbuf.addr);
        cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->datbuf.addr)) |
                        SE_ADDR_HI_SZ(rctx->datbuf.size);
        cpuvaddr[i++] = 0;
        cpuvaddr[i++] = SE_ADDR_HI_SZ(AES_BLOCK_SIZE);

        cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
        cpuvaddr[i++] = op;

        cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
        cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
                        host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);

        return i;
}

static void tegra_cmac_copy_result(struct tegra_se *se, struct tegra_cmac_reqctx *rctx)
{
        int i;

        for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
                rctx->result[i] = readl(se->base + se->hw->regs->result + (i * 4));
}

static void tegra_cmac_paste_result(struct tegra_se *se, struct tegra_cmac_reqctx *rctx)
{
        int i;

        for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
                writel(rctx->result[i],
                       se->base + se->hw->regs->result + (i * 4));
}

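/*
 * CMAC update path: full blocks (minus one block reserved for final()) are
 * hashed now and the remainder is kept in 'residue'; intermediate state is
 * saved/restored through the SE result registers to support import/export.
 */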
static int tegra_cmac_do_update(struct ahash_request *req)
{
        struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
        struct tegra_se *se = ctx->se;
        unsigned int nblks, nresidue, cmdlen;
        int ret;

        nresidue = (req->nbytes + rctx->residue.size) % rctx->blk_size;
        nblks = (req->nbytes + rctx->residue.size) / rctx->blk_size;

        /*
         * Reserve the last block as residue during final() to process.
         */
        if (!nresidue && nblks) {
                nresidue += rctx->blk_size;
                nblks--;
        }

        rctx->src_sg = req->src;
        rctx->datbuf.size = (req->nbytes + rctx->residue.size) - nresidue;
        rctx->total_len += rctx->datbuf.size;
        rctx->config = tegra234_aes_cfg(SE_ALG_CMAC, 0);
        rctx->crypto_config = SE_AES_KEY_INDEX(ctx->key_id);

        /*
         * Keep one block and residue bytes in residue and
         * return. The bytes will be processed in final()
         */
        if (nblks < 1) {
                scatterwalk_map_and_copy(rctx->residue.buf + rctx->residue.size,
                                         rctx->src_sg, 0, req->nbytes, 0);

                rctx->residue.size += req->nbytes;
                return 0;
        }

        /* Copy the previous residue first */
        if (rctx->residue.size)
                memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);

        scatterwalk_map_and_copy(rctx->datbuf.buf + rctx->residue.size,
                                 rctx->src_sg, 0, req->nbytes - nresidue, 0);

        scatterwalk_map_and_copy(rctx->residue.buf, rctx->src_sg,
                                 req->nbytes - nresidue, nresidue, 0);

        /* Update residue value with the residue after current block */
        rctx->residue.size = nresidue;

        /*
         * If this is not the first 'update' call, paste the previous copied
         * intermediate results to the registers so that it gets picked up.
         * This is to support the import/export functionality.
         */
        if (!(rctx->task & SHA_FIRST))
                tegra_cmac_paste_result(ctx->se, rctx);

        cmdlen = tegra_cmac_prep_cmd(ctx, rctx);

        ret = tegra_se_host1x_submit(se, cmdlen);
        /*
         * If this is not the final update, copy the intermediate results
         * from the registers so that it can be used in the next 'update'
         * call. This is to support the import/export functionality.
         */
        if (!(rctx->task & SHA_FINAL))
                tegra_cmac_copy_result(ctx->se, rctx);

        return ret;
}

static int tegra_cmac_do_final(struct ahash_request *req)
{
        struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
        struct tegra_se *se = ctx->se;
        u32 *result = (u32 *)req->result;
        int ret = 0, i, cmdlen;

        if (!req->nbytes && !rctx->total_len && ctx->fallback_tfm) {
                return crypto_shash_tfm_digest(ctx->fallback_tfm,
                                               rctx->datbuf.buf, 0, req->result);
        }

        memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
        rctx->datbuf.size = rctx->residue.size;
        rctx->total_len += rctx->residue.size;
        rctx->config = tegra234_aes_cfg(SE_ALG_CMAC, 0);

        /* Prepare command and submit */
        cmdlen = tegra_cmac_prep_cmd(ctx, rctx);
        ret = tegra_se_host1x_submit(se, cmdlen);
        if (ret)
                goto out;

        /* Read and clear Result register */
        for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
                result[i] = readl(se->base + se->hw->regs->result + (i * 4));

        for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
                writel(0, se->base + se->hw->regs->result + (i * 4));

out:
        dma_free_coherent(se->dev, SE_SHA_BUFLEN,
                          rctx->datbuf.buf, rctx->datbuf.addr);
        dma_free_coherent(se->dev, crypto_ahash_blocksize(tfm) * 2,
                          rctx->residue.buf, rctx->residue.addr);

        return ret;
}

static int tegra_cmac_do_one_req(struct crypto_engine *engine, void *areq)
{
        struct ahash_request *req = ahash_request_cast(areq);
        struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
        struct tegra_se *se = ctx->se;
        int ret = 0;

        if (rctx->task & SHA_UPDATE) {
                ret = tegra_cmac_do_update(req);
                rctx->task &= ~SHA_UPDATE;
        }

        if (rctx->task & SHA_FINAL) {
                ret = tegra_cmac_do_final(req);
                rctx->task &= ~SHA_FINAL;
        }

        crypto_finalize_hash_request(se->engine, req, ret);

        return 0;
}

static void tegra_cmac_init_fallback(struct crypto_ahash *tfm, struct tegra_cmac_ctx *ctx,
                                     const char *algname)
{
        unsigned int statesize;

        ctx->fallback_tfm = crypto_alloc_shash(algname, 0, CRYPTO_ALG_NEED_FALLBACK);

        if (IS_ERR(ctx->fallback_tfm)) {
                dev_warn(ctx->se->dev, "failed to allocate fallback for %s\n", algname);
                ctx->fallback_tfm = NULL;
                return;
        }

        statesize = crypto_shash_statesize(ctx->fallback_tfm);

        if (statesize > sizeof(struct tegra_cmac_reqctx))
                crypto_ahash_set_statesize(tfm, statesize);
}

static int tegra_cmac_cra_init(struct crypto_tfm *tfm)
{
        struct tegra_cmac_ctx *ctx = crypto_tfm_ctx(tfm);
        struct crypto_ahash *ahash_tfm = __crypto_ahash_cast(tfm);
        struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
        struct tegra_se_alg *se_alg;
        const char *algname;
        int ret;

        algname = crypto_tfm_alg_name(tfm);
        se_alg = container_of(alg, struct tegra_se_alg, alg.ahash.base);

        crypto_ahash_set_reqsize(ahash_tfm, sizeof(struct tegra_cmac_reqctx));

        ctx->se = se_alg->se_dev;
        ctx->key_id = 0;

        ret = se_algname_to_algid(algname);
        if (ret < 0) {
                dev_err(ctx->se->dev, "invalid algorithm\n");
                return ret;
        }

        ctx->alg = ret;

        tegra_cmac_init_fallback(ahash_tfm, ctx, algname);

        return 0;
}

static void tegra_cmac_cra_exit(struct crypto_tfm *tfm)
{
        struct tegra_cmac_ctx *ctx = crypto_tfm_ctx(tfm);

        if (ctx->fallback_tfm)
                crypto_free_shash(ctx->fallback_tfm);

        tegra_key_invalidate(ctx->se, ctx->key_id, ctx->alg);
}

static int tegra_cmac_init(struct ahash_request *req)
{
        struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
        struct tegra_se *se = ctx->se;
        int i;

        rctx->total_len = 0;
        rctx->datbuf.size = 0;
        rctx->residue.size = 0;
        rctx->task = SHA_FIRST;
        rctx->blk_size = crypto_ahash_blocksize(tfm);

        rctx->residue.buf = dma_alloc_coherent(se->dev, rctx->blk_size * 2,
                                               &rctx->residue.addr, GFP_KERNEL);
        if (!rctx->residue.buf)
                return -ENOMEM;

        rctx->residue.size = 0;

        rctx->datbuf.buf = dma_alloc_coherent(se->dev, SE_SHA_BUFLEN,
                                              &rctx->datbuf.addr, GFP_KERNEL);
        if (!rctx->datbuf.buf)
                goto resbuf_fail;

        rctx->datbuf.size = 0;

        /* Clear any previous result */
        for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
                writel(0, se->base + se->hw->regs->result + (i * 4));

        return 0;

resbuf_fail:
        dma_free_coherent(se->dev, rctx->blk_size, rctx->residue.buf,
                          rctx->residue.addr);
        return -ENOMEM;
}

static int tegra_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
                             unsigned int keylen)
{
        struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);

        if (aes_check_keylen(keylen)) {
                dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen);
                return -EINVAL;
        }

        if (ctx->fallback_tfm)
                crypto_shash_setkey(ctx->fallback_tfm, key, keylen);

        return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id);
}

static int tegra_cmac_update(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
        struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);

        rctx->task |= SHA_UPDATE;

        return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
}

static int tegra_cmac_final(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
        struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);

        rctx->task |= SHA_FINAL;

        return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
}

static int tegra_cmac_finup(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
        struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);

        rctx->task |= SHA_UPDATE | SHA_FINAL;

        return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
}

static int tegra_cmac_digest(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
        struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);

        tegra_cmac_init(req);
        rctx->task |= SHA_UPDATE | SHA_FINAL;

        return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
}

static int tegra_cmac_export(struct ahash_request *req, void *out)
{
        struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);

        memcpy(out, rctx, sizeof(*rctx));

        return 0;
}

static int tegra_cmac_import(struct ahash_request *req, const void *in)
{
        struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);

        memcpy(rctx, in, sizeof(*rctx));

        return 0;
}

static struct tegra_se_alg tegra_aead_algs[] = {
        {
                .alg.aead.op.do_one_request = tegra_gcm_do_one_req,
                .alg.aead.base = {
                        .init = tegra_aead_cra_init,
                        .exit = tegra_aead_cra_exit,
                        .setkey = tegra_aead_setkey,
                        .setauthsize = tegra_gcm_setauthsize,
                        .encrypt = tegra_aead_encrypt,
                        .decrypt = tegra_aead_decrypt,
                        .maxauthsize = AES_BLOCK_SIZE,
                        .ivsize = GCM_AES_IV_SIZE,
                        .base = {
                                .cra_name = "gcm(aes)",
                                .cra_driver_name = "gcm-aes-tegra",
                                .cra_priority = 500,
                                .cra_ctxsize = sizeof(struct tegra_aead_ctx),
                                .cra_alignmask = 0xf,
                                .cra_module = THIS_MODULE,
                        },
                }
        }, {
                .alg.aead.op.do_one_request = tegra_ccm_do_one_req,
                .alg.aead.base = {
                        .init = tegra_aead_cra_init,
                        .exit = tegra_aead_cra_exit,
                        .setkey = tegra_aead_setkey,
                        .setauthsize = tegra_ccm_setauthsize,
                        .encrypt = tegra_aead_encrypt,
                        .decrypt = tegra_aead_decrypt,
                        .maxauthsize = AES_BLOCK_SIZE,
                        .ivsize = AES_BLOCK_SIZE,
                        .chunksize = AES_BLOCK_SIZE,
                        .base = {
                                .cra_name = "ccm(aes)",
                                .cra_driver_name = "ccm-aes-tegra",
                                .cra_priority = 500,
                                .cra_ctxsize = sizeof(struct tegra_aead_ctx),
                                .cra_alignmask = 0xf,
                                .cra_module = THIS_MODULE,
                        },
                }
        }
};

static struct tegra_se_alg tegra_cmac_algs[] = {
        {
                .alg.ahash.op.do_one_request = tegra_cmac_do_one_req,
                .alg.ahash.base = {
                        .init = tegra_cmac_init,
                        .setkey = tegra_cmac_setkey,
                        .update = tegra_cmac_update,
                        .final = tegra_cmac_final,
                        .finup = tegra_cmac_finup,
                        .digest = tegra_cmac_digest,
                        .export = tegra_cmac_export,
                        .import = tegra_cmac_import,
                        .halg.digestsize = AES_BLOCK_SIZE,
                        .halg.statesize = sizeof(struct tegra_cmac_reqctx),
                        .halg.base = {
                                .cra_name = "cmac(aes)",
                                .cra_driver_name = "tegra-se-cmac",
                                .cra_priority = 300,
                                .cra_flags = CRYPTO_ALG_TYPE_AHASH,
                                .cra_blocksize = AES_BLOCK_SIZE,
                                .cra_ctxsize = sizeof(struct tegra_cmac_ctx),
                                .cra_module = THIS_MODULE,
                                .cra_init = tegra_cmac_cra_init,
                                .cra_exit = tegra_cmac_cra_exit,
                        }
                }
        }
};

int tegra_init_aes(struct tegra_se *se)
{
        struct aead_engine_alg *aead_alg;
        struct ahash_engine_alg *ahash_alg;
        struct skcipher_engine_alg *sk_alg;
        int i, ret;

        se->manifest = tegra_aes_kac_manifest;

        for (i = 0; i < ARRAY_SIZE(tegra_aes_algs); i++) {
                sk_alg = &tegra_aes_algs[i].alg.skcipher;
                tegra_aes_algs[i].se_dev = se;

                ret = crypto_engine_register_skcipher(sk_alg);
                if (ret) {
                        dev_err(se->dev, "failed to register %s\n",
                                sk_alg->base.base.cra_name);
                        goto err_aes;
                }
        }

        for (i = 0; i < ARRAY_SIZE(tegra_aead_algs); i++) {
                aead_alg = &tegra_aead_algs[i].alg.aead;
                tegra_aead_algs[i].se_dev = se;

                ret = crypto_engine_register_aead(aead_alg);
                if (ret) {
                        dev_err(se->dev, "failed to register %s\n",
                                aead_alg->base.base.cra_name);
                        goto err_aead;
                }
        }

        for (i = 0; i < ARRAY_SIZE(tegra_cmac_algs); i++) {
                ahash_alg = &tegra_cmac_algs[i].alg.ahash;
                tegra_cmac_algs[i].se_dev = se;

                ret = crypto_engine_register_ahash(ahash_alg);
                if (ret) {
                        dev_err(se->dev, "failed to register %s\n",
                                ahash_alg->base.halg.base.cra_name);
                        goto err_cmac;
                }
        }

        return 0;

err_cmac:
        while (i--)
                crypto_engine_unregister_ahash(&tegra_cmac_algs[i].alg.ahash);

        i = ARRAY_SIZE(tegra_aead_algs);
err_aead:
        while (i--)
                crypto_engine_unregister_aead(&tegra_aead_algs[i].alg.aead);

        i = ARRAY_SIZE(tegra_aes_algs);
err_aes:
        while (i--)
                crypto_engine_unregister_skcipher(&tegra_aes_algs[i].alg.skcipher);

        return ret;
}

void tegra_deinit_aes(struct tegra_se *se)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(tegra_aes_algs); i++)
                crypto_engine_unregister_skcipher(&tegra_aes_algs[i].alg.skcipher);

        for (i = 0; i < ARRAY_SIZE(tegra_aead_algs); i++)
                crypto_engine_unregister_aead(&tegra_aead_algs[i].alg.aead);

        for (i = 0; i < ARRAY_SIZE(tegra_cmac_algs); i++)
                crypto_engine_unregister_ahash(&tegra_cmac_algs[i].alg.ahash);
}