drivers/crypto/tegra/tegra-se-aes.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3 /*
4 * Crypto driver to handle block cipher algorithms using NVIDIA Security Engine.
5 */
7 #include <linux/clk.h>
8 #include <linux/dma-mapping.h>
9 #include <linux/module.h>
10 #include <linux/of_device.h>
11 #include <linux/platform_device.h>
13 #include <crypto/aead.h>
14 #include <crypto/aes.h>
15 #include <crypto/engine.h>
16 #include <crypto/gcm.h>
17 #include <crypto/scatterwalk.h>
18 #include <crypto/xts.h>
19 #include <crypto/internal/aead.h>
20 #include <crypto/internal/hash.h>
21 #include <crypto/internal/skcipher.h>
23 #include "tegra-se.h"
struct tegra_aes_ctx {
	struct tegra_se *se;
	u32 alg;
	u32 ivsize;
	u32 key1_id;
	u32 key2_id;
};
struct tegra_aes_reqctx {
	struct tegra_se_datbuf datbuf;
	bool encrypt;
	u32 config;
	u32 crypto_config;
	u32 len;
	u32 *iv;
};
struct tegra_aead_ctx {
	struct tegra_se *se;
	unsigned int authsize;
	u32 alg;
	u32 keylen;
	u32 key_id;
};
struct tegra_aead_reqctx {
	struct tegra_se_datbuf inbuf;
	struct tegra_se_datbuf outbuf;
	struct scatterlist *src_sg;
	struct scatterlist *dst_sg;
	unsigned int assoclen;
	unsigned int cryptlen;
	unsigned int authsize;
	bool encrypt;
	u32 config;
	u32 crypto_config;
	u32 key_id;
	u32 iv[4];
	u8 authdata[16];
};
struct tegra_cmac_ctx {
	struct tegra_se *se;
	unsigned int alg;
	u32 key_id;
	struct crypto_shash *fallback_tfm;
};
struct tegra_cmac_reqctx {
	struct scatterlist *src_sg;
	struct tegra_se_datbuf datbuf;
	struct tegra_se_datbuf residue;
	unsigned int total_len;
	unsigned int blk_size;
	unsigned int task;
	u32 crypto_config;
	u32 config;
	u32 key_id;
	u32 *iv;
	u32 result[CMAC_RESULT_REG_COUNT];
};
87 /* increment counter (128-bit int) */
88 static void ctr_iv_inc(__u8 *counter, __u8 bits, __u32 nums)
90 do {
91 --bits;
92 nums += counter[bits];
93 counter[bits] = nums & 0xff;
94 nums >>= 8;
95 } while (bits && nums);
98 static void tegra_cbc_iv_copyback(struct skcipher_request *req, struct tegra_aes_ctx *ctx)
100 struct tegra_aes_reqctx *rctx = skcipher_request_ctx(req);
101 unsigned int offset;
103 offset = req->cryptlen - ctx->ivsize;
105 if (rctx->encrypt)
106 memcpy(req->iv, rctx->datbuf.buf + offset, ctx->ivsize);
107 else
108 scatterwalk_map_and_copy(req->iv, req->src, offset, ctx->ivsize, 0);
111 static void tegra_aes_update_iv(struct skcipher_request *req, struct tegra_aes_ctx *ctx)
113 int num;
115 if (ctx->alg == SE_ALG_CBC) {
116 tegra_cbc_iv_copyback(req, ctx);
117 } else if (ctx->alg == SE_ALG_CTR) {
118 num = req->cryptlen / ctx->ivsize;
119 if (req->cryptlen % ctx->ivsize)
120 num++;
122 ctr_iv_inc(req->iv, ctx->ivsize, num);
126 static int tegra234_aes_crypto_cfg(u32 alg, bool encrypt)
128 switch (alg) {
129 case SE_ALG_CMAC:
130 case SE_ALG_GMAC:
131 case SE_ALG_GCM:
132 case SE_ALG_GCM_FINAL:
133 return 0;
134 case SE_ALG_CBC:
135 if (encrypt)
136 return SE_CRYPTO_CFG_CBC_ENCRYPT;
137 else
138 return SE_CRYPTO_CFG_CBC_DECRYPT;
139 case SE_ALG_ECB:
140 if (encrypt)
141 return SE_CRYPTO_CFG_ECB_ENCRYPT;
142 else
143 return SE_CRYPTO_CFG_ECB_DECRYPT;
144 case SE_ALG_XTS:
145 if (encrypt)
146 return SE_CRYPTO_CFG_XTS_ENCRYPT;
147 else
148 return SE_CRYPTO_CFG_XTS_DECRYPT;
150 case SE_ALG_CTR:
151 return SE_CRYPTO_CFG_CTR;
152 case SE_ALG_CBC_MAC:
153 return SE_CRYPTO_CFG_CBC_MAC;
155 default:
156 break;
159 return -EINVAL;
162 static int tegra234_aes_cfg(u32 alg, bool encrypt)
164 switch (alg) {
165 case SE_ALG_CBC:
166 case SE_ALG_ECB:
167 case SE_ALG_XTS:
168 case SE_ALG_CTR:
169 if (encrypt)
170 return SE_CFG_AES_ENCRYPT;
171 else
172 return SE_CFG_AES_DECRYPT;
174 case SE_ALG_GMAC:
175 if (encrypt)
176 return SE_CFG_GMAC_ENCRYPT;
177 else
178 return SE_CFG_GMAC_DECRYPT;
180 case SE_ALG_GCM:
181 if (encrypt)
182 return SE_CFG_GCM_ENCRYPT;
183 else
184 return SE_CFG_GCM_DECRYPT;
186 case SE_ALG_GCM_FINAL:
187 if (encrypt)
188 return SE_CFG_GCM_FINAL_ENCRYPT;
189 else
190 return SE_CFG_GCM_FINAL_DECRYPT;
192 case SE_ALG_CMAC:
193 return SE_CFG_CMAC;
195 case SE_ALG_CBC_MAC:
196 return SE_AES_ENC_ALG_AES_ENC |
197 SE_AES_DST_HASH_REG;
199 return -EINVAL;
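/*
 * Build the host1x command stream for one AES skcipher operation:
 * load the IV/counter registers when an IV is present, program the
 * last-block count and residual bits, write config/crypto_config and
 * the source/destination buffer addresses, then issue SE_AES_OP_START
 * followed by a syncpoint increment that signals completion.
 */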
202 static unsigned int tegra_aes_prep_cmd(struct tegra_aes_ctx *ctx,
203 struct tegra_aes_reqctx *rctx)
205 unsigned int data_count, res_bits, i = 0, j;
206 struct tegra_se *se = ctx->se;
207 u32 *cpuvaddr = se->cmdbuf->addr;
208 dma_addr_t addr = rctx->datbuf.addr;
210 data_count = rctx->len / AES_BLOCK_SIZE;
211 res_bits = (rctx->len % AES_BLOCK_SIZE) * 8;
	/*
	 * Hardware processes data_count + 1 blocks.
	 * Reduce 1 block if there is no residue
	 */
217 if (!res_bits)
218 data_count--;
220 if (rctx->iv) {
221 cpuvaddr[i++] = host1x_opcode_setpayload(SE_CRYPTO_CTR_REG_COUNT);
222 cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->linear_ctr);
223 for (j = 0; j < SE_CRYPTO_CTR_REG_COUNT; j++)
224 cpuvaddr[i++] = rctx->iv[j];
227 cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1);
228 cpuvaddr[i++] = SE_LAST_BLOCK_VAL(data_count) |
229 SE_LAST_BLOCK_RES_BITS(res_bits);
231 cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6);
232 cpuvaddr[i++] = rctx->config;
233 cpuvaddr[i++] = rctx->crypto_config;
235 /* Source address setting */
236 cpuvaddr[i++] = lower_32_bits(addr);
237 cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(addr)) | SE_ADDR_HI_SZ(rctx->len);
239 /* Destination address setting */
240 cpuvaddr[i++] = lower_32_bits(addr);
241 cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(addr)) |
242 SE_ADDR_HI_SZ(rctx->len);
244 cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
245 cpuvaddr[i++] = SE_AES_OP_WRSTALL | SE_AES_OP_LASTBUF |
246 SE_AES_OP_START;
248 cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
249 cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
250 host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);
252 dev_dbg(se->dev, "cfg %#x crypto cfg %#x\n", rctx->config, rctx->crypto_config);
254 return i;
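/*
 * crypto_engine callback for AES skcipher requests: copy the request
 * into a DMA-coherent bounce buffer, pad to the AES block size for
 * non-XTS modes, submit the prepared command stream to host1x, update
 * the IV, copy the result back to the destination scatterlist and
 * finalize the request.
 */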
257 static int tegra_aes_do_one_req(struct crypto_engine *engine, void *areq)
259 struct skcipher_request *req = container_of(areq, struct skcipher_request, base);
260 struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
261 struct tegra_aes_reqctx *rctx = skcipher_request_ctx(req);
262 struct tegra_se *se = ctx->se;
263 unsigned int cmdlen;
264 int ret;
266 rctx->datbuf.buf = dma_alloc_coherent(se->dev, SE_AES_BUFLEN,
267 &rctx->datbuf.addr, GFP_KERNEL);
268 if (!rctx->datbuf.buf)
269 return -ENOMEM;
271 rctx->datbuf.size = SE_AES_BUFLEN;
272 rctx->iv = (u32 *)req->iv;
273 rctx->len = req->cryptlen;
275 /* Pad input to AES Block size */
276 if (ctx->alg != SE_ALG_XTS) {
277 if (rctx->len % AES_BLOCK_SIZE)
278 rctx->len += AES_BLOCK_SIZE - (rctx->len % AES_BLOCK_SIZE);
281 scatterwalk_map_and_copy(rctx->datbuf.buf, req->src, 0, req->cryptlen, 0);
283 /* Prepare the command and submit for execution */
284 cmdlen = tegra_aes_prep_cmd(ctx, rctx);
285 ret = tegra_se_host1x_submit(se, cmdlen);
287 /* Copy the result */
288 tegra_aes_update_iv(req, ctx);
289 scatterwalk_map_and_copy(rctx->datbuf.buf, req->dst, 0, req->cryptlen, 1);
291 /* Free the buffer */
292 dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN,
293 rctx->datbuf.buf, rctx->datbuf.addr);
295 crypto_finalize_skcipher_request(se->engine, req, ret);
297 return 0;
300 static int tegra_aes_cra_init(struct crypto_skcipher *tfm)
302 struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
303 struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
304 struct tegra_se_alg *se_alg;
305 const char *algname;
306 int ret;
308 se_alg = container_of(alg, struct tegra_se_alg, alg.skcipher.base);
310 crypto_skcipher_set_reqsize(tfm, sizeof(struct tegra_aes_reqctx));
312 ctx->ivsize = crypto_skcipher_ivsize(tfm);
313 ctx->se = se_alg->se_dev;
314 ctx->key1_id = 0;
315 ctx->key2_id = 0;
317 algname = crypto_tfm_alg_name(&tfm->base);
318 ret = se_algname_to_algid(algname);
319 if (ret < 0) {
320 dev_err(ctx->se->dev, "invalid algorithm\n");
321 return ret;
324 ctx->alg = ret;
326 return 0;
329 static void tegra_aes_cra_exit(struct crypto_skcipher *tfm)
331 struct tegra_aes_ctx *ctx = crypto_tfm_ctx(&tfm->base);
333 if (ctx->key1_id)
334 tegra_key_invalidate(ctx->se, ctx->key1_id, ctx->alg);
336 if (ctx->key2_id)
337 tegra_key_invalidate(ctx->se, ctx->key2_id, ctx->alg);
340 static int tegra_aes_setkey(struct crypto_skcipher *tfm,
341 const u8 *key, u32 keylen)
343 struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
345 if (aes_check_keylen(keylen)) {
346 dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen);
347 return -EINVAL;
350 return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key1_id);
353 static int tegra_xts_setkey(struct crypto_skcipher *tfm,
354 const u8 *key, u32 keylen)
356 struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
357 u32 len = keylen / 2;
358 int ret;
360 ret = xts_verify_key(tfm, key, keylen);
361 if (ret || aes_check_keylen(len)) {
362 dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen);
363 return -EINVAL;
366 ret = tegra_key_submit(ctx->se, key, len,
367 ctx->alg, &ctx->key1_id);
368 if (ret)
369 return ret;
371 return tegra_key_submit(ctx->se, key + len, len,
372 ctx->alg, &ctx->key2_id);
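/*
 * Build the key access control (KAC) manifest word for a key slot:
 * start from SE_KAC_USER_NS, add a purpose bit derived from the
 * algorithm and the key-size field derived from the key length.
 */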
377 static int tegra_aes_kac_manifest(u32 user, u32 alg, u32 keylen)
379 int manifest;
381 manifest = SE_KAC_USER_NS;
383 switch (alg) {
384 case SE_ALG_CBC:
385 case SE_ALG_ECB:
386 case SE_ALG_CTR:
387 manifest |= SE_KAC_ENC;
388 break;
389 case SE_ALG_XTS:
390 manifest |= SE_KAC_XTS;
391 break;
392 case SE_ALG_GCM:
393 manifest |= SE_KAC_GCM;
394 break;
395 case SE_ALG_CMAC:
396 manifest |= SE_KAC_CMAC;
397 break;
398 case SE_ALG_CBC_MAC:
399 manifest |= SE_KAC_ENC;
400 break;
401 default:
402 return -EINVAL;
405 switch (keylen) {
406 case AES_KEYSIZE_128:
407 manifest |= SE_KAC_SIZE_128;
408 break;
409 case AES_KEYSIZE_192:
410 manifest |= SE_KAC_SIZE_192;
411 break;
412 case AES_KEYSIZE_256:
413 manifest |= SE_KAC_SIZE_256;
414 break;
415 default:
416 return -EINVAL;
419 return manifest;
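/*
 * Common AES encrypt/decrypt entry point: validate the request length
 * for the selected mode, precompute config/crypto_config including the
 * key slot indices and hand the request over to the crypto engine.
 */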
422 static int tegra_aes_crypt(struct skcipher_request *req, bool encrypt)
425 struct crypto_skcipher *tfm;
426 struct tegra_aes_ctx *ctx;
427 struct tegra_aes_reqctx *rctx;
429 tfm = crypto_skcipher_reqtfm(req);
430 ctx = crypto_skcipher_ctx(tfm);
431 rctx = skcipher_request_ctx(req);
433 if (ctx->alg != SE_ALG_XTS) {
434 if (!IS_ALIGNED(req->cryptlen, crypto_skcipher_blocksize(tfm))) {
435 dev_dbg(ctx->se->dev, "invalid length (%d)", req->cryptlen);
436 return -EINVAL;
438 } else if (req->cryptlen < XTS_BLOCK_SIZE) {
439 dev_dbg(ctx->se->dev, "invalid length (%d)", req->cryptlen);
440 return -EINVAL;
443 if (!req->cryptlen)
444 return 0;
446 rctx->encrypt = encrypt;
447 rctx->config = tegra234_aes_cfg(ctx->alg, encrypt);
448 rctx->crypto_config = tegra234_aes_crypto_cfg(ctx->alg, encrypt);
449 rctx->crypto_config |= SE_AES_KEY_INDEX(ctx->key1_id);
451 if (ctx->key2_id)
452 rctx->crypto_config |= SE_AES_KEY2_INDEX(ctx->key2_id);
454 return crypto_transfer_skcipher_request_to_engine(ctx->se->engine, req);
457 static int tegra_aes_encrypt(struct skcipher_request *req)
459 return tegra_aes_crypt(req, true);
462 static int tegra_aes_decrypt(struct skcipher_request *req)
464 return tegra_aes_crypt(req, false);
467 static struct tegra_se_alg tegra_aes_algs[] = {
469 .alg.skcipher.op.do_one_request = tegra_aes_do_one_req,
470 .alg.skcipher.base = {
471 .init = tegra_aes_cra_init,
472 .exit = tegra_aes_cra_exit,
473 .setkey = tegra_aes_setkey,
474 .encrypt = tegra_aes_encrypt,
475 .decrypt = tegra_aes_decrypt,
476 .min_keysize = AES_MIN_KEY_SIZE,
477 .max_keysize = AES_MAX_KEY_SIZE,
478 .ivsize = AES_BLOCK_SIZE,
479 .base = {
480 .cra_name = "cbc(aes)",
481 .cra_driver_name = "cbc-aes-tegra",
482 .cra_priority = 500,
483 .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC,
484 .cra_blocksize = AES_BLOCK_SIZE,
485 .cra_ctxsize = sizeof(struct tegra_aes_ctx),
486 .cra_alignmask = 0xf,
487 .cra_module = THIS_MODULE,
490 }, {
491 .alg.skcipher.op.do_one_request = tegra_aes_do_one_req,
492 .alg.skcipher.base = {
493 .init = tegra_aes_cra_init,
494 .exit = tegra_aes_cra_exit,
495 .setkey = tegra_aes_setkey,
496 .encrypt = tegra_aes_encrypt,
497 .decrypt = tegra_aes_decrypt,
498 .min_keysize = AES_MIN_KEY_SIZE,
499 .max_keysize = AES_MAX_KEY_SIZE,
500 .base = {
501 .cra_name = "ecb(aes)",
502 .cra_driver_name = "ecb-aes-tegra",
503 .cra_priority = 500,
504 .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC,
505 .cra_blocksize = AES_BLOCK_SIZE,
506 .cra_ctxsize = sizeof(struct tegra_aes_ctx),
507 .cra_alignmask = 0xf,
508 .cra_module = THIS_MODULE,
511 }, {
512 .alg.skcipher.op.do_one_request = tegra_aes_do_one_req,
513 .alg.skcipher.base = {
514 .init = tegra_aes_cra_init,
515 .exit = tegra_aes_cra_exit,
516 .setkey = tegra_aes_setkey,
517 .encrypt = tegra_aes_encrypt,
518 .decrypt = tegra_aes_decrypt,
519 .min_keysize = AES_MIN_KEY_SIZE,
520 .max_keysize = AES_MAX_KEY_SIZE,
521 .ivsize = AES_BLOCK_SIZE,
522 .base = {
523 .cra_name = "ctr(aes)",
524 .cra_driver_name = "ctr-aes-tegra",
525 .cra_priority = 500,
526 .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC,
527 .cra_blocksize = 1,
528 .cra_ctxsize = sizeof(struct tegra_aes_ctx),
529 .cra_alignmask = 0xf,
530 .cra_module = THIS_MODULE,
533 }, {
534 .alg.skcipher.op.do_one_request = tegra_aes_do_one_req,
535 .alg.skcipher.base = {
536 .init = tegra_aes_cra_init,
537 .exit = tegra_aes_cra_exit,
538 .setkey = tegra_xts_setkey,
539 .encrypt = tegra_aes_encrypt,
540 .decrypt = tegra_aes_decrypt,
541 .min_keysize = 2 * AES_MIN_KEY_SIZE,
542 .max_keysize = 2 * AES_MAX_KEY_SIZE,
543 .ivsize = AES_BLOCK_SIZE,
544 .base = {
545 .cra_name = "xts(aes)",
546 .cra_driver_name = "xts-aes-tegra",
547 .cra_priority = 500,
548 .cra_blocksize = AES_BLOCK_SIZE,
549 .cra_ctxsize = sizeof(struct tegra_aes_ctx),
550 .cra_alignmask = (__alignof__(u64) - 1),
551 .cra_module = THIS_MODULE,
557 static unsigned int tegra_gmac_prep_cmd(struct tegra_aead_ctx *ctx,
558 struct tegra_aead_reqctx *rctx)
560 unsigned int data_count, res_bits, i = 0;
561 struct tegra_se *se = ctx->se;
562 u32 *cpuvaddr = se->cmdbuf->addr;
564 data_count = (rctx->assoclen / AES_BLOCK_SIZE);
565 res_bits = (rctx->assoclen % AES_BLOCK_SIZE) * 8;
	/*
	 * Hardware processes data_count + 1 blocks.
	 * Reduce 1 block if there is no residue
	 */
571 if (!res_bits)
572 data_count--;
574 cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1);
575 cpuvaddr[i++] = SE_LAST_BLOCK_VAL(data_count) |
576 SE_LAST_BLOCK_RES_BITS(res_bits);
578 cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 4);
579 cpuvaddr[i++] = rctx->config;
580 cpuvaddr[i++] = rctx->crypto_config;
581 cpuvaddr[i++] = lower_32_bits(rctx->inbuf.addr);
582 cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->inbuf.addr)) |
583 SE_ADDR_HI_SZ(rctx->assoclen);
585 cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
586 cpuvaddr[i++] = SE_AES_OP_WRSTALL | SE_AES_OP_FINAL |
587 SE_AES_OP_INIT | SE_AES_OP_LASTBUF |
588 SE_AES_OP_START;
590 cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
591 cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
592 host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);
594 return i;
597 static unsigned int tegra_gcm_crypt_prep_cmd(struct tegra_aead_ctx *ctx,
598 struct tegra_aead_reqctx *rctx)
600 unsigned int data_count, res_bits, i = 0, j;
601 struct tegra_se *se = ctx->se;
602 u32 *cpuvaddr = se->cmdbuf->addr, op;
604 data_count = (rctx->cryptlen / AES_BLOCK_SIZE);
605 res_bits = (rctx->cryptlen % AES_BLOCK_SIZE) * 8;
606 op = SE_AES_OP_WRSTALL | SE_AES_OP_FINAL |
607 SE_AES_OP_LASTBUF | SE_AES_OP_START;
	/*
	 * If there is no assoc data,
	 * this will be the init command
	 */
613 if (!rctx->assoclen)
614 op |= SE_AES_OP_INIT;
	/*
	 * Hardware processes data_count + 1 blocks.
	 * Reduce 1 block if there is no residue
	 */
620 if (!res_bits)
621 data_count--;
623 cpuvaddr[i++] = host1x_opcode_setpayload(SE_CRYPTO_CTR_REG_COUNT);
624 cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->linear_ctr);
625 for (j = 0; j < SE_CRYPTO_CTR_REG_COUNT; j++)
626 cpuvaddr[i++] = rctx->iv[j];
628 cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1);
629 cpuvaddr[i++] = SE_LAST_BLOCK_VAL(data_count) |
630 SE_LAST_BLOCK_RES_BITS(res_bits);
632 cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6);
633 cpuvaddr[i++] = rctx->config;
634 cpuvaddr[i++] = rctx->crypto_config;
636 /* Source Address */
637 cpuvaddr[i++] = lower_32_bits(rctx->inbuf.addr);
638 cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->inbuf.addr)) |
639 SE_ADDR_HI_SZ(rctx->cryptlen);
641 /* Destination Address */
642 cpuvaddr[i++] = lower_32_bits(rctx->outbuf.addr);
643 cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->outbuf.addr)) |
644 SE_ADDR_HI_SZ(rctx->cryptlen);
646 cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
647 cpuvaddr[i++] = op;
649 cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
650 cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
651 host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);
653 dev_dbg(se->dev, "cfg %#x crypto cfg %#x\n", rctx->config, rctx->crypto_config);
654 return i;
657 static int tegra_gcm_prep_final_cmd(struct tegra_se *se, u32 *cpuvaddr,
658 struct tegra_aead_reqctx *rctx)
660 unsigned int i = 0, j;
661 u32 op;
663 op = SE_AES_OP_WRSTALL | SE_AES_OP_FINAL |
664 SE_AES_OP_LASTBUF | SE_AES_OP_START;
	/*
	 * Set init for zero sized vector
	 */
669 if (!rctx->assoclen && !rctx->cryptlen)
670 op |= SE_AES_OP_INIT;
672 cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->aad_len, 2);
673 cpuvaddr[i++] = rctx->assoclen * 8;
674 cpuvaddr[i++] = 0;
676 cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->cryp_msg_len, 2);
677 cpuvaddr[i++] = rctx->cryptlen * 8;
678 cpuvaddr[i++] = 0;
680 cpuvaddr[i++] = host1x_opcode_setpayload(SE_CRYPTO_CTR_REG_COUNT);
681 cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->linear_ctr);
682 for (j = 0; j < SE_CRYPTO_CTR_REG_COUNT; j++)
683 cpuvaddr[i++] = rctx->iv[j];
685 cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6);
686 cpuvaddr[i++] = rctx->config;
687 cpuvaddr[i++] = rctx->crypto_config;
688 cpuvaddr[i++] = 0;
689 cpuvaddr[i++] = 0;
691 /* Destination Address */
692 cpuvaddr[i++] = lower_32_bits(rctx->outbuf.addr);
693 cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->outbuf.addr)) |
694 SE_ADDR_HI_SZ(0x10); /* HW always generates 128-bit tag */
696 cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
697 cpuvaddr[i++] = op;
699 cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
700 cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
701 host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);
703 dev_dbg(se->dev, "cfg %#x crypto cfg %#x\n", rctx->config, rctx->crypto_config);
705 return i;
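/*
 * GMAC pass over the associated data: copy the AAD into the input
 * buffer, program the GMAC config and key slot, then submit the
 * command to host1x.
 */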
708 static int tegra_gcm_do_gmac(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
710 struct tegra_se *se = ctx->se;
711 unsigned int cmdlen;
713 scatterwalk_map_and_copy(rctx->inbuf.buf,
714 rctx->src_sg, 0, rctx->assoclen, 0);
716 rctx->config = tegra234_aes_cfg(SE_ALG_GMAC, rctx->encrypt);
717 rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_GMAC, rctx->encrypt) |
718 SE_AES_KEY_INDEX(ctx->key_id);
720 cmdlen = tegra_gmac_prep_cmd(ctx, rctx);
722 return tegra_se_host1x_submit(se, cmdlen);
725 static int tegra_gcm_do_crypt(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
727 struct tegra_se *se = ctx->se;
728 int cmdlen, ret;
730 scatterwalk_map_and_copy(rctx->inbuf.buf, rctx->src_sg,
731 rctx->assoclen, rctx->cryptlen, 0);
733 rctx->config = tegra234_aes_cfg(SE_ALG_GCM, rctx->encrypt);
734 rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_GCM, rctx->encrypt) |
735 SE_AES_KEY_INDEX(ctx->key_id);
737 /* Prepare command and submit */
738 cmdlen = tegra_gcm_crypt_prep_cmd(ctx, rctx);
739 ret = tegra_se_host1x_submit(se, cmdlen);
740 if (ret)
741 return ret;
743 /* Copy the result */
744 scatterwalk_map_and_copy(rctx->outbuf.buf, rctx->dst_sg,
745 rctx->assoclen, rctx->cryptlen, 1);
747 return 0;
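/*
 * GCM_FINAL pass: program the AAD and message lengths (in bits) along
 * with the counter, let the hardware write the 128-bit tag into the
 * output buffer and, for encryption, copy authsize bytes of it to the
 * destination scatterlist after the ciphertext.
 */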
750 static int tegra_gcm_do_final(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
752 struct tegra_se *se = ctx->se;
753 u32 *cpuvaddr = se->cmdbuf->addr;
754 int cmdlen, ret, offset;
756 rctx->config = tegra234_aes_cfg(SE_ALG_GCM_FINAL, rctx->encrypt);
757 rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_GCM_FINAL, rctx->encrypt) |
758 SE_AES_KEY_INDEX(ctx->key_id);
760 /* Prepare command and submit */
761 cmdlen = tegra_gcm_prep_final_cmd(se, cpuvaddr, rctx);
762 ret = tegra_se_host1x_submit(se, cmdlen);
763 if (ret)
764 return ret;
766 if (rctx->encrypt) {
767 /* Copy the result */
768 offset = rctx->assoclen + rctx->cryptlen;
769 scatterwalk_map_and_copy(rctx->outbuf.buf, rctx->dst_sg,
770 offset, rctx->authsize, 1);
773 return 0;
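/*
 * Decrypt-path tag check: read the expected tag from the source
 * scatterlist and compare it against the computed tag in the output
 * buffer with crypto_memneq(); return -EBADMSG on mismatch.
 */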
776 static int tegra_gcm_do_verify(struct tegra_se *se, struct tegra_aead_reqctx *rctx)
778 unsigned int offset;
779 u8 mac[16];
781 offset = rctx->assoclen + rctx->cryptlen;
782 scatterwalk_map_and_copy(mac, rctx->src_sg, offset, rctx->authsize, 0);
784 if (crypto_memneq(rctx->outbuf.buf, mac, rctx->authsize))
785 return -EBADMSG;
787 return 0;
790 static inline int tegra_ccm_check_iv(const u8 *iv)
	/* iv[0] gives value of q-1
	 * 2 <= q <= 8 as per NIST 800-38C notation
	 * 2 <= L <= 8, so 1 <= L' <= 7. as per rfc 3610 notation
	 */
796 if (iv[0] < 1 || iv[0] > 7) {
797 pr_debug("ccm_check_iv failed %d\n", iv[0]);
798 return -EINVAL;
801 return 0;
804 static unsigned int tegra_cbcmac_prep_cmd(struct tegra_aead_ctx *ctx,
805 struct tegra_aead_reqctx *rctx)
807 unsigned int data_count, i = 0;
808 struct tegra_se *se = ctx->se;
809 u32 *cpuvaddr = se->cmdbuf->addr;
811 data_count = (rctx->inbuf.size / AES_BLOCK_SIZE) - 1;
813 cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1);
814 cpuvaddr[i++] = SE_LAST_BLOCK_VAL(data_count);
816 cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6);
817 cpuvaddr[i++] = rctx->config;
818 cpuvaddr[i++] = rctx->crypto_config;
820 cpuvaddr[i++] = lower_32_bits(rctx->inbuf.addr);
821 cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->inbuf.addr)) |
822 SE_ADDR_HI_SZ(rctx->inbuf.size);
824 cpuvaddr[i++] = lower_32_bits(rctx->outbuf.addr);
825 cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->outbuf.addr)) |
826 SE_ADDR_HI_SZ(0x10); /* HW always generates 128 bit tag */
828 cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
829 cpuvaddr[i++] = SE_AES_OP_WRSTALL |
830 SE_AES_OP_LASTBUF | SE_AES_OP_START;
832 cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
833 cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
834 host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);
836 return i;
839 static unsigned int tegra_ctr_prep_cmd(struct tegra_aead_ctx *ctx,
840 struct tegra_aead_reqctx *rctx)
842 unsigned int i = 0, j;
843 struct tegra_se *se = ctx->se;
844 u32 *cpuvaddr = se->cmdbuf->addr;
846 cpuvaddr[i++] = host1x_opcode_setpayload(SE_CRYPTO_CTR_REG_COUNT);
847 cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->linear_ctr);
848 for (j = 0; j < SE_CRYPTO_CTR_REG_COUNT; j++)
849 cpuvaddr[i++] = rctx->iv[j];
851 cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1);
852 cpuvaddr[i++] = (rctx->inbuf.size / AES_BLOCK_SIZE) - 1;
853 cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6);
854 cpuvaddr[i++] = rctx->config;
855 cpuvaddr[i++] = rctx->crypto_config;
857 /* Source address setting */
858 cpuvaddr[i++] = lower_32_bits(rctx->inbuf.addr);
859 cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->inbuf.addr)) |
860 SE_ADDR_HI_SZ(rctx->inbuf.size);
862 /* Destination address setting */
863 cpuvaddr[i++] = lower_32_bits(rctx->outbuf.addr);
864 cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->outbuf.addr)) |
865 SE_ADDR_HI_SZ(rctx->inbuf.size);
867 cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
868 cpuvaddr[i++] = SE_AES_OP_WRSTALL | SE_AES_OP_LASTBUF |
869 SE_AES_OP_START;
871 cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
872 cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
873 host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);
875 dev_dbg(se->dev, "cfg %#x crypto cfg %#x\n",
876 rctx->config, rctx->crypto_config);
878 return i;
881 static int tegra_ccm_do_cbcmac(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
883 struct tegra_se *se = ctx->se;
884 int cmdlen;
886 rctx->config = tegra234_aes_cfg(SE_ALG_CBC_MAC, rctx->encrypt);
887 rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_CBC_MAC,
888 rctx->encrypt) |
889 SE_AES_KEY_INDEX(ctx->key_id);
891 /* Prepare command and submit */
892 cmdlen = tegra_cbcmac_prep_cmd(ctx, rctx);
894 return tegra_se_host1x_submit(se, cmdlen);
897 static int tegra_ccm_set_msg_len(u8 *block, unsigned int msglen, int csize)
899 __be32 data;
901 memset(block, 0, csize);
902 block += csize;
904 if (csize >= 4)
905 csize = 4;
906 else if (msglen > (1 << (8 * csize)))
907 return -EOVERFLOW;
909 data = cpu_to_be32(msglen);
910 memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
912 return 0;
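/*
 * Build the CCM B_0 block from the IV: set the flags octet (encoded
 * MAC length plus the Adata bit), keep the nonce bytes and encode the
 * message length Q into the trailing iv[0] + 1 bytes.
 */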
915 static int tegra_ccm_format_nonce(struct tegra_aead_reqctx *rctx, u8 *nonce)
917 unsigned int q, t;
918 u8 *q_ptr, *iv = (u8 *)rctx->iv;
920 memcpy(nonce, rctx->iv, 16);
922 /*** 1. Prepare Flags Octet ***/
924 /* Encode t (mac length) */
925 t = rctx->authsize;
926 nonce[0] |= (((t - 2) / 2) << 3);
928 /* Adata */
929 if (rctx->assoclen)
930 nonce[0] |= (1 << 6);
932 /*** Encode Q - message length ***/
933 q = iv[0] + 1;
934 q_ptr = nonce + 16 - q;
936 return tegra_ccm_set_msg_len(q_ptr, rctx->cryptlen, q);
939 static int tegra_ccm_format_adata(u8 *adata, unsigned int a)
941 int len = 0;
	/* add control info for associated data
	 * RFC 3610 and NIST Special Publication 800-38C
	 */
946 if (a < 65280) {
947 *(__be16 *)adata = cpu_to_be16(a);
948 len = 2;
949 } else {
950 *(__be16 *)adata = cpu_to_be16(0xfffe);
951 *(__be32 *)&adata[2] = cpu_to_be32(a);
952 len = 6;
955 return len;
958 static int tegra_ccm_add_padding(u8 *buf, unsigned int len)
960 unsigned int padlen = 16 - (len % 16);
961 u8 padding[16] = {0};
963 if (padlen == 16)
964 return 0;
966 memcpy(buf, padding, padlen);
968 return padlen;
971 static int tegra_ccm_format_blocks(struct tegra_aead_reqctx *rctx)
973 unsigned int alen = 0, offset = 0;
974 u8 nonce[16], adata[16];
975 int ret;
977 ret = tegra_ccm_format_nonce(rctx, nonce);
978 if (ret)
979 return ret;
981 memcpy(rctx->inbuf.buf, nonce, 16);
982 offset = 16;
984 if (rctx->assoclen) {
985 alen = tegra_ccm_format_adata(adata, rctx->assoclen);
986 memcpy(rctx->inbuf.buf + offset, adata, alen);
987 offset += alen;
989 scatterwalk_map_and_copy(rctx->inbuf.buf + offset,
990 rctx->src_sg, 0, rctx->assoclen, 0);
992 offset += rctx->assoclen;
993 offset += tegra_ccm_add_padding(rctx->inbuf.buf + offset,
994 rctx->assoclen + alen);
997 return offset;
1000 static int tegra_ccm_mac_result(struct tegra_se *se, struct tegra_aead_reqctx *rctx)
1002 u32 result[16];
1003 int i, ret;
1005 /* Read and clear Result */
1006 for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
1007 result[i] = readl(se->base + se->hw->regs->result + (i * 4));
1009 for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
1010 writel(0, se->base + se->hw->regs->result + (i * 4));
1012 if (rctx->encrypt) {
1013 memcpy(rctx->authdata, result, rctx->authsize);
1014 } else {
1015 ret = crypto_memneq(rctx->authdata, result, rctx->authsize);
1016 if (ret)
1017 return -EBADMSG;
1020 return 0;
1023 static int tegra_ccm_ctr_result(struct tegra_se *se, struct tegra_aead_reqctx *rctx)
1025 /* Copy result */
1026 scatterwalk_map_and_copy(rctx->outbuf.buf + 16, rctx->dst_sg,
1027 rctx->assoclen, rctx->cryptlen, 1);
1029 if (rctx->encrypt)
1030 scatterwalk_map_and_copy(rctx->outbuf.buf, rctx->dst_sg,
1031 rctx->assoclen + rctx->cryptlen,
1032 rctx->authsize, 1);
1033 else
1034 memcpy(rctx->authdata, rctx->outbuf.buf, rctx->authsize);
1036 return 0;
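/*
 * CBC-MAC half of CCM: format B_0 and the encoded associated data into
 * the input buffer, append the padded message (the plaintext on
 * encryption, the just-decrypted text on decryption), run the CBC-MAC
 * command and then store or verify the resulting tag.
 */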
1039 static int tegra_ccm_compute_auth(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
1041 struct tegra_se *se = ctx->se;
1042 struct scatterlist *sg;
1043 int offset, ret;
1045 offset = tegra_ccm_format_blocks(rctx);
1046 if (offset < 0)
1047 return -EINVAL;
1049 /* Copy plain text to the buffer */
1050 sg = rctx->encrypt ? rctx->src_sg : rctx->dst_sg;
1052 scatterwalk_map_and_copy(rctx->inbuf.buf + offset,
1053 sg, rctx->assoclen,
1054 rctx->cryptlen, 0);
1055 offset += rctx->cryptlen;
1056 offset += tegra_ccm_add_padding(rctx->inbuf.buf + offset, rctx->cryptlen);
1058 rctx->inbuf.size = offset;
1060 ret = tegra_ccm_do_cbcmac(ctx, rctx);
1061 if (ret)
1062 return ret;
1064 return tegra_ccm_mac_result(se, rctx);
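/*
 * CTR half of CCM: place the auth tag (computed on encryption, taken
 * from the source scatterlist on decryption) at the top of the input
 * buffer, append the payload if any, run AES-CTR over the whole buffer
 * and let tegra_ccm_ctr_result() scatter the payload back and handle
 * the encrypted or decrypted tag.
 */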
1067 static int tegra_ccm_do_ctr(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
1069 struct tegra_se *se = ctx->se;
1070 unsigned int cmdlen, offset = 0;
1071 struct scatterlist *sg = rctx->src_sg;
1072 int ret;
1074 rctx->config = tegra234_aes_cfg(SE_ALG_CTR, rctx->encrypt);
1075 rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_CTR, rctx->encrypt) |
1076 SE_AES_KEY_INDEX(ctx->key_id);
1078 /* Copy authdata in the top of buffer for encryption/decryption */
1079 if (rctx->encrypt)
1080 memcpy(rctx->inbuf.buf, rctx->authdata, rctx->authsize);
1081 else
1082 scatterwalk_map_and_copy(rctx->inbuf.buf, sg,
1083 rctx->assoclen + rctx->cryptlen,
1084 rctx->authsize, 0);
1086 offset += rctx->authsize;
1087 offset += tegra_ccm_add_padding(rctx->inbuf.buf + offset, rctx->authsize);
1089 /* If there is no cryptlen, proceed to submit the task */
1090 if (rctx->cryptlen) {
1091 scatterwalk_map_and_copy(rctx->inbuf.buf + offset, sg,
1092 rctx->assoclen, rctx->cryptlen, 0);
1093 offset += rctx->cryptlen;
1094 offset += tegra_ccm_add_padding(rctx->inbuf.buf + offset, rctx->cryptlen);
1097 rctx->inbuf.size = offset;
1099 /* Prepare command and submit */
1100 cmdlen = tegra_ctr_prep_cmd(ctx, rctx);
1101 ret = tegra_se_host1x_submit(se, cmdlen);
1102 if (ret)
1103 return ret;
1105 return tegra_ccm_ctr_result(se, rctx);
1108 static int tegra_ccm_crypt_init(struct aead_request *req, struct tegra_se *se,
1109 struct tegra_aead_reqctx *rctx)
1111 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1112 u8 *iv = (u8 *)rctx->iv;
1113 int ret, i;
1115 rctx->src_sg = req->src;
1116 rctx->dst_sg = req->dst;
1117 rctx->assoclen = req->assoclen;
1118 rctx->authsize = crypto_aead_authsize(tfm);
1120 memcpy(iv, req->iv, 16);
1122 ret = tegra_ccm_check_iv(iv);
1123 if (ret)
1124 return ret;
	/* Note: rfc 3610 and NIST 800-38C require counter (ctr_0) of
	 * zero to encrypt auth tag.
	 * req->iv has the formatted ctr_0 (i.e. Flags || N || 0).
	 */
1130 memset(iv + 15 - iv[0], 0, iv[0] + 1);
1132 /* Clear any previous result */
1133 for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
1134 writel(0, se->base + se->hw->regs->result + (i * 4));
1136 return 0;
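/*
 * crypto_engine callback for CCM: allocate the bounce buffers, then
 * run CBC-MAC before CTR for encryption and CTR before CBC-MAC for
 * decryption, as required for tag generation versus verification.
 */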
1139 static int tegra_ccm_do_one_req(struct crypto_engine *engine, void *areq)
1141 struct aead_request *req = container_of(areq, struct aead_request, base);
1142 struct tegra_aead_reqctx *rctx = aead_request_ctx(req);
1143 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1144 struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
1145 struct tegra_se *se = ctx->se;
1146 int ret;
1148 /* Allocate buffers required */
1149 rctx->inbuf.buf = dma_alloc_coherent(ctx->se->dev, SE_AES_BUFLEN,
1150 &rctx->inbuf.addr, GFP_KERNEL);
1151 if (!rctx->inbuf.buf)
1152 return -ENOMEM;
1154 rctx->inbuf.size = SE_AES_BUFLEN;
1156 rctx->outbuf.buf = dma_alloc_coherent(ctx->se->dev, SE_AES_BUFLEN,
1157 &rctx->outbuf.addr, GFP_KERNEL);
1158 if (!rctx->outbuf.buf) {
1159 ret = -ENOMEM;
1160 goto outbuf_err;
1163 rctx->outbuf.size = SE_AES_BUFLEN;
1165 ret = tegra_ccm_crypt_init(req, se, rctx);
1166 if (ret)
1167 goto out;
1169 if (rctx->encrypt) {
1170 rctx->cryptlen = req->cryptlen;
1172 /* CBC MAC Operation */
1173 ret = tegra_ccm_compute_auth(ctx, rctx);
1174 if (ret)
1175 goto out;
1177 /* CTR operation */
1178 ret = tegra_ccm_do_ctr(ctx, rctx);
1179 if (ret)
1180 goto out;
1181 } else {
1182 rctx->cryptlen = req->cryptlen - ctx->authsize;
1184 /* CTR operation */
1185 ret = tegra_ccm_do_ctr(ctx, rctx);
1186 if (ret)
1187 goto out;
1189 /* CBC MAC Operation */
1190 ret = tegra_ccm_compute_auth(ctx, rctx);
1191 if (ret)
1192 goto out;
1195 out:
1196 dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN,
1197 rctx->outbuf.buf, rctx->outbuf.addr);
1199 outbuf_err:
1200 dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN,
1201 rctx->inbuf.buf, rctx->inbuf.addr);
1203 crypto_finalize_aead_request(ctx->se->engine, req, ret);
1205 return 0;
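/*
 * crypto_engine callback for GCM: run GMAC over the associated data
 * (if any), the GCM encrypt/decrypt pass over the payload (if any),
 * the GCM_FINAL tag computation and, on decryption, tag verification.
 */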
1208 static int tegra_gcm_do_one_req(struct crypto_engine *engine, void *areq)
1210 struct aead_request *req = container_of(areq, struct aead_request, base);
1211 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1212 struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
1213 struct tegra_aead_reqctx *rctx = aead_request_ctx(req);
1214 int ret;
1216 /* Allocate buffers required */
1217 rctx->inbuf.buf = dma_alloc_coherent(ctx->se->dev, SE_AES_BUFLEN,
1218 &rctx->inbuf.addr, GFP_KERNEL);
1219 if (!rctx->inbuf.buf)
1220 return -ENOMEM;
1222 rctx->inbuf.size = SE_AES_BUFLEN;
1224 rctx->outbuf.buf = dma_alloc_coherent(ctx->se->dev, SE_AES_BUFLEN,
1225 &rctx->outbuf.addr, GFP_KERNEL);
1226 if (!rctx->outbuf.buf) {
1227 ret = -ENOMEM;
1228 goto outbuf_err;
1231 rctx->outbuf.size = SE_AES_BUFLEN;
1233 rctx->src_sg = req->src;
1234 rctx->dst_sg = req->dst;
1235 rctx->assoclen = req->assoclen;
1236 rctx->authsize = crypto_aead_authsize(tfm);
1238 if (rctx->encrypt)
1239 rctx->cryptlen = req->cryptlen;
1240 else
1241 rctx->cryptlen = req->cryptlen - ctx->authsize;
1243 memcpy(rctx->iv, req->iv, GCM_AES_IV_SIZE);
1244 rctx->iv[3] = (1 << 24);
1246 /* If there is associated data perform GMAC operation */
1247 if (rctx->assoclen) {
1248 ret = tegra_gcm_do_gmac(ctx, rctx);
1249 if (ret)
1250 goto out;
1253 /* GCM Encryption/Decryption operation */
1254 if (rctx->cryptlen) {
1255 ret = tegra_gcm_do_crypt(ctx, rctx);
1256 if (ret)
1257 goto out;
1260 /* GCM_FINAL operation */
1261 ret = tegra_gcm_do_final(ctx, rctx);
1262 if (ret)
1263 goto out;
1265 if (!rctx->encrypt)
1266 ret = tegra_gcm_do_verify(ctx->se, rctx);
1268 out:
1269 dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN,
1270 rctx->outbuf.buf, rctx->outbuf.addr);
1272 outbuf_err:
1273 dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN,
1274 rctx->inbuf.buf, rctx->inbuf.addr);
1276 /* Finalize the request if there are no errors */
1277 crypto_finalize_aead_request(ctx->se->engine, req, ret);
1279 return 0;
1282 static int tegra_aead_cra_init(struct crypto_aead *tfm)
1284 struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
1285 struct aead_alg *alg = crypto_aead_alg(tfm);
1286 struct tegra_se_alg *se_alg;
1287 const char *algname;
1288 int ret;
1290 algname = crypto_tfm_alg_name(&tfm->base);
1292 se_alg = container_of(alg, struct tegra_se_alg, alg.aead.base);
1294 crypto_aead_set_reqsize(tfm, sizeof(struct tegra_aead_reqctx));
1296 ctx->se = se_alg->se_dev;
1297 ctx->key_id = 0;
1299 ret = se_algname_to_algid(algname);
1300 if (ret < 0) {
1301 dev_err(ctx->se->dev, "invalid algorithm\n");
1302 return ret;
1305 ctx->alg = ret;
1307 return 0;
1310 static int tegra_ccm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
1312 struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
1314 switch (authsize) {
1315 case 4:
1316 case 6:
1317 case 8:
1318 case 10:
1319 case 12:
1320 case 14:
1321 case 16:
1322 break;
1323 default:
1324 return -EINVAL;
1327 ctx->authsize = authsize;
1329 return 0;
1332 static int tegra_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
1334 struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
1335 int ret;
1337 ret = crypto_gcm_check_authsize(authsize);
1338 if (ret)
1339 return ret;
1341 ctx->authsize = authsize;
1343 return 0;
1346 static void tegra_aead_cra_exit(struct crypto_aead *tfm)
1348 struct tegra_aead_ctx *ctx = crypto_tfm_ctx(&tfm->base);
1350 if (ctx->key_id)
1351 tegra_key_invalidate(ctx->se, ctx->key_id, ctx->alg);
1354 static int tegra_aead_crypt(struct aead_request *req, bool encrypt)
1356 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1357 struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
1358 struct tegra_aead_reqctx *rctx = aead_request_ctx(req);
1360 rctx->encrypt = encrypt;
1362 return crypto_transfer_aead_request_to_engine(ctx->se->engine, req);
1365 static int tegra_aead_encrypt(struct aead_request *req)
1367 return tegra_aead_crypt(req, true);
1370 static int tegra_aead_decrypt(struct aead_request *req)
1372 return tegra_aead_crypt(req, false);
1375 static int tegra_aead_setkey(struct crypto_aead *tfm,
1376 const u8 *key, u32 keylen)
1378 struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
1380 if (aes_check_keylen(keylen)) {
1381 dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen);
1382 return -EINVAL;
1385 return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id);
1388 static unsigned int tegra_cmac_prep_cmd(struct tegra_cmac_ctx *ctx,
1389 struct tegra_cmac_reqctx *rctx)
1391 unsigned int data_count, res_bits = 0, i = 0, j;
1392 struct tegra_se *se = ctx->se;
1393 u32 *cpuvaddr = se->cmdbuf->addr, op;
1395 data_count = (rctx->datbuf.size / AES_BLOCK_SIZE);
1397 op = SE_AES_OP_WRSTALL | SE_AES_OP_START | SE_AES_OP_LASTBUF;
1399 if (!(rctx->task & SHA_UPDATE)) {
1400 op |= SE_AES_OP_FINAL;
1401 res_bits = (rctx->datbuf.size % AES_BLOCK_SIZE) * 8;
1404 if (!res_bits && data_count)
1405 data_count--;
1407 if (rctx->task & SHA_FIRST) {
1408 rctx->task &= ~SHA_FIRST;
1410 cpuvaddr[i++] = host1x_opcode_setpayload(SE_CRYPTO_CTR_REG_COUNT);
1411 cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->linear_ctr);
1412 /* Load 0 IV */
1413 for (j = 0; j < SE_CRYPTO_CTR_REG_COUNT; j++)
1414 cpuvaddr[i++] = 0;
1417 cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1);
1418 cpuvaddr[i++] = SE_LAST_BLOCK_VAL(data_count) |
1419 SE_LAST_BLOCK_RES_BITS(res_bits);
1421 cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6);
1422 cpuvaddr[i++] = rctx->config;
1423 cpuvaddr[i++] = rctx->crypto_config;
1425 /* Source Address */
1426 cpuvaddr[i++] = lower_32_bits(rctx->datbuf.addr);
1427 cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->datbuf.addr)) |
1428 SE_ADDR_HI_SZ(rctx->datbuf.size);
1429 cpuvaddr[i++] = 0;
1430 cpuvaddr[i++] = SE_ADDR_HI_SZ(AES_BLOCK_SIZE);
1432 cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
1433 cpuvaddr[i++] = op;
1435 cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
1436 cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
1437 host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);
1439 return i;
1442 static void tegra_cmac_copy_result(struct tegra_se *se, struct tegra_cmac_reqctx *rctx)
1444 int i;
1446 for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
1447 rctx->result[i] = readl(se->base + se->hw->regs->result + (i * 4));
1450 static void tegra_cmac_paste_result(struct tegra_se *se, struct tegra_cmac_reqctx *rctx)
1452 int i;
1454 for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
1455 writel(rctx->result[i],
1456 se->base + se->hw->regs->result + (i * 4));
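/*
 * Buffer-and-submit step for CMAC updates: hold back the trailing
 * bytes (a full block when the length is block-aligned) so final()
 * always has data to process, restore the saved intermediate result
 * for non-first updates and save it again unless this is the final
 * submission, to support import/export.
 */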
1459 static int tegra_cmac_do_update(struct ahash_request *req)
1461 struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
1462 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1463 struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
1464 struct tegra_se *se = ctx->se;
1465 unsigned int nblks, nresidue, cmdlen;
1466 int ret;
1468 if (!req->nbytes)
1469 return 0;
1471 nresidue = (req->nbytes + rctx->residue.size) % rctx->blk_size;
1472 nblks = (req->nbytes + rctx->residue.size) / rctx->blk_size;
	/*
	 * Reserve the last block as residue during final() to process.
	 */
1477 if (!nresidue && nblks) {
1478 nresidue += rctx->blk_size;
1479 nblks--;
1482 rctx->src_sg = req->src;
1483 rctx->datbuf.size = (req->nbytes + rctx->residue.size) - nresidue;
1484 rctx->total_len += rctx->datbuf.size;
1485 rctx->config = tegra234_aes_cfg(SE_ALG_CMAC, 0);
1486 rctx->crypto_config = SE_AES_KEY_INDEX(ctx->key_id);
	/*
	 * Keep one block and residue bytes in residue and
	 * return. The bytes will be processed in final()
	 */
1492 if (nblks < 1) {
1493 scatterwalk_map_and_copy(rctx->residue.buf + rctx->residue.size,
1494 rctx->src_sg, 0, req->nbytes, 0);
1496 rctx->residue.size += req->nbytes;
1497 return 0;
1500 /* Copy the previous residue first */
1501 if (rctx->residue.size)
1502 memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
1504 scatterwalk_map_and_copy(rctx->datbuf.buf + rctx->residue.size,
1505 rctx->src_sg, 0, req->nbytes - nresidue, 0);
1507 scatterwalk_map_and_copy(rctx->residue.buf, rctx->src_sg,
1508 req->nbytes - nresidue, nresidue, 0);
1510 /* Update residue value with the residue after current block */
1511 rctx->residue.size = nresidue;
	/*
	 * If this is not the first 'update' call, paste the previous copied
	 * intermediate results to the registers so that it gets picked up.
	 * This is to support the import/export functionality.
	 */
1518 if (!(rctx->task & SHA_FIRST))
1519 tegra_cmac_paste_result(ctx->se, rctx);
1521 cmdlen = tegra_cmac_prep_cmd(ctx, rctx);
1523 ret = tegra_se_host1x_submit(se, cmdlen);
	/*
	 * If this is not the final update, copy the intermediate results
	 * from the registers so that it can be used in the next 'update'
	 * call. This is to support the import/export functionality.
	 */
1529 if (!(rctx->task & SHA_FINAL))
1530 tegra_cmac_copy_result(ctx->se, rctx);
1532 return ret;
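/*
 * Final CMAC step: process the buffered residue (falling back to a
 * software digest for zero-length messages when a fallback tfm is
 * available), read the tag from the result registers into req->result,
 * clear the registers and release the DMA buffers.
 */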
1535 static int tegra_cmac_do_final(struct ahash_request *req)
1537 struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
1538 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1539 struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
1540 struct tegra_se *se = ctx->se;
1541 u32 *result = (u32 *)req->result;
1542 int ret = 0, i, cmdlen;
1544 if (!req->nbytes && !rctx->total_len && ctx->fallback_tfm) {
1545 return crypto_shash_tfm_digest(ctx->fallback_tfm,
1546 rctx->datbuf.buf, 0, req->result);
1549 memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
1550 rctx->datbuf.size = rctx->residue.size;
1551 rctx->total_len += rctx->residue.size;
1552 rctx->config = tegra234_aes_cfg(SE_ALG_CMAC, 0);
1554 /* Prepare command and submit */
1555 cmdlen = tegra_cmac_prep_cmd(ctx, rctx);
1556 ret = tegra_se_host1x_submit(se, cmdlen);
1557 if (ret)
1558 goto out;
1560 /* Read and clear Result register */
1561 for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
1562 result[i] = readl(se->base + se->hw->regs->result + (i * 4));
1564 for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
1565 writel(0, se->base + se->hw->regs->result + (i * 4));
1567 out:
1568 dma_free_coherent(se->dev, SE_SHA_BUFLEN,
1569 rctx->datbuf.buf, rctx->datbuf.addr);
1570 dma_free_coherent(se->dev, crypto_ahash_blocksize(tfm) * 2,
1571 rctx->residue.buf, rctx->residue.addr);
1572 return ret;
1575 static int tegra_cmac_do_one_req(struct crypto_engine *engine, void *areq)
1577 struct ahash_request *req = ahash_request_cast(areq);
1578 struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
1579 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1580 struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
1581 struct tegra_se *se = ctx->se;
1582 int ret;
1584 if (rctx->task & SHA_UPDATE) {
1585 ret = tegra_cmac_do_update(req);
1586 rctx->task &= ~SHA_UPDATE;
1589 if (rctx->task & SHA_FINAL) {
1590 ret = tegra_cmac_do_final(req);
1591 rctx->task &= ~SHA_FINAL;
1594 crypto_finalize_hash_request(se->engine, req, ret);
1596 return 0;
1599 static void tegra_cmac_init_fallback(struct crypto_ahash *tfm, struct tegra_cmac_ctx *ctx,
1600 const char *algname)
1602 unsigned int statesize;
1604 ctx->fallback_tfm = crypto_alloc_shash(algname, 0, CRYPTO_ALG_NEED_FALLBACK);
1606 if (IS_ERR(ctx->fallback_tfm)) {
1607 dev_warn(ctx->se->dev, "failed to allocate fallback for %s\n", algname);
1608 ctx->fallback_tfm = NULL;
1609 return;
1612 statesize = crypto_shash_statesize(ctx->fallback_tfm);
1614 if (statesize > sizeof(struct tegra_cmac_reqctx))
1615 crypto_ahash_set_statesize(tfm, statesize);
1618 static int tegra_cmac_cra_init(struct crypto_tfm *tfm)
1620 struct tegra_cmac_ctx *ctx = crypto_tfm_ctx(tfm);
1621 struct crypto_ahash *ahash_tfm = __crypto_ahash_cast(tfm);
1622 struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
1623 struct tegra_se_alg *se_alg;
1624 const char *algname;
1625 int ret;
1627 algname = crypto_tfm_alg_name(tfm);
1628 se_alg = container_of(alg, struct tegra_se_alg, alg.ahash.base);
1630 crypto_ahash_set_reqsize(ahash_tfm, sizeof(struct tegra_cmac_reqctx));
1632 ctx->se = se_alg->se_dev;
1633 ctx->key_id = 0;
1635 ret = se_algname_to_algid(algname);
1636 if (ret < 0) {
1637 dev_err(ctx->se->dev, "invalid algorithm\n");
1638 return ret;
1641 ctx->alg = ret;
1643 tegra_cmac_init_fallback(ahash_tfm, ctx, algname);
1645 return 0;
1648 static void tegra_cmac_cra_exit(struct crypto_tfm *tfm)
1650 struct tegra_cmac_ctx *ctx = crypto_tfm_ctx(tfm);
1652 if (ctx->fallback_tfm)
1653 crypto_free_shash(ctx->fallback_tfm);
1655 tegra_key_invalidate(ctx->se, ctx->key_id, ctx->alg);
1658 static int tegra_cmac_init(struct ahash_request *req)
1660 struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
1661 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1662 struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
1663 struct tegra_se *se = ctx->se;
1664 int i;
1666 rctx->total_len = 0;
1667 rctx->datbuf.size = 0;
1668 rctx->residue.size = 0;
1669 rctx->task = SHA_FIRST;
1670 rctx->blk_size = crypto_ahash_blocksize(tfm);
1672 rctx->residue.buf = dma_alloc_coherent(se->dev, rctx->blk_size * 2,
1673 &rctx->residue.addr, GFP_KERNEL);
1674 if (!rctx->residue.buf)
1675 goto resbuf_fail;
1677 rctx->residue.size = 0;
1679 rctx->datbuf.buf = dma_alloc_coherent(se->dev, SE_SHA_BUFLEN,
1680 &rctx->datbuf.addr, GFP_KERNEL);
1681 if (!rctx->datbuf.buf)
1682 goto datbuf_fail;
1684 rctx->datbuf.size = 0;
1686 /* Clear any previous result */
1687 for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
1688 writel(0, se->base + se->hw->regs->result + (i * 4));
1690 return 0;
1692 datbuf_fail:
	dma_free_coherent(se->dev, rctx->blk_size * 2, rctx->residue.buf,
			  rctx->residue.addr);
1695 resbuf_fail:
1696 return -ENOMEM;
1699 static int tegra_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
1700 unsigned int keylen)
1702 struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
1704 if (aes_check_keylen(keylen)) {
1705 dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen);
1706 return -EINVAL;
1709 if (ctx->fallback_tfm)
1710 crypto_shash_setkey(ctx->fallback_tfm, key, keylen);
1712 return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id);
1715 static int tegra_cmac_update(struct ahash_request *req)
1717 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1718 struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
1719 struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
1721 rctx->task |= SHA_UPDATE;
1723 return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
1726 static int tegra_cmac_final(struct ahash_request *req)
1728 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1729 struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
1730 struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
1732 rctx->task |= SHA_FINAL;
1734 return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
1737 static int tegra_cmac_finup(struct ahash_request *req)
1739 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1740 struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
1741 struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
1743 rctx->task |= SHA_UPDATE | SHA_FINAL;
1745 return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
1748 static int tegra_cmac_digest(struct ahash_request *req)
1750 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1751 struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
1752 struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
1754 tegra_cmac_init(req);
1755 rctx->task |= SHA_UPDATE | SHA_FINAL;
1757 return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
1760 static int tegra_cmac_export(struct ahash_request *req, void *out)
1762 struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
1764 memcpy(out, rctx, sizeof(*rctx));
1766 return 0;
1769 static int tegra_cmac_import(struct ahash_request *req, const void *in)
1771 struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
1773 memcpy(rctx, in, sizeof(*rctx));
1775 return 0;
1778 static struct tegra_se_alg tegra_aead_algs[] = {
1780 .alg.aead.op.do_one_request = tegra_gcm_do_one_req,
1781 .alg.aead.base = {
1782 .init = tegra_aead_cra_init,
1783 .exit = tegra_aead_cra_exit,
1784 .setkey = tegra_aead_setkey,
1785 .setauthsize = tegra_gcm_setauthsize,
1786 .encrypt = tegra_aead_encrypt,
1787 .decrypt = tegra_aead_decrypt,
1788 .maxauthsize = AES_BLOCK_SIZE,
1789 .ivsize = GCM_AES_IV_SIZE,
1790 .base = {
1791 .cra_name = "gcm(aes)",
1792 .cra_driver_name = "gcm-aes-tegra",
1793 .cra_priority = 500,
1794 .cra_blocksize = 1,
1795 .cra_ctxsize = sizeof(struct tegra_aead_ctx),
1796 .cra_alignmask = 0xf,
1797 .cra_module = THIS_MODULE,
1800 }, {
1801 .alg.aead.op.do_one_request = tegra_ccm_do_one_req,
1802 .alg.aead.base = {
1803 .init = tegra_aead_cra_init,
1804 .exit = tegra_aead_cra_exit,
1805 .setkey = tegra_aead_setkey,
1806 .setauthsize = tegra_ccm_setauthsize,
1807 .encrypt = tegra_aead_encrypt,
1808 .decrypt = tegra_aead_decrypt,
1809 .maxauthsize = AES_BLOCK_SIZE,
1810 .ivsize = AES_BLOCK_SIZE,
1811 .chunksize = AES_BLOCK_SIZE,
1812 .base = {
1813 .cra_name = "ccm(aes)",
1814 .cra_driver_name = "ccm-aes-tegra",
1815 .cra_priority = 500,
1816 .cra_blocksize = 1,
1817 .cra_ctxsize = sizeof(struct tegra_aead_ctx),
1818 .cra_alignmask = 0xf,
1819 .cra_module = THIS_MODULE,
1825 static struct tegra_se_alg tegra_cmac_algs[] = {
1827 .alg.ahash.op.do_one_request = tegra_cmac_do_one_req,
1828 .alg.ahash.base = {
1829 .init = tegra_cmac_init,
1830 .setkey = tegra_cmac_setkey,
1831 .update = tegra_cmac_update,
1832 .final = tegra_cmac_final,
1833 .finup = tegra_cmac_finup,
1834 .digest = tegra_cmac_digest,
1835 .export = tegra_cmac_export,
1836 .import = tegra_cmac_import,
1837 .halg.digestsize = AES_BLOCK_SIZE,
1838 .halg.statesize = sizeof(struct tegra_cmac_reqctx),
1839 .halg.base = {
1840 .cra_name = "cmac(aes)",
1841 .cra_driver_name = "tegra-se-cmac",
1842 .cra_priority = 300,
1843 .cra_flags = CRYPTO_ALG_TYPE_AHASH,
1844 .cra_blocksize = AES_BLOCK_SIZE,
1845 .cra_ctxsize = sizeof(struct tegra_cmac_ctx),
1846 .cra_alignmask = 0,
1847 .cra_module = THIS_MODULE,
1848 .cra_init = tegra_cmac_cra_init,
1849 .cra_exit = tegra_cmac_cra_exit,
1855 int tegra_init_aes(struct tegra_se *se)
1857 struct aead_engine_alg *aead_alg;
1858 struct ahash_engine_alg *ahash_alg;
1859 struct skcipher_engine_alg *sk_alg;
1860 int i, ret;
1862 se->manifest = tegra_aes_kac_manifest;
1864 for (i = 0; i < ARRAY_SIZE(tegra_aes_algs); i++) {
1865 sk_alg = &tegra_aes_algs[i].alg.skcipher;
1866 tegra_aes_algs[i].se_dev = se;
1868 ret = crypto_engine_register_skcipher(sk_alg);
1869 if (ret) {
1870 dev_err(se->dev, "failed to register %s\n",
1871 sk_alg->base.base.cra_name);
1872 goto err_aes;
1876 for (i = 0; i < ARRAY_SIZE(tegra_aead_algs); i++) {
1877 aead_alg = &tegra_aead_algs[i].alg.aead;
1878 tegra_aead_algs[i].se_dev = se;
1880 ret = crypto_engine_register_aead(aead_alg);
1881 if (ret) {
1882 dev_err(se->dev, "failed to register %s\n",
1883 aead_alg->base.base.cra_name);
1884 goto err_aead;
1888 for (i = 0; i < ARRAY_SIZE(tegra_cmac_algs); i++) {
1889 ahash_alg = &tegra_cmac_algs[i].alg.ahash;
1890 tegra_cmac_algs[i].se_dev = se;
1892 ret = crypto_engine_register_ahash(ahash_alg);
1893 if (ret) {
1894 dev_err(se->dev, "failed to register %s\n",
1895 ahash_alg->base.halg.base.cra_name);
1896 goto err_cmac;
1900 return 0;
1902 err_cmac:
1903 while (i--)
1904 crypto_engine_unregister_ahash(&tegra_cmac_algs[i].alg.ahash);
1906 i = ARRAY_SIZE(tegra_aead_algs);
1907 err_aead:
1908 while (i--)
1909 crypto_engine_unregister_aead(&tegra_aead_algs[i].alg.aead);
1911 i = ARRAY_SIZE(tegra_aes_algs);
1912 err_aes:
1913 while (i--)
1914 crypto_engine_unregister_skcipher(&tegra_aes_algs[i].alg.skcipher);
1916 return ret;
1919 void tegra_deinit_aes(struct tegra_se *se)
1921 int i;
1923 for (i = 0; i < ARRAY_SIZE(tegra_aes_algs); i++)
1924 crypto_engine_unregister_skcipher(&tegra_aes_algs[i].alg.skcipher);
1926 for (i = 0; i < ARRAY_SIZE(tegra_aead_algs); i++)
1927 crypto_engine_unregister_aead(&tegra_aead_algs[i].alg.aead);
1929 for (i = 0; i < ARRAY_SIZE(tegra_cmac_algs); i++)
1930 crypto_engine_unregister_ahash(&tegra_cmac_algs[i].alg.ahash);