Linux 5.1.15
drivers/crypto/caam/caamalg_qi.c

// SPDX-License-Identifier: GPL-2.0+
/*
 * Freescale FSL CAAM support for crypto API over QI backend.
 * Based on caamalg.c
 *
 * Copyright 2013-2016 Freescale Semiconductor, Inc.
 * Copyright 2016-2018 NXP
 */

#include "compat.h"
#include "ctrl.h"
#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "error.h"
#include "sg_sw_qm.h"
#include "key_gen.h"
#include "qi.h"
#include "jr.h"
#include "caamalg_desc.h"

/*
 * crypto alg
 */
#define CAAM_CRA_PRIORITY		2000
/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
#define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
					 SHA512_DIGEST_SIZE * 2)

#define DESC_MAX_USED_BYTES		(DESC_QI_AEAD_GIVENC_LEN + \
					 CAAM_MAX_KEY_SIZE)
#define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)

struct caam_alg_entry {
	int class1_alg_type;
	int class2_alg_type;
	bool rfc3686;
	bool geniv;
};

struct caam_aead_alg {
	struct aead_alg aead;
	struct caam_alg_entry caam;
	bool registered;
};

struct caam_skcipher_alg {
	struct skcipher_alg skcipher;
	struct caam_alg_entry caam;
	bool registered;
};

/*
 * per-session context
 */
struct caam_ctx {
	struct device *jrdev;
	u32 sh_desc_enc[DESC_MAX_USED_LEN];
	u32 sh_desc_dec[DESC_MAX_USED_LEN];
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t key_dma;
	enum dma_data_direction dir;
	struct alginfo adata;
	struct alginfo cdata;
	unsigned int authsize;
	struct device *qidev;
	spinlock_t lock;	/* Protects multiple init of driver context */
	struct caam_drv_ctx *drv_ctx[NUM_OP];
};

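/*
 * Editor's sketch (illustrative, not part of the driver): for the authenc
 * algorithms below, aead_setkey() lays the key material out in ctx->key as
 *
 *	[ MDHA split key, padded to adata.keylen_pad | cipher key ]
 *
 * so the cipher key always starts at offset adata.keylen_pad, and key_dma
 * maps the same buffer for the case where a key is referenced by DMA
 * address instead of being inlined in the shared descriptor.
 */
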
static int aead_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 typeof(*alg), aead);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	u32 ctx1_iv_off = 0;
	u32 *nonce = NULL;
	unsigned int data_len[2];
	u32 inl_mask;
	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = alg->caam.rfc3686;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 */
	if (is_rfc3686) {
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
		nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
				ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
	}

	data_len[0] = ctx->adata.keylen_pad;
	data_len[1] = ctx->cdata.keylen;

	if (alg->caam.geniv)
		goto skip_enc;

	/* aead_encrypt shared descriptor */
	if (desc_inline_query(DESC_QI_AEAD_ENC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	if (inl_mask & 1)
		ctx->adata.key_virt = ctx->key;
	else
		ctx->adata.key_dma = ctx->key_dma;

	if (inl_mask & 2)
		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	else
		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	cnstr_shdsc_aead_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
			       ivsize, ctx->authsize, is_rfc3686, nonce,
			       ctx1_iv_off, true, ctrlpriv->era);

skip_enc:
	/* aead_decrypt shared descriptor */
	if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	if (inl_mask & 1)
		ctx->adata.key_virt = ctx->key;
	else
		ctx->adata.key_dma = ctx->key_dma;

	if (inl_mask & 2)
		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	else
		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	cnstr_shdsc_aead_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
			       ivsize, ctx->authsize, alg->caam.geniv,
			       is_rfc3686, nonce, ctx1_iv_off, true,
			       ctrlpriv->era);

	if (!alg->caam.geniv)
		goto skip_givenc;

	/* aead_givencrypt shared descriptor */
	if (desc_inline_query(DESC_QI_AEAD_GIVENC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	if (inl_mask & 1)
		ctx->adata.key_virt = ctx->key;
	else
		ctx->adata.key_dma = ctx->key_dma;

	if (inl_mask & 2)
		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	else
		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	cnstr_shdsc_aead_givencap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
				  ivsize, ctx->authsize, is_rfc3686, nonce,
				  ctx1_iv_off, true, ctrlpriv->era);

skip_givenc:
	return 0;
}

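/*
 * Editor's note (illustrative): desc_inline_query() checks whether inlining
 * each key still lets the shared descriptor plus a job descriptor fit in the
 * 64-word descriptor buffer. In the calls above, bit 0 of inl_mask answers
 * for data_len[0] (the split authentication key) and bit 1 for data_len[1]
 * (the cipher key); a set bit means "inline the key bytes in the descriptor",
 * a clear bit means "reference them through key_dma".
 */
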
static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	aead_set_sh_desc(authenc);

	return 0;
}

static int aead_setkey(struct crypto_aead *aead, const u8 *key,
		       unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
	struct crypto_authenc_keys keys;
	int ret = 0;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

#ifdef DEBUG
	dev_err(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
		keys.authkeylen + keys.enckeylen, keys.enckeylen,
		keys.authkeylen);
	print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	/*
	 * If DKP is supported, use it in the shared descriptor to generate
	 * the split key.
	 */
	if (ctrlpriv->era >= 6) {
		ctx->adata.keylen = keys.authkeylen;
		ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
						      OP_ALG_ALGSEL_MASK);

		if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
			goto badkey;

		memcpy(ctx->key, keys.authkey, keys.authkeylen);
		memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
		       keys.enckeylen);
		dma_sync_single_for_device(jrdev, ctx->key_dma,
					   ctx->adata.keylen_pad +
					   keys.enckeylen, ctx->dir);
		goto skip_split_key;
	}

	ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
			    keys.authkeylen, CAAM_MAX_KEY_SIZE -
			    keys.enckeylen);
	if (ret)
		goto badkey;

	/* postpend encryption key to auth split key */
	memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
				   keys.enckeylen, ctx->dir);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->adata.keylen_pad + keys.enckeylen, 1);
#endif

skip_split_key:
	ctx->cdata.keylen = keys.enckeylen;

	ret = aead_set_sh_desc(aead);
	if (ret)
		goto badkey;

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			goto badkey;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			goto badkey;
		}
	}

	memzero_explicit(&keys, sizeof(keys));
	return ret;
badkey:
	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
}

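/*
 * Editor's sketch (illustrative, compiled out): how a kernel user might
 * exercise this setkey path through the generic AEAD API. The algorithm
 * name and key sizes are example values; the guard macro is hypothetical.
 */
#ifdef CAAMALG_QI_EXAMPLES
static int example_authenc_setkey(const u8 *authenc_key, unsigned int keylen)
{
	struct crypto_aead *tfm;
	int ret;

	/* resolves to this driver when it has the highest priority */
	tfm = crypto_alloc_aead("authenc(hmac(sha256),cbc(aes))", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* authenc_key is an RTA-encoded {auth key, enc key} blob */
	ret = crypto_aead_setauthsize(tfm, SHA256_DIGEST_SIZE);
	if (!ret)
		ret = crypto_aead_setkey(tfm, authenc_key, keylen);

	crypto_free_aead(tfm);
	return ret;
}
#endif /* CAAMALG_QI_EXAMPLES */
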
static int gcm_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_gcm_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
			      ctx->authsize, true);

	/*
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_gcm_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
			      ctx->authsize, true);

	return 0;
}

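/*
 * Editor's note (illustrative): rem_bytes is the descriptor space left after
 * reserving room for the job descriptor I/O commands and for an inlined key.
 * A CAAM descriptor buffer holds 64 32-bit words, so the budget being split
 * here is CAAM_DESC_BYTES_MAX = 64 * CAAM_CMD_SZ bytes; the key is inlined
 * only when the remaining bytes still cover the shared descriptor template.
 */
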
static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	gcm_set_sh_desc(authenc);

	return 0;
}

static int gcm_setkey(struct crypto_aead *aead,
		      const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);
	dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir);
	ctx->cdata.keylen = keylen;

	ret = gcm_set_sh_desc(aead);
	if (ret)
		return ret;

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			return ret;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			return ret;
		}
	}

	return 0;
}

static int rfc4106_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	ctx->cdata.key_virt = ctx->key;

	/*
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_rfc4106_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
				  ctx->authsize, true);

	/*
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_rfc4106_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
				  ctx->authsize, true);

	return 0;
}

static int rfc4106_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	rfc4106_set_sh_desc(authenc);

	return 0;
}

static int rfc4106_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret;

	if (keylen < 4)
		return -EINVAL;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);
	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->cdata.keylen = keylen - 4;
	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
				   ctx->dir);

	ret = rfc4106_set_sh_desc(aead);
	if (ret)
		return ret;

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			return ret;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			return ret;
		}
	}

	return 0;
}

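/*
 * Editor's note (illustrative): per RFC 4106, the key material handed to
 * setkey is the AES key followed by a 4-byte salt, i.e. for AES-128 a
 * 20-byte blob laid out as
 *
 *	key[0..15]  AES key
 *	key[16..19] salt (becomes the fixed part of the GCM nonce)
 *
 * which is why keylen is trimmed by 4 before being used as the AES key
 * length above.
 */
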
static int rfc4543_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	ctx->cdata.key_virt = ctx->key;

	/*
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_rfc4543_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
				  ctx->authsize, true);

	/*
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_rfc4543_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
				  ctx->authsize, true);

	return 0;
}

static int rfc4543_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	rfc4543_set_sh_desc(authenc);

	return 0;
}

static int rfc4543_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret;

	if (keylen < 4)
		return -EINVAL;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);
	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->cdata.keylen = keylen - 4;
	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
				   ctx->dir);

	ret = rfc4543_set_sh_desc(aead);
	if (ret)
		return ret;

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			return ret;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			return ret;
		}
	}

	return 0;
}

static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
			   unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct caam_skcipher_alg *alg =
		container_of(crypto_skcipher_alg(skcipher), typeof(*alg),
			     skcipher);
	struct device *jrdev = ctx->jrdev;
	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
	u32 ctx1_iv_off = 0;
	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = alg->caam.rfc3686;
	int ret = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	| CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 *	| *key = {KEY, NONCE}
	 */
	if (is_rfc3686) {
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
		keylen -= CTR_RFC3686_NONCE_SIZE;
	}

	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = key;
	ctx->cdata.key_inline = true;

	/* skcipher encrypt, decrypt shared descriptors */
	cnstr_shdsc_skcipher_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
				   is_rfc3686, ctx1_iv_off);
	cnstr_shdsc_skcipher_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
				   is_rfc3686, ctx1_iv_off);

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			goto badkey;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			goto badkey;
		}
	}

	return ret;
badkey:
	crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

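/*
 * Editor's note (illustrative): for rfc3686(ctr(aes)) the caller-supplied
 * key blob is {KEY, NONCE}, with a 4-byte nonce appended to the AES key;
 * e.g. AES-128 arrives as 20 bytes. The code above strips the nonce from
 * keylen, and the descriptor loads CONTEXT1 as {NONCE, IV, COUNTER}.
 */
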
static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
			       unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct device *jrdev = ctx->jrdev;
	int ret = 0;

	if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
		dev_err(jrdev, "key size mismatch\n");
		goto badkey;
	}

	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = key;
	ctx->cdata.key_inline = true;

	/* xts skcipher encrypt, decrypt shared descriptors */
	cnstr_shdsc_xts_skcipher_encap(ctx->sh_desc_enc, &ctx->cdata);
	cnstr_shdsc_xts_skcipher_decap(ctx->sh_desc_dec, &ctx->cdata);

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			goto badkey;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			goto badkey;
		}
	}

	return ret;
badkey:
	crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

/*
 * aead_edesc - s/w-extended aead descriptor
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @iv_dma: dma address of iv for checking continuity and link table
 * @qm_sg_bytes: length of dma mapped h/w link table
 * @qm_sg_dma: bus physical mapped address of h/w link table
 * @assoclen: associated data length, in CAAM endianness
 * @assoclen_dma: bus physical mapped address of req->assoclen
 * @drv_req: driver-specific request structure
 * @sgt: the h/w link table, followed by IV
 */
struct aead_edesc {
	int src_nents;
	int dst_nents;
	dma_addr_t iv_dma;
	int qm_sg_bytes;
	dma_addr_t qm_sg_dma;
	unsigned int assoclen;
	dma_addr_t assoclen_dma;
	struct caam_drv_req drv_req;
	struct qm_sg_entry sgt[0];
};

/*
 * skcipher_edesc - s/w-extended skcipher descriptor
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @iv_dma: dma address of iv for checking continuity and link table
 * @qm_sg_bytes: length of dma mapped h/w link table
 * @qm_sg_dma: bus physical mapped address of h/w link table
 * @drv_req: driver-specific request structure
 * @sgt: the h/w link table, followed by IV
 */
struct skcipher_edesc {
	int src_nents;
	int dst_nents;
	dma_addr_t iv_dma;
	int qm_sg_bytes;
	dma_addr_t qm_sg_dma;
	struct caam_drv_req drv_req;
	struct qm_sg_entry sgt[0];
};

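/*
 * Editor's note (illustrative): both extended descriptors are carved out of
 * a single qi_cache allocation laid out as
 *
 *	[ edesc fields | sgt[qm_sg_ents] | IV (ivsize bytes) ]
 *
 * which is why the allocation paths below check
 * offsetof(edesc, sgt) + qm_sg_bytes + ivsize against CAAM_QI_MEMCACHE_SIZE
 * before touching the link table or the IV copy.
 */
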
static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx,
					enum optype type)
{
	/*
	 * This function is called on the fast path with values of 'type'
	 * known at compile time. Invalid arguments are not expected and
	 * thus no checks are made.
	 */
	struct caam_drv_ctx *drv_ctx = ctx->drv_ctx[type];
	u32 *desc;

	if (unlikely(!drv_ctx)) {
		spin_lock(&ctx->lock);

		/* Read again to check if some other core init drv_ctx */
		drv_ctx = ctx->drv_ctx[type];
		if (!drv_ctx) {
			int cpu;

			if (type == ENCRYPT)
				desc = ctx->sh_desc_enc;
			else /* (type == DECRYPT) */
				desc = ctx->sh_desc_dec;

			cpu = smp_processor_id();
			drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc);
			if (!IS_ERR_OR_NULL(drv_ctx))
				drv_ctx->op_type = type;

			ctx->drv_ctx[type] = drv_ctx;
		}

		spin_unlock(&ctx->lock);
	}

	return drv_ctx;
}

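/*
 * Editor's note (illustrative): this is the classic check-lock-recheck
 * pattern. The unlocked read keeps the common "already initialized" case
 * lock-free, while the second read under ctx->lock guarantees only one CPU
 * creates the per-direction driver context; losers of the race observe the
 * winner's pointer instead of allocating a duplicate.
 */
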
static void caam_unmap(struct device *dev, struct scatterlist *src,
		       struct scatterlist *dst, int src_nents,
		       int dst_nents, dma_addr_t iv_dma, int ivsize,
		       dma_addr_t qm_sg_dma, int qm_sg_bytes)
{
	if (dst != src) {
		if (src_nents)
			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
		if (dst_nents)
			dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
	} else {
		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
	}

	if (iv_dma)
		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
	if (qm_sg_bytes)
		dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
}

static void aead_unmap(struct device *dev,
		       struct aead_edesc *edesc,
		       struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	int ivsize = crypto_aead_ivsize(aead);

	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
	dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
}

static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
			   struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	int ivsize = crypto_skcipher_ivsize(skcipher);

	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
}

static void aead_done(struct caam_drv_req *drv_req, u32 status)
{
	struct device *qidev;
	struct aead_edesc *edesc;
	struct aead_request *aead_req = drv_req->app_ctx;
	struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
	struct caam_ctx *caam_ctx = crypto_aead_ctx(aead);
	int ecode = 0;

	qidev = caam_ctx->qidev;

	if (unlikely(status)) {
		u32 ssrc = status & JRSTA_SSRC_MASK;
		u8 err_id = status & JRSTA_CCBERR_ERRID_MASK;

		caam_jr_strstatus(qidev, status);
		/*
		 * verify hw auth check passed else return -EBADMSG
		 */
		if (ssrc == JRSTA_SSRC_CCB_ERROR &&
		    err_id == JRSTA_CCBERR_ERRID_ICVCHK)
			ecode = -EBADMSG;
		else
			ecode = -EIO;
	}

	edesc = container_of(drv_req, typeof(*edesc), drv_req);
	aead_unmap(qidev, edesc, aead_req);

	aead_request_complete(aead_req, ecode);
	qi_cache_free(edesc);
}

/*
 * allocate and map the aead extended descriptor
 */
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
					   bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 typeof(*alg), aead);
	struct device *qidev = ctx->qidev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	struct aead_edesc *edesc;
	dma_addr_t qm_sg_dma, iv_dma = 0;
	int ivsize = 0;
	unsigned int authsize = ctx->authsize;
	int qm_sg_index = 0, qm_sg_ents = 0, qm_sg_bytes;
	int in_len, out_len;
	struct qm_sg_entry *sg_table, *fd_sgt;
	struct caam_drv_ctx *drv_ctx;

	drv_ctx = get_drv_ctx(ctx, encrypt ? ENCRYPT : DECRYPT);
	if (IS_ERR_OR_NULL(drv_ctx))
		return (struct aead_edesc *)drv_ctx;

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = qi_cache_alloc(GFP_DMA | flags);
	if (unlikely(!edesc)) {
		dev_err(qidev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	if (likely(req->src == req->dst)) {
		src_nents = sg_nents_for_len(req->src, req->assoclen +
					     req->cryptlen +
					     (encrypt ? authsize : 0));
		if (unlikely(src_nents < 0)) {
			dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
				req->assoclen + req->cryptlen +
				(encrypt ? authsize : 0));
			qi_cache_free(edesc);
			return ERR_PTR(src_nents);
		}

		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(qidev, "unable to map source\n");
			qi_cache_free(edesc);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		src_nents = sg_nents_for_len(req->src, req->assoclen +
					     req->cryptlen);
		if (unlikely(src_nents < 0)) {
			dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
				req->assoclen + req->cryptlen);
			qi_cache_free(edesc);
			return ERR_PTR(src_nents);
		}

		dst_nents = sg_nents_for_len(req->dst, req->assoclen +
					     req->cryptlen +
					     (encrypt ? authsize :
							(-authsize)));
		if (unlikely(dst_nents < 0)) {
			dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
				req->assoclen + req->cryptlen +
				(encrypt ? authsize : (-authsize)));
			qi_cache_free(edesc);
			return ERR_PTR(dst_nents);
		}

		if (src_nents) {
			mapped_src_nents = dma_map_sg(qidev, req->src,
						      src_nents, DMA_TO_DEVICE);
			if (unlikely(!mapped_src_nents)) {
				dev_err(qidev, "unable to map source\n");
				qi_cache_free(edesc);
				return ERR_PTR(-ENOMEM);
			}
		} else {
			mapped_src_nents = 0;
		}

		if (dst_nents) {
			mapped_dst_nents = dma_map_sg(qidev, req->dst,
						      dst_nents,
						      DMA_FROM_DEVICE);
			if (unlikely(!mapped_dst_nents)) {
				dev_err(qidev, "unable to map destination\n");
				dma_unmap_sg(qidev, req->src, src_nents,
					     DMA_TO_DEVICE);
				qi_cache_free(edesc);
				return ERR_PTR(-ENOMEM);
			}
		} else {
			mapped_dst_nents = 0;
		}
	}

	if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv)
		ivsize = crypto_aead_ivsize(aead);

	/*
	 * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
	 * Input is not contiguous.
	 */
	qm_sg_ents = 1 + !!ivsize + mapped_src_nents +
		     (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
	sg_table = &edesc->sgt[0];
	qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
	if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
		     CAAM_QI_MEMCACHE_SIZE)) {
		dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
			qm_sg_ents, ivsize);
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	if (ivsize) {
		u8 *iv = (u8 *)(sg_table + qm_sg_ents);

		/* Make sure IV is located in a DMAable area */
		memcpy(iv, req->iv, ivsize);

		iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
		if (dma_mapping_error(qidev, iv_dma)) {
			dev_err(qidev, "unable to map IV\n");
			caam_unmap(qidev, req->src, req->dst, src_nents,
				   dst_nents, 0, 0, 0, 0);
			qi_cache_free(edesc);
			return ERR_PTR(-ENOMEM);
		}
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	edesc->drv_req.app_ctx = req;
	edesc->drv_req.cbk = aead_done;
	edesc->drv_req.drv_ctx = drv_ctx;

	edesc->assoclen = cpu_to_caam32(req->assoclen);
	edesc->assoclen_dma = dma_map_single(qidev, &edesc->assoclen, 4,
					     DMA_TO_DEVICE);
	if (dma_mapping_error(qidev, edesc->assoclen_dma)) {
		dev_err(qidev, "unable to map assoclen\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
	qm_sg_index++;
	if (ivsize) {
		dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
		qm_sg_index++;
	}
	sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
	qm_sg_index += mapped_src_nents;

	if (mapped_dst_nents > 1)
		sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
				 qm_sg_index, 0);

	qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(qidev, qm_sg_dma)) {
		dev_err(qidev, "unable to map S/G table\n");
		dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->qm_sg_dma = qm_sg_dma;
	edesc->qm_sg_bytes = qm_sg_bytes;

	out_len = req->assoclen + req->cryptlen +
		  (encrypt ? ctx->authsize : (-ctx->authsize));
	in_len = 4 + ivsize + req->assoclen + req->cryptlen;

	fd_sgt = &edesc->drv_req.fd_sgt[0];
	dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, in_len, 0);

	if (req->dst == req->src) {
		if (mapped_src_nents == 1)
			dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
					 out_len, 0);
		else
			dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
					     (1 + !!ivsize) * sizeof(*sg_table),
					     out_len, 0);
	} else if (mapped_dst_nents == 1) {
		dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), out_len,
				 0);
	} else {
		dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) *
				     qm_sg_index, out_len, 0);
	}

	return edesc;
}

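/*
 * Editor's note (illustrative): for AEAD the qm_sg table built above is
 *
 *	[ assoclen (4 bytes) | IV (optional) | src segments | dst segments ]
 *
 * and the frame descriptor's input entry covers the first three pieces,
 * hence in_len = 4 + ivsize + req->assoclen + req->cryptlen, while the
 * output entry points either at a single dst segment or into the same
 * table at qm_sg_index.
 */
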
static inline int aead_crypt(struct aead_request *req, bool encrypt)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ret;

	if (unlikely(caam_congested))
		return -EAGAIN;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, encrypt);
	if (IS_ERR_OR_NULL(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(ctx->qidev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}

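/*
 * Editor's sketch (illustrative, compiled out): a caller sees -EINPROGRESS
 * for a queued request and -EAGAIN under congestion. A synchronous wrapper
 * using the generic crypto wait helpers might look like this; the guard
 * macro is hypothetical.
 */
#ifdef CAAMALG_QI_EXAMPLES
static int example_aead_encrypt_sync(struct aead_request *req)
{
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  crypto_req_done, &wait);
	do {
		/* crypto_wait_req() turns -EINPROGRESS into the final code */
		ret = crypto_wait_req(crypto_aead_encrypt(req), &wait);
	} while (ret == -EAGAIN);	/* retry when the QI path is congested */

	return ret;
}
#endif /* CAAMALG_QI_EXAMPLES */
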
static int aead_encrypt(struct aead_request *req)
{
	return aead_crypt(req, true);
}

static int aead_decrypt(struct aead_request *req)
{
	return aead_crypt(req, false);
}

static int ipsec_gcm_encrypt(struct aead_request *req)
{
	if (req->assoclen < 8)
		return -EINVAL;

	return aead_crypt(req, true);
}

static int ipsec_gcm_decrypt(struct aead_request *req)
{
	if (req->assoclen < 8)
		return -EINVAL;

	return aead_crypt(req, false);
}

static void skcipher_done(struct caam_drv_req *drv_req, u32 status)
{
	struct skcipher_edesc *edesc;
	struct skcipher_request *req = drv_req->app_ctx;
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *caam_ctx = crypto_skcipher_ctx(skcipher);
	struct device *qidev = caam_ctx->qidev;
	int ivsize = crypto_skcipher_ivsize(skcipher);

#ifdef DEBUG
	dev_err(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status);
#endif

	edesc = container_of(drv_req, typeof(*edesc), drv_req);

	if (status)
		caam_jr_strstatus(qidev, status);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv @" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
		       edesc->src_nents > 1 ? 100 : ivsize, 1);
	caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
#endif

	skcipher_unmap(qidev, edesc, req);

	/*
	 * The crypto API expects us to set the IV (req->iv) to the last
	 * ciphertext block. This is used e.g. by the CTS mode.
	 */
	if (edesc->drv_req.drv_ctx->op_type == ENCRYPT)
		scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen -
					 ivsize, ivsize, 0);

	qi_cache_free(edesc);
	skcipher_request_complete(req, status);
}

static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
						   bool encrypt)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct device *qidev = ctx->qidev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	struct skcipher_edesc *edesc;
	dma_addr_t iv_dma;
	u8 *iv;
	int ivsize = crypto_skcipher_ivsize(skcipher);
	int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
	struct qm_sg_entry *sg_table, *fd_sgt;
	struct caam_drv_ctx *drv_ctx;

	drv_ctx = get_drv_ctx(ctx, encrypt ? ENCRYPT : DECRYPT);
	if (IS_ERR_OR_NULL(drv_ctx))
		return (struct skcipher_edesc *)drv_ctx;

	src_nents = sg_nents_for_len(req->src, req->cryptlen);
	if (unlikely(src_nents < 0)) {
		dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
			req->cryptlen);
		return ERR_PTR(src_nents);
	}

	if (unlikely(req->src != req->dst)) {
		dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
		if (unlikely(dst_nents < 0)) {
			dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
				req->cryptlen);
			return ERR_PTR(dst_nents);
		}

		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
					      DMA_TO_DEVICE);
		if (unlikely(!mapped_src_nents)) {
			dev_err(qidev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}

		mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
					      DMA_FROM_DEVICE);
		if (unlikely(!mapped_dst_nents)) {
			dev_err(qidev, "unable to map destination\n");
			dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(qidev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}
	}

	qm_sg_ents = 1 + mapped_src_nents;
	dst_sg_idx = qm_sg_ents;

	qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
	qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);
	if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
		     ivsize > CAAM_QI_MEMCACHE_SIZE)) {
		dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
			qm_sg_ents, ivsize);
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	/* allocate space for base edesc, link tables and IV */
	edesc = qi_cache_alloc(GFP_DMA | flags);
	if (unlikely(!edesc)) {
		dev_err(qidev, "could not allocate extended descriptor\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	/* Make sure IV is located in a DMAable area */
	sg_table = &edesc->sgt[0];
	iv = (u8 *)(sg_table + qm_sg_ents);
	memcpy(iv, req->iv, ivsize);

	iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
	if (dma_mapping_error(qidev, iv_dma)) {
		dev_err(qidev, "unable to map IV\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	edesc->qm_sg_bytes = qm_sg_bytes;
	edesc->drv_req.app_ctx = req;
	edesc->drv_req.cbk = skcipher_done;
	edesc->drv_req.drv_ctx = drv_ctx;

	dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
	sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);

	if (mapped_dst_nents > 1)
		sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
				 dst_sg_idx, 0);

	edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
		dev_err(qidev, "unable to map S/G table\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	fd_sgt = &edesc->drv_req.fd_sgt[0];

	dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma,
				  ivsize + req->cryptlen, 0);

	if (req->src == req->dst) {
		dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma +
				     sizeof(*sg_table), req->cryptlen, 0);
	} else if (mapped_dst_nents > 1) {
		dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
				     sizeof(*sg_table), req->cryptlen, 0);
	} else {
		dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst),
				 req->cryptlen, 0);
	}

	return edesc;
}

static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
{
	struct skcipher_edesc *edesc;
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	int ivsize = crypto_skcipher_ivsize(skcipher);
	int ret;

	if (unlikely(caam_congested))
		return -EAGAIN;

	/* allocate extended descriptor */
	edesc = skcipher_edesc_alloc(req, encrypt);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/*
	 * The crypto API expects us to set the IV (req->iv) to the last
	 * ciphertext block.
	 */
	if (!encrypt)
		scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen -
					 ivsize, ivsize, 0);

	ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		skcipher_unmap(ctx->qidev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}

static int skcipher_encrypt(struct skcipher_request *req)
{
	return skcipher_crypt(req, true);
}

static int skcipher_decrypt(struct skcipher_request *req)
{
	return skcipher_crypt(req, false);
}

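/*
 * Editor's sketch (illustrative, compiled out): minimal skcipher usage that
 * would land on these entry points; algorithm name and sizes are example
 * values, and the guard macro is hypothetical.
 */
#ifdef CAAMALG_QI_EXAMPLES
static int example_cbc_aes_encrypt(struct scatterlist *sg, unsigned int len,
				   u8 *iv, const u8 *key, unsigned int keylen)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *sreq;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_skcipher_setkey(tfm, key, keylen);
	if (ret)
		goto out_tfm;

	sreq = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!sreq) {
		ret = -ENOMEM;
		goto out_tfm;
	}

	skcipher_request_set_callback(sreq, CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(sreq, sg, sg, len, iv);	/* in-place */
	ret = crypto_wait_req(crypto_skcipher_encrypt(sreq), &wait);

	skcipher_request_free(sreq);
out_tfm:
	crypto_free_skcipher(tfm);
	return ret;
}
#endif /* CAAMALG_QI_EXAMPLES */
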
static struct caam_skcipher_alg driver_algs[] = {
	{
		.skcipher = {
			.base = {
				.cra_name = "cbc(aes)",
				.cra_driver_name = "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "cbc(des3_ede)",
				.cra_driver_name = "cbc-3des-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "cbc(des)",
				.cra_driver_name = "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "ctr(aes)",
				.cra_driver_name = "ctr-aes-caam-qi",
				.cra_blocksize = 1,
			},
			.setkey = skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.chunksize = AES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_AES |
					OP_ALG_AAI_CTR_MOD128,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "rfc3686(ctr(aes))",
				.cra_driver_name = "rfc3686-ctr-aes-caam-qi",
				.cra_blocksize = 1,
			},
			.setkey = skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE +
				       CTR_RFC3686_NONCE_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE +
				       CTR_RFC3686_NONCE_SIZE,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.chunksize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.rfc3686 = true,
		},
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "xts(aes)",
				.cra_driver_name = "xts-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = xts_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = 2 * AES_MIN_KEY_SIZE,
			.max_keysize = 2 * AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
	},
};

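/*
 * Editor's note (illustrative): each entry pairs the generic cra_name that
 * users request (e.g. "cbc(aes)") with this driver's cra_driver_name
 * ("cbc-aes-caam-qi"); with CAAM_CRA_PRIORITY at 2000 these implementations
 * normally win algorithm lookups over software fallbacks. A caller can also
 * pin this driver explicitly:
 *
 *	tfm = crypto_alloc_skcipher("cbc-aes-caam-qi", 0, 0);
 */
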
static struct caam_aead_alg driver_aeads[] = {
	{
		.aead = {
			.base = {
				.cra_name = "rfc4106(gcm(aes))",
				.cra_driver_name = "rfc4106-gcm-aes-caam-qi",
				.cra_blocksize = 1,
			},
			.setkey = rfc4106_setkey,
			.setauthsize = rfc4106_setauthsize,
			.encrypt = ipsec_gcm_encrypt,
			.decrypt = ipsec_gcm_decrypt,
			.ivsize = 8,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "rfc4543(gcm(aes))",
				.cra_driver_name = "rfc4543-gcm-aes-caam-qi",
				.cra_blocksize = 1,
			},
			.setkey = rfc4543_setkey,
			.setauthsize = rfc4543_setauthsize,
			.encrypt = ipsec_gcm_encrypt,
			.decrypt = ipsec_gcm_decrypt,
			.ivsize = 8,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
		},
	},
	/* Galois Counter Mode */
	{
		.aead = {
			.base = {
				.cra_name = "gcm(aes)",
				.cra_driver_name = "gcm-aes-caam-qi",
				.cra_blocksize = 1,
			},
			.setkey = gcm_setkey,
			.setauthsize = gcm_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = 12,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
		},
	},
	/* single-pass ipsec_esp descriptor */
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(aes))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha1-cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha224-cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha256-cbc-aes-"
						   "caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha384-cbc-aes-"
						   "caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha512-cbc-aes-"
						   "caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha1-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha224-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha256-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha384-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha512-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(des))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha1-cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha224-cbc-des-"
						   "caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha256-cbc-des-"
						   "caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha384-cbc-des-"
						   "caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha512-cbc-des-"
						   "caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
};

static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
			    bool uses_dkp)
{
	struct caam_drv_private *priv;

	/*
	 * distribute tfms across job rings to ensure in-order
	 * crypto request processing per tfm
	 */
	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}

	priv = dev_get_drvdata(ctx->jrdev->parent);
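	/*
	 * On Era 6+ parts the shared descriptors can use the Derived Key
	 * Protocol (DKP), which has the device write the derived split key
	 * back into ctx->key; such contexts need a bidirectional mapping.
	 */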
	if (priv->era >= 6 && uses_dkp)
		ctx->dir = DMA_BIDIRECTIONAL;
	else
		ctx->dir = DMA_TO_DEVICE;

	ctx->key_dma = dma_map_single(ctx->jrdev, ctx->key, sizeof(ctx->key),
				      ctx->dir);
	if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) {
		dev_err(ctx->jrdev, "unable to map key\n");
		caam_jr_free(ctx->jrdev);
		return -ENOMEM;
	}

	/* copy descriptor header template value */
	ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;

	ctx->qidev = priv->qidev;

	spin_lock_init(&ctx->lock);
	ctx->drv_ctx[ENCRYPT] = NULL;
	ctx->drv_ctx[DECRYPT] = NULL;

	return 0;
}

static int caam_cra_init(struct crypto_skcipher *tfm)
{
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct caam_skcipher_alg *caam_alg =
		container_of(alg, typeof(*caam_alg), skcipher);

	return caam_init_common(crypto_skcipher_ctx(tfm), &caam_alg->caam,
				false);
}

static int caam_aead_init(struct crypto_aead *tfm)
{
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
						      aead);
	struct caam_ctx *ctx = crypto_aead_ctx(tfm);
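
	/*
	 * Only the generic authenc code path uses aead_setkey() and hence
	 * DKP; the GCM variants install their own setkey, so their key
	 * mapping stays DMA_TO_DEVICE (see caam_init_common()).
	 */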
	return caam_init_common(ctx, &caam_alg->caam,
				alg->setkey == aead_setkey);
}

static void caam_exit_common(struct caam_ctx *ctx)
{
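	/*
	 * caam_drv_ctx_rel() is assumed to be a no-op for NULL or ERR_PTR
	 * values, so directions that were never instantiated are safe to
	 * release here.
	 */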
	caam_drv_ctx_rel(ctx->drv_ctx[ENCRYPT]);
	caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]);

	dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key), ctx->dir);

	caam_jr_free(ctx->jrdev);
}

static void caam_cra_exit(struct crypto_skcipher *tfm)
{
	caam_exit_common(crypto_skcipher_ctx(tfm));
}

static void caam_aead_exit(struct crypto_aead *tfm)
{
	caam_exit_common(crypto_aead_ctx(tfm));
}

static void __exit caam_qi_algapi_exit(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;

		if (t_alg->registered)
			crypto_unregister_aead(&t_alg->aead);
	}

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		struct caam_skcipher_alg *t_alg = driver_algs + i;

		if (t_alg->registered)
			crypto_unregister_skcipher(&t_alg->skcipher);
	}
}

static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
{
	struct skcipher_alg *alg = &t_alg->skcipher;

	alg->base.cra_module = THIS_MODULE;
	alg->base.cra_priority = CAAM_CRA_PRIORITY;
	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
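	/*
	 * CRYPTO_ALG_ASYNC: requests complete via QI callbacks rather than
	 * inline; CRYPTO_ALG_KERN_DRIVER_ONLY: hardware-backed implementation
	 * with no userspace (instruction-set) equivalent.
	 */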
	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;

	alg->init = caam_cra_init;
	alg->exit = caam_cra_exit;
}

static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
{
	struct aead_alg *alg = &t_alg->aead;

	alg->base.cra_module = THIS_MODULE;
	alg->base.cra_priority = CAAM_CRA_PRIORITY;
	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;

	alg->init = caam_aead_init;
	alg->exit = caam_aead_exit;
}

static int __init caam_qi_algapi_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	struct caam_drv_private *priv;
	int i = 0, err = 0;
	u32 aes_vid, aes_inst, des_inst, md_vid, md_inst;
	unsigned int md_limit = SHA512_DIGEST_SIZE;
	bool registered = false;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	of_node_put(dev_node);
	if (!pdev)
		return -ENODEV;
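
	/*
	 * of_find_device_by_node() took a reference on the underlying device;
	 * it is dropped via put_device() at the out_put_dev label below.
	 */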
	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);

	/*
	 * If priv is NULL, it's probably because the caam driver wasn't
	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
	 */
	if (!priv || !priv->qi_present) {
		err = -ENODEV;
		goto out_put_dev;
	}

	if (caam_dpaa2) {
		dev_info(ctrldev, "caam/qi frontend driver not suitable for DPAA 2.x, aborting...\n");
		err = -ENODEV;
		goto out_put_dev;
	}

	/*
	 * Register crypto algorithms the device supports.
	 * First, detect presence and attributes of DES, AES, and MD blocks.
	 */
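	/*
	 * Era < 10 parts pack CHA version and instantiation counts into the
	 * shared cha_id_ls/cha_num_ls registers; Era 10+ exposes a version
	 * register (vreg) per accelerator instead.
	 */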
	if (priv->era < 10) {
		u32 cha_vid, cha_inst;

		cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
		aes_vid = cha_vid & CHA_ID_LS_AES_MASK;
		md_vid = (cha_vid & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;

		cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
		des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >>
			   CHA_ID_LS_DES_SHIFT;
		aes_inst = cha_inst & CHA_ID_LS_AES_MASK;
		md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
	} else {
		u32 aesa, mdha;

		aesa = rd_reg32(&priv->ctrl->vreg.aesa);
		mdha = rd_reg32(&priv->ctrl->vreg.mdha);

		aes_vid = (aesa & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
		md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;

		des_inst = rd_reg32(&priv->ctrl->vreg.desa) & CHA_VER_NUM_MASK;
		aes_inst = aesa & CHA_VER_NUM_MASK;
		md_inst = mdha & CHA_VER_NUM_MASK;
	}

	/* If MD is present, limit digest size based on LP256 */
	if (md_inst && md_vid == CHA_VER_VID_MD_LP256)
		md_limit = SHA256_DIGEST_SIZE;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		struct caam_skcipher_alg *t_alg = driver_algs + i;
		u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!des_inst &&
		    ((alg_sel == OP_ALG_ALGSEL_3DES) ||
		     (alg_sel == OP_ALG_ALGSEL_DES)))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
			continue;

		caam_skcipher_alg_init(t_alg);

		err = crypto_register_skcipher(&t_alg->skcipher);
		if (err) {
			dev_warn(priv->qidev, "%s alg registration failed\n",
				 t_alg->skcipher.base.cra_driver_name);
			continue;
		}

		t_alg->registered = true;
		registered = true;
	}

	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;
		u32 c1_alg_sel = t_alg->caam.class1_alg_type &
				 OP_ALG_ALGSEL_MASK;
		u32 c2_alg_sel = t_alg->caam.class2_alg_type &
				 OP_ALG_ALGSEL_MASK;
		u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!des_inst &&
		    ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
		     (c1_alg_sel == OP_ALG_ALGSEL_DES)))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
			continue;

		/*
		 * Check support for AES algorithms not available
		 * on LP devices.
		 */
		if (aes_vid == CHA_VER_VID_AES_LP && alg_aai == OP_ALG_AAI_GCM)
			continue;

		/*
		 * Skip algorithms requiring message digests
		 * if MD or MD size is not supported by device.
		 */
		if (c2_alg_sel &&
		    (!md_inst || (t_alg->aead.maxauthsize > md_limit)))
			continue;

		caam_aead_alg_init(t_alg);

		err = crypto_register_aead(&t_alg->aead);
		if (err) {
			pr_warn("%s alg registration failed\n",
				t_alg->aead.base.cra_driver_name);
			continue;
		}

		t_alg->registered = true;
		registered = true;
	}

	if (registered)
		dev_info(priv->qidev, "algorithms registered in /proc/crypto\n");

out_put_dev:
	put_device(ctrldev);
	return err;
}

module_init(caam_qi_algapi_init);
module_exit(caam_qi_algapi_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Support for crypto API using CAAM-QI backend");
MODULE_AUTHOR("Freescale Semiconductor");