// SPDX-License-Identifier: GPL-2.0+
/*
 * Freescale FSL CAAM support for crypto API over QI backend.
 *
 * Copyright 2013-2016 Freescale Semiconductor, Inc.
 * Copyright 2016-2019 NXP
 */

#include "desc_constr.h"
#include "caamalg_desc.h"
#include <crypto/xts.h>
#include <asm/unaligned.h>

#define CAAM_CRA_PRIORITY		2000
/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
#define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
					 SHA512_DIGEST_SIZE * 2)

#define DESC_MAX_USED_BYTES		(DESC_QI_AEAD_GIVENC_LEN + \
					 CAAM_MAX_KEY_SIZE)
#define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
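/*
 * Worked example of the bound above (values taken from <crypto/aes.h> and
 * <crypto/sha2.h>, shown here only as an illustration): AES_MAX_KEY_SIZE = 32
 * and SHA512_DIGEST_SIZE = 64, so CAAM_MAX_KEY_SIZE = 32 + 2 * 64 = 160 bytes.
 * The "2 * digest size" term covers the largest HMAC split key, which stores
 * both derived halves (ipad/opad states) of the authentication key.
 */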
struct caam_alg_entry {
	int class1_alg_type;
	int class2_alg_type;
	bool rfc3686;
	bool geniv;
};

struct caam_aead_alg {
	struct aead_alg aead;
	struct caam_alg_entry caam;
};

struct caam_skcipher_alg {
	struct skcipher_alg skcipher;
	struct caam_alg_entry caam;
};

struct caam_ctx {
	struct device *jrdev;
	u32 sh_desc_enc[DESC_MAX_USED_LEN];
	u32 sh_desc_dec[DESC_MAX_USED_LEN];
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t key_dma;
	enum dma_data_direction dir;
	struct alginfo adata;
	struct alginfo cdata;
	unsigned int authsize;
	struct device *qidev;
	spinlock_t lock;	/* Protects multiple init of driver context */
	struct caam_drv_ctx *drv_ctx[NUM_OP];
	bool xts_key_fallback;
	struct crypto_skcipher *fallback;
};

struct caam_skcipher_req_ctx {
	struct skcipher_request fallback_req;
};
static int aead_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 typeof(*alg), aead);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	u32 ctx1_iv_off = 0;
	u32 *nonce = NULL;
	unsigned int data_len[2];
	u32 inl_mask;
	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = alg->caam.rfc3686;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 */
	if (is_rfc3686) {
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
		nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
				ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
	}
	/*
	 * In case |user key| > |derived key|, using DKP<imm,imm> would result
	 * in invalid opcodes (last bytes of user key) in the resulting
	 * descriptor. Use DKP<ptr,imm> instead => both virtual and dma key
	 * addresses are needed.
	 */
	ctx->adata.key_virt = ctx->key;
	ctx->adata.key_dma = ctx->key_dma;

	ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	data_len[0] = ctx->adata.keylen_pad;
	data_len[1] = ctx->cdata.keylen;
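	/*
	 * Sketch of the ctx->key layout assumed by the pointers above (an
	 * illustration, not taken verbatim from the original sources):
	 *
	 *   ctx->key: [ auth split key, padded to adata.keylen_pad bytes ]
	 *             [ encryption key, cdata.keylen bytes               ]
	 *
	 * For RFC3686 the last CTR_RFC3686_NONCE_SIZE bytes of the encryption
	 * key area hold the nonce that 'nonce' was pointed at earlier.
	 * data_len[] feeds desc_inline_query() below so it can decide which
	 * of the two keys still fits inline in each shared descriptor.
	 */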
	/* aead_encrypt shared descriptor */
	if (desc_inline_query(DESC_QI_AEAD_ENC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	cnstr_shdsc_aead_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
			       ivsize, ctx->authsize, is_rfc3686, nonce,
			       ctx1_iv_off, true, ctrlpriv->era);

	/* aead_decrypt shared descriptor */
	if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	cnstr_shdsc_aead_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
			       ivsize, ctx->authsize, alg->caam.geniv,
			       is_rfc3686, nonce, ctx1_iv_off, true,
			       ctrlpriv->era);

	if (!alg->caam.geniv)
		goto skip_givenc;

	/* aead_givencrypt shared descriptor */
	if (desc_inline_query(DESC_QI_AEAD_GIVENC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	cnstr_shdsc_aead_givencap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
				  ivsize, ctx->authsize, is_rfc3686, nonce,
				  ctx1_iv_off, true, ctrlpriv->era);

skip_givenc:
	return 0;
}
static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	aead_set_sh_desc(authenc);

	return 0;
}
static int aead_setkey(struct crypto_aead *aead, const u8 *key,
		       unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
	struct crypto_authenc_keys keys;
	int ret = 0;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	dev_dbg(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
		keys.authkeylen + keys.enckeylen, keys.enckeylen,
		keys.authkeylen);
	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
	/*
	 * If DKP is supported, use it in the shared descriptor to generate
	 * the split key.
	 */
	if (ctrlpriv->era >= 6) {
		ctx->adata.keylen = keys.authkeylen;
		ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
						      OP_ALG_ALGSEL_MASK);

		if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
			goto badkey;

		memcpy(ctx->key, keys.authkey, keys.authkeylen);
		memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
		       keys.enckeylen);
		dma_sync_single_for_device(jrdev->parent, ctx->key_dma,
					   ctx->adata.keylen_pad +
					   keys.enckeylen, ctx->dir);
		goto skip_split_key;
	}

	ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
			    keys.authkeylen, CAAM_MAX_KEY_SIZE -
			    keys.enckeylen);
	if (ret)
		goto badkey;

	/* postpend encryption key to auth split key */
	memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
	dma_sync_single_for_device(jrdev->parent, ctx->key_dma,
				   ctx->adata.keylen_pad + keys.enckeylen,
				   ctx->dir);

	print_hex_dump_debug("ctx.key@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
			     ctx->adata.keylen_pad + keys.enckeylen, 1);

skip_split_key:
	ctx->cdata.keylen = keys.enckeylen;
	ret = aead_set_sh_desc(aead);
	if (ret)
		goto badkey;

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			goto badkey;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			goto badkey;
		}
	}

	memzero_explicit(&keys, sizeof(keys));
	return ret;
badkey:
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
}
static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
			    unsigned int keylen)
{
	struct crypto_authenc_keys keys;
	int err;

	err = crypto_authenc_extractkeys(&keys, key, keylen);
	if (unlikely(err))
		goto out;

	err = verify_aead_des3_key(aead, keys.enckey, keys.enckeylen) ?:
	      aead_setkey(aead, key, keylen);

out:
	memzero_explicit(&keys, sizeof(keys));
	return err;
}
static int gcm_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_gcm_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
			      ctx->authsize, true);

	/*
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_gcm_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
			      ctx->authsize, true);

	return 0;
}
static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
	int err;

	err = crypto_gcm_check_authsize(authsize);
	if (err)
		return err;

	ctx->authsize = authsize;
	gcm_set_sh_desc(authenc);

	return 0;
}
static int gcm_setkey(struct crypto_aead *aead,
		      const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret;

	ret = aes_check_keylen(keylen);
	if (ret)
		return ret;

	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	memcpy(ctx->key, key, keylen);
	dma_sync_single_for_device(jrdev->parent, ctx->key_dma, keylen,
				   ctx->dir);
	ctx->cdata.keylen = keylen;

	ret = gcm_set_sh_desc(aead);
	if (ret)
		return ret;

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			return ret;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			return ret;
		}
	}

	return 0;
}
403 static int rfc4106_set_sh_desc(struct crypto_aead
*aead
)
405 struct caam_ctx
*ctx
= crypto_aead_ctx(aead
);
406 unsigned int ivsize
= crypto_aead_ivsize(aead
);
407 int rem_bytes
= CAAM_DESC_BYTES_MAX
- DESC_JOB_IO_LEN
-
410 if (!ctx
->cdata
.keylen
|| !ctx
->authsize
)
413 ctx
->cdata
.key_virt
= ctx
->key
;
416 * Job Descriptor and Shared Descriptor
417 * must fit into the 64-word Descriptor h/w Buffer
419 if (rem_bytes
>= DESC_QI_RFC4106_ENC_LEN
) {
420 ctx
->cdata
.key_inline
= true;
422 ctx
->cdata
.key_inline
= false;
423 ctx
->cdata
.key_dma
= ctx
->key_dma
;
426 cnstr_shdsc_rfc4106_encap(ctx
->sh_desc_enc
, &ctx
->cdata
, ivsize
,
427 ctx
->authsize
, true);
430 * Job Descriptor and Shared Descriptor
431 * must fit into the 64-word Descriptor h/w Buffer
433 if (rem_bytes
>= DESC_QI_RFC4106_DEC_LEN
) {
434 ctx
->cdata
.key_inline
= true;
436 ctx
->cdata
.key_inline
= false;
437 ctx
->cdata
.key_dma
= ctx
->key_dma
;
440 cnstr_shdsc_rfc4106_decap(ctx
->sh_desc_dec
, &ctx
->cdata
, ivsize
,
441 ctx
->authsize
, true);
446 static int rfc4106_setauthsize(struct crypto_aead
*authenc
,
447 unsigned int authsize
)
449 struct caam_ctx
*ctx
= crypto_aead_ctx(authenc
);
452 err
= crypto_rfc4106_check_authsize(authsize
);
456 ctx
->authsize
= authsize
;
457 rfc4106_set_sh_desc(authenc
);
462 static int rfc4106_setkey(struct crypto_aead
*aead
,
463 const u8
*key
, unsigned int keylen
)
465 struct caam_ctx
*ctx
= crypto_aead_ctx(aead
);
466 struct device
*jrdev
= ctx
->jrdev
;
469 ret
= aes_check_keylen(keylen
- 4);
473 print_hex_dump_debug("key in @" __stringify(__LINE__
)": ",
474 DUMP_PREFIX_ADDRESS
, 16, 4, key
, keylen
, 1);
476 memcpy(ctx
->key
, key
, keylen
);
478 * The last four bytes of the key material are used as the salt value
479 * in the nonce. Update the AES key length.
481 ctx
->cdata
.keylen
= keylen
- 4;
482 dma_sync_single_for_device(jrdev
->parent
, ctx
->key_dma
,
483 ctx
->cdata
.keylen
, ctx
->dir
);
485 ret
= rfc4106_set_sh_desc(aead
);
489 /* Now update the driver contexts with the new shared descriptor */
490 if (ctx
->drv_ctx
[ENCRYPT
]) {
491 ret
= caam_drv_ctx_update(ctx
->drv_ctx
[ENCRYPT
],
494 dev_err(jrdev
, "driver enc context update failed\n");
499 if (ctx
->drv_ctx
[DECRYPT
]) {
500 ret
= caam_drv_ctx_update(ctx
->drv_ctx
[DECRYPT
],
503 dev_err(jrdev
, "driver dec context update failed\n");
511 static int rfc4543_set_sh_desc(struct crypto_aead
*aead
)
513 struct caam_ctx
*ctx
= crypto_aead_ctx(aead
);
514 unsigned int ivsize
= crypto_aead_ivsize(aead
);
515 int rem_bytes
= CAAM_DESC_BYTES_MAX
- DESC_JOB_IO_LEN
-
518 if (!ctx
->cdata
.keylen
|| !ctx
->authsize
)
521 ctx
->cdata
.key_virt
= ctx
->key
;
524 * Job Descriptor and Shared Descriptor
525 * must fit into the 64-word Descriptor h/w Buffer
527 if (rem_bytes
>= DESC_QI_RFC4543_ENC_LEN
) {
528 ctx
->cdata
.key_inline
= true;
530 ctx
->cdata
.key_inline
= false;
531 ctx
->cdata
.key_dma
= ctx
->key_dma
;
534 cnstr_shdsc_rfc4543_encap(ctx
->sh_desc_enc
, &ctx
->cdata
, ivsize
,
535 ctx
->authsize
, true);
538 * Job Descriptor and Shared Descriptor
539 * must fit into the 64-word Descriptor h/w Buffer
541 if (rem_bytes
>= DESC_QI_RFC4543_DEC_LEN
) {
542 ctx
->cdata
.key_inline
= true;
544 ctx
->cdata
.key_inline
= false;
545 ctx
->cdata
.key_dma
= ctx
->key_dma
;
548 cnstr_shdsc_rfc4543_decap(ctx
->sh_desc_dec
, &ctx
->cdata
, ivsize
,
549 ctx
->authsize
, true);
554 static int rfc4543_setauthsize(struct crypto_aead
*authenc
,
555 unsigned int authsize
)
557 struct caam_ctx
*ctx
= crypto_aead_ctx(authenc
);
562 ctx
->authsize
= authsize
;
563 rfc4543_set_sh_desc(authenc
);
568 static int rfc4543_setkey(struct crypto_aead
*aead
,
569 const u8
*key
, unsigned int keylen
)
571 struct caam_ctx
*ctx
= crypto_aead_ctx(aead
);
572 struct device
*jrdev
= ctx
->jrdev
;
575 ret
= aes_check_keylen(keylen
- 4);
579 print_hex_dump_debug("key in @" __stringify(__LINE__
)": ",
580 DUMP_PREFIX_ADDRESS
, 16, 4, key
, keylen
, 1);
582 memcpy(ctx
->key
, key
, keylen
);
584 * The last four bytes of the key material are used as the salt value
585 * in the nonce. Update the AES key length.
587 ctx
->cdata
.keylen
= keylen
- 4;
588 dma_sync_single_for_device(jrdev
->parent
, ctx
->key_dma
,
589 ctx
->cdata
.keylen
, ctx
->dir
);
591 ret
= rfc4543_set_sh_desc(aead
);
595 /* Now update the driver contexts with the new shared descriptor */
596 if (ctx
->drv_ctx
[ENCRYPT
]) {
597 ret
= caam_drv_ctx_update(ctx
->drv_ctx
[ENCRYPT
],
600 dev_err(jrdev
, "driver enc context update failed\n");
605 if (ctx
->drv_ctx
[DECRYPT
]) {
606 ret
= caam_drv_ctx_update(ctx
->drv_ctx
[DECRYPT
],
609 dev_err(jrdev
, "driver dec context update failed\n");
static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
			   unsigned int keylen, const u32 ctx1_iv_off)
{
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct caam_skcipher_alg *alg =
		container_of(crypto_skcipher_alg(skcipher), typeof(*alg),
			     skcipher);
	struct device *jrdev = ctx->jrdev;
	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
	const bool is_rfc3686 = alg->caam.rfc3686;
	int ret = 0;

	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = key;
	ctx->cdata.key_inline = true;

	/* skcipher encrypt, decrypt shared descriptors */
	cnstr_shdsc_skcipher_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
				   is_rfc3686, ctx1_iv_off);
	cnstr_shdsc_skcipher_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
				   is_rfc3686, ctx1_iv_off);

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			return -EINVAL;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			return -EINVAL;
		}
	}

	return ret;
}
static int aes_skcipher_setkey(struct crypto_skcipher *skcipher,
			       const u8 *key, unsigned int keylen)
{
	int err;

	err = aes_check_keylen(keylen);
	if (err)
		return err;

	return skcipher_setkey(skcipher, key, keylen, 0);
}

static int rfc3686_skcipher_setkey(struct crypto_skcipher *skcipher,
				   const u8 *key, unsigned int keylen)
{
	u32 ctx1_iv_off;
	int err;

	/*
	 * RFC3686 specific:
	 *	| CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 *	| *key = {KEY, NONCE}
	 */
	ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
	keylen -= CTR_RFC3686_NONCE_SIZE;

	err = aes_check_keylen(keylen);
	if (err)
		return err;

	return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
}

static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher,
			       const u8 *key, unsigned int keylen)
{
	u32 ctx1_iv_off;
	int err;

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	ctx1_iv_off = 16;

	err = aes_check_keylen(keylen);
	if (err)
		return err;

	return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
}

static int des3_skcipher_setkey(struct crypto_skcipher *skcipher,
				const u8 *key, unsigned int keylen)
{
	return verify_skcipher_des3_key(skcipher, key) ?:
	       skcipher_setkey(skcipher, key, keylen, 0);
}

static int des_skcipher_setkey(struct crypto_skcipher *skcipher,
			       const u8 *key, unsigned int keylen)
{
	return verify_skcipher_des_key(skcipher, key) ?:
	       skcipher_setkey(skcipher, key, keylen, 0);
}
static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
			       unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct device *jrdev = ctx->jrdev;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
	int ret = 0;
	int err;

	err = xts_verify_key(skcipher, key, keylen);
	if (err) {
		dev_dbg(jrdev, "key size mismatch\n");
		return err;
	}

	if (keylen != 2 * AES_KEYSIZE_128 && keylen != 2 * AES_KEYSIZE_256)
		ctx->xts_key_fallback = true;

	if (ctrlpriv->era <= 8 || ctx->xts_key_fallback) {
		err = crypto_skcipher_setkey(ctx->fallback, key, keylen);
		if (err)
			return err;
	}

	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = key;
	ctx->cdata.key_inline = true;

	/* xts skcipher encrypt, decrypt shared descriptors */
	cnstr_shdsc_xts_skcipher_encap(ctx->sh_desc_enc, &ctx->cdata);
	cnstr_shdsc_xts_skcipher_decap(ctx->sh_desc_dec, &ctx->cdata);

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			return -EINVAL;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			return -EINVAL;
		}
	}

	return ret;
}
/*
 * aead_edesc - s/w-extended aead descriptor
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @iv_dma: dma address of iv for checking continuity and link table
 * @qm_sg_bytes: length of dma mapped h/w link table
 * @qm_sg_dma: bus physical mapped address of h/w link table
 * @assoclen: associated data length, in CAAM endianness
 * @assoclen_dma: bus physical mapped address of req->assoclen
 * @drv_req: driver-specific request structure
 * @sgt: the h/w link table, followed by IV
 */
struct aead_edesc {
	int src_nents;
	int dst_nents;
	dma_addr_t iv_dma;
	int qm_sg_bytes;
	dma_addr_t qm_sg_dma;
	unsigned int assoclen;
	dma_addr_t assoclen_dma;
	struct caam_drv_req drv_req;
	struct qm_sg_entry sgt[];
};

/*
 * skcipher_edesc - s/w-extended skcipher descriptor
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @iv_dma: dma address of iv for checking continuity and link table
 * @qm_sg_bytes: length of dma mapped h/w link table
 * @qm_sg_dma: bus physical mapped address of h/w link table
 * @drv_req: driver-specific request structure
 * @sgt: the h/w link table, followed by IV
 */
struct skcipher_edesc {
	int src_nents;
	int dst_nents;
	dma_addr_t iv_dma;
	int qm_sg_bytes;
	dma_addr_t qm_sg_dma;
	struct caam_drv_req drv_req;
	struct qm_sg_entry sgt[];
};
static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx,
					enum optype type)
{
	/*
	 * This function is called on the fast path with values of 'type'
	 * known at compile time. Invalid arguments are not expected and
	 * thus no checks are made.
	 */
	struct caam_drv_ctx *drv_ctx = ctx->drv_ctx[type];
	u32 *desc;

	if (unlikely(!drv_ctx)) {
		spin_lock(&ctx->lock);

		/* Read again to check if some other core init drv_ctx */
		drv_ctx = ctx->drv_ctx[type];
		if (!drv_ctx) {
			int cpu;

			if (type == ENCRYPT)
				desc = ctx->sh_desc_enc;
			else /* (type == DECRYPT) */
				desc = ctx->sh_desc_dec;

			cpu = smp_processor_id();
			drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc);
			if (!IS_ERR(drv_ctx))
				drv_ctx->op_type = type;

			ctx->drv_ctx[type] = drv_ctx;
		}

		spin_unlock(&ctx->lock);
	}

	return drv_ctx;
}
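/*
 * Note: the check / lock / re-check sequence above lets the per-operation
 * driver context be created lazily, exactly once, on first use; the common
 * case (context already initialized) never takes ctx->lock.
 */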
static void caam_unmap(struct device *dev, struct scatterlist *src,
		       struct scatterlist *dst, int src_nents,
		       int dst_nents, dma_addr_t iv_dma, int ivsize,
		       enum dma_data_direction iv_dir, dma_addr_t qm_sg_dma,
		       int qm_sg_bytes)
{
	if (dst != src) {
		if (src_nents)
			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
		if (dst_nents)
			dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
	} else {
		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
	}

	if (iv_dma)
		dma_unmap_single(dev, iv_dma, ivsize, iv_dir);

	if (qm_sg_bytes)
		dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
}
static void aead_unmap(struct device *dev,
		       struct aead_edesc *edesc,
		       struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	int ivsize = crypto_aead_ivsize(aead);

	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize, DMA_TO_DEVICE, edesc->qm_sg_dma,
		   edesc->qm_sg_bytes);
	dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
}

static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
			   struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	int ivsize = crypto_skcipher_ivsize(skcipher);

	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize, DMA_BIDIRECTIONAL, edesc->qm_sg_dma,
		   edesc->qm_sg_bytes);
}
static void aead_done(struct caam_drv_req *drv_req, u32 status)
{
	struct device *qidev;
	struct aead_edesc *edesc;
	struct aead_request *aead_req = drv_req->app_ctx;
	struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
	struct caam_ctx *caam_ctx = crypto_aead_ctx(aead);
	int ecode = 0;

	qidev = caam_ctx->qidev;

	if (unlikely(status))
		ecode = caam_jr_strstatus(qidev, status);

	edesc = container_of(drv_req, typeof(*edesc), drv_req);
	aead_unmap(qidev, edesc, aead_req);

	aead_request_complete(aead_req, ecode);
	qi_cache_free(edesc);
}
934 * allocate and map the aead extended descriptor
936 static struct aead_edesc
*aead_edesc_alloc(struct aead_request
*req
,
939 struct crypto_aead
*aead
= crypto_aead_reqtfm(req
);
940 struct caam_ctx
*ctx
= crypto_aead_ctx(aead
);
941 struct caam_aead_alg
*alg
= container_of(crypto_aead_alg(aead
),
943 struct device
*qidev
= ctx
->qidev
;
944 gfp_t flags
= (req
->base
.flags
& CRYPTO_TFM_REQ_MAY_SLEEP
) ?
945 GFP_KERNEL
: GFP_ATOMIC
;
946 int src_nents
, mapped_src_nents
, dst_nents
= 0, mapped_dst_nents
= 0;
947 int src_len
, dst_len
= 0;
948 struct aead_edesc
*edesc
;
949 dma_addr_t qm_sg_dma
, iv_dma
= 0;
951 unsigned int authsize
= ctx
->authsize
;
952 int qm_sg_index
= 0, qm_sg_ents
= 0, qm_sg_bytes
;
954 struct qm_sg_entry
*sg_table
, *fd_sgt
;
955 struct caam_drv_ctx
*drv_ctx
;
957 drv_ctx
= get_drv_ctx(ctx
, encrypt
? ENCRYPT
: DECRYPT
);
959 return (struct aead_edesc
*)drv_ctx
;
961 /* allocate space for base edesc and hw desc commands, link tables */
962 edesc
= qi_cache_alloc(GFP_DMA
| flags
);
963 if (unlikely(!edesc
)) {
964 dev_err(qidev
, "could not allocate extended descriptor\n");
965 return ERR_PTR(-ENOMEM
);
968 if (likely(req
->src
== req
->dst
)) {
969 src_len
= req
->assoclen
+ req
->cryptlen
+
970 (encrypt
? authsize
: 0);
972 src_nents
= sg_nents_for_len(req
->src
, src_len
);
973 if (unlikely(src_nents
< 0)) {
974 dev_err(qidev
, "Insufficient bytes (%d) in src S/G\n",
976 qi_cache_free(edesc
);
977 return ERR_PTR(src_nents
);
980 mapped_src_nents
= dma_map_sg(qidev
, req
->src
, src_nents
,
982 if (unlikely(!mapped_src_nents
)) {
983 dev_err(qidev
, "unable to map source\n");
984 qi_cache_free(edesc
);
985 return ERR_PTR(-ENOMEM
);
988 src_len
= req
->assoclen
+ req
->cryptlen
;
989 dst_len
= src_len
+ (encrypt
? authsize
: (-authsize
));
991 src_nents
= sg_nents_for_len(req
->src
, src_len
);
992 if (unlikely(src_nents
< 0)) {
993 dev_err(qidev
, "Insufficient bytes (%d) in src S/G\n",
995 qi_cache_free(edesc
);
996 return ERR_PTR(src_nents
);
999 dst_nents
= sg_nents_for_len(req
->dst
, dst_len
);
1000 if (unlikely(dst_nents
< 0)) {
1001 dev_err(qidev
, "Insufficient bytes (%d) in dst S/G\n",
1003 qi_cache_free(edesc
);
1004 return ERR_PTR(dst_nents
);
1008 mapped_src_nents
= dma_map_sg(qidev
, req
->src
,
1009 src_nents
, DMA_TO_DEVICE
);
1010 if (unlikely(!mapped_src_nents
)) {
1011 dev_err(qidev
, "unable to map source\n");
1012 qi_cache_free(edesc
);
1013 return ERR_PTR(-ENOMEM
);
1016 mapped_src_nents
= 0;
1020 mapped_dst_nents
= dma_map_sg(qidev
, req
->dst
,
1023 if (unlikely(!mapped_dst_nents
)) {
1024 dev_err(qidev
, "unable to map destination\n");
1025 dma_unmap_sg(qidev
, req
->src
, src_nents
,
1027 qi_cache_free(edesc
);
1028 return ERR_PTR(-ENOMEM
);
1031 mapped_dst_nents
= 0;
1035 if ((alg
->caam
.rfc3686
&& encrypt
) || !alg
->caam
.geniv
)
1036 ivsize
= crypto_aead_ivsize(aead
);
	/*
	 * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
	 * Input is not contiguous.
	 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
	 * the end of the table by allocating more S/G entries. Logic:
	 * if (src != dst && output S/G)
	 *      pad output S/G, if needed
	 * else if (src == dst && S/G)
	 *      overlapping S/Gs; pad one of them
	 * else if (input S/G) ...
	 *      pad input S/G, if needed
	 */
	qm_sg_ents = 1 + !!ivsize + mapped_src_nents;
	if (mapped_dst_nents > 1)
		qm_sg_ents += pad_sg_nents(mapped_dst_nents);
	else if ((req->src == req->dst) && (mapped_src_nents > 1))
		qm_sg_ents = max(pad_sg_nents(qm_sg_ents),
				 1 + !!ivsize + pad_sg_nents(mapped_src_nents));
	else
		qm_sg_ents = pad_sg_nents(qm_sg_ents);
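	/*
	 * Worked example (assuming pad_sg_nents() rounds up to a multiple of
	 * 4, per the "HW reads 4 S/G entries at a time" note above; the
	 * numbers are illustrative only): with an IV, 3 mapped source
	 * segments and 5 mapped destination segments, the input side needs
	 * 1 + 1 + 3 = 5 entries and the output side pad_sg_nents(5) = 8
	 * entries, i.e. 13 qm_sg_entry slots before the table is sized and
	 * checked against CAAM_QI_MEMCACHE_SIZE below.
	 */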
1059 sg_table
= &edesc
->sgt
[0];
1060 qm_sg_bytes
= qm_sg_ents
* sizeof(*sg_table
);
1061 if (unlikely(offsetof(struct aead_edesc
, sgt
) + qm_sg_bytes
+ ivsize
>
1062 CAAM_QI_MEMCACHE_SIZE
)) {
1063 dev_err(qidev
, "No space for %d S/G entries and/or %dB IV\n",
1064 qm_sg_ents
, ivsize
);
1065 caam_unmap(qidev
, req
->src
, req
->dst
, src_nents
, dst_nents
, 0,
1067 qi_cache_free(edesc
);
1068 return ERR_PTR(-ENOMEM
);
1072 u8
*iv
= (u8
*)(sg_table
+ qm_sg_ents
);
1074 /* Make sure IV is located in a DMAable area */
1075 memcpy(iv
, req
->iv
, ivsize
);
1077 iv_dma
= dma_map_single(qidev
, iv
, ivsize
, DMA_TO_DEVICE
);
1078 if (dma_mapping_error(qidev
, iv_dma
)) {
1079 dev_err(qidev
, "unable to map IV\n");
1080 caam_unmap(qidev
, req
->src
, req
->dst
, src_nents
,
1081 dst_nents
, 0, 0, DMA_NONE
, 0, 0);
1082 qi_cache_free(edesc
);
1083 return ERR_PTR(-ENOMEM
);
1087 edesc
->src_nents
= src_nents
;
1088 edesc
->dst_nents
= dst_nents
;
1089 edesc
->iv_dma
= iv_dma
;
1090 edesc
->drv_req
.app_ctx
= req
;
1091 edesc
->drv_req
.cbk
= aead_done
;
1092 edesc
->drv_req
.drv_ctx
= drv_ctx
;
1094 edesc
->assoclen
= cpu_to_caam32(req
->assoclen
);
1095 edesc
->assoclen_dma
= dma_map_single(qidev
, &edesc
->assoclen
, 4,
1097 if (dma_mapping_error(qidev
, edesc
->assoclen_dma
)) {
1098 dev_err(qidev
, "unable to map assoclen\n");
1099 caam_unmap(qidev
, req
->src
, req
->dst
, src_nents
, dst_nents
,
1100 iv_dma
, ivsize
, DMA_TO_DEVICE
, 0, 0);
1101 qi_cache_free(edesc
);
1102 return ERR_PTR(-ENOMEM
);
1105 dma_to_qm_sg_one(sg_table
, edesc
->assoclen_dma
, 4, 0);
1108 dma_to_qm_sg_one(sg_table
+ qm_sg_index
, iv_dma
, ivsize
, 0);
1111 sg_to_qm_sg_last(req
->src
, src_len
, sg_table
+ qm_sg_index
, 0);
1112 qm_sg_index
+= mapped_src_nents
;
1114 if (mapped_dst_nents
> 1)
1115 sg_to_qm_sg_last(req
->dst
, dst_len
, sg_table
+ qm_sg_index
, 0);
1117 qm_sg_dma
= dma_map_single(qidev
, sg_table
, qm_sg_bytes
, DMA_TO_DEVICE
);
1118 if (dma_mapping_error(qidev
, qm_sg_dma
)) {
1119 dev_err(qidev
, "unable to map S/G table\n");
1120 dma_unmap_single(qidev
, edesc
->assoclen_dma
, 4, DMA_TO_DEVICE
);
1121 caam_unmap(qidev
, req
->src
, req
->dst
, src_nents
, dst_nents
,
1122 iv_dma
, ivsize
, DMA_TO_DEVICE
, 0, 0);
1123 qi_cache_free(edesc
);
1124 return ERR_PTR(-ENOMEM
);
1127 edesc
->qm_sg_dma
= qm_sg_dma
;
1128 edesc
->qm_sg_bytes
= qm_sg_bytes
;
1130 out_len
= req
->assoclen
+ req
->cryptlen
+
1131 (encrypt
? ctx
->authsize
: (-ctx
->authsize
));
1132 in_len
= 4 + ivsize
+ req
->assoclen
+ req
->cryptlen
;
1134 fd_sgt
= &edesc
->drv_req
.fd_sgt
[0];
1135 dma_to_qm_sg_one_last_ext(&fd_sgt
[1], qm_sg_dma
, in_len
, 0);
1137 if (req
->dst
== req
->src
) {
1138 if (mapped_src_nents
== 1)
1139 dma_to_qm_sg_one(&fd_sgt
[0], sg_dma_address(req
->src
),
1142 dma_to_qm_sg_one_ext(&fd_sgt
[0], qm_sg_dma
+
1143 (1 + !!ivsize
) * sizeof(*sg_table
),
1145 } else if (mapped_dst_nents
<= 1) {
1146 dma_to_qm_sg_one(&fd_sgt
[0], sg_dma_address(req
->dst
), out_len
,
1149 dma_to_qm_sg_one_ext(&fd_sgt
[0], qm_sg_dma
+ sizeof(*sg_table
) *
1150 qm_sg_index
, out_len
, 0);
static inline int aead_crypt(struct aead_request *req, bool encrypt)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ret;

	if (unlikely(caam_congested))
		return -EAGAIN;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, encrypt);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(ctx->qidev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}

static int aead_encrypt(struct aead_request *req)
{
	return aead_crypt(req, true);
}

static int aead_decrypt(struct aead_request *req)
{
	return aead_crypt(req, false);
}

static int ipsec_gcm_encrypt(struct aead_request *req)
{
	return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_crypt(req,
									 true);
}

static int ipsec_gcm_decrypt(struct aead_request *req)
{
	return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_crypt(req,
									 false);
}
static void skcipher_done(struct caam_drv_req *drv_req, u32 status)
{
	struct skcipher_edesc *edesc;
	struct skcipher_request *req = drv_req->app_ctx;
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *caam_ctx = crypto_skcipher_ctx(skcipher);
	struct device *qidev = caam_ctx->qidev;
	int ivsize = crypto_skcipher_ivsize(skcipher);
	int ecode = 0;

	dev_dbg(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status);

	edesc = container_of(drv_req, typeof(*edesc), drv_req);

	if (status)
		ecode = caam_jr_strstatus(qidev, status);

	print_hex_dump_debug("dstiv  @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
			     edesc->src_nents > 1 ? 100 : ivsize, 1);
	caam_dump_sg("dst    @" __stringify(__LINE__)": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);

	skcipher_unmap(qidev, edesc, req);

	/*
	 * The crypto API expects us to set the IV (req->iv) to the last
	 * ciphertext block (CBC mode) or last counter (CTR mode).
	 * This is used e.g. by the CTS mode.
	 */
	if (!ecode)
		memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
		       ivsize);

	qi_cache_free(edesc);
	skcipher_request_complete(req, ecode);
}
1244 static struct skcipher_edesc
*skcipher_edesc_alloc(struct skcipher_request
*req
,
1247 struct crypto_skcipher
*skcipher
= crypto_skcipher_reqtfm(req
);
1248 struct caam_ctx
*ctx
= crypto_skcipher_ctx(skcipher
);
1249 struct device
*qidev
= ctx
->qidev
;
1250 gfp_t flags
= (req
->base
.flags
& CRYPTO_TFM_REQ_MAY_SLEEP
) ?
1251 GFP_KERNEL
: GFP_ATOMIC
;
1252 int src_nents
, mapped_src_nents
, dst_nents
= 0, mapped_dst_nents
= 0;
1253 struct skcipher_edesc
*edesc
;
1256 int ivsize
= crypto_skcipher_ivsize(skcipher
);
1257 int dst_sg_idx
, qm_sg_ents
, qm_sg_bytes
;
1258 struct qm_sg_entry
*sg_table
, *fd_sgt
;
1259 struct caam_drv_ctx
*drv_ctx
;
1261 drv_ctx
= get_drv_ctx(ctx
, encrypt
? ENCRYPT
: DECRYPT
);
1262 if (IS_ERR(drv_ctx
))
1263 return (struct skcipher_edesc
*)drv_ctx
;
1265 src_nents
= sg_nents_for_len(req
->src
, req
->cryptlen
);
1266 if (unlikely(src_nents
< 0)) {
1267 dev_err(qidev
, "Insufficient bytes (%d) in src S/G\n",
1269 return ERR_PTR(src_nents
);
1272 if (unlikely(req
->src
!= req
->dst
)) {
1273 dst_nents
= sg_nents_for_len(req
->dst
, req
->cryptlen
);
1274 if (unlikely(dst_nents
< 0)) {
1275 dev_err(qidev
, "Insufficient bytes (%d) in dst S/G\n",
1277 return ERR_PTR(dst_nents
);
1280 mapped_src_nents
= dma_map_sg(qidev
, req
->src
, src_nents
,
1282 if (unlikely(!mapped_src_nents
)) {
1283 dev_err(qidev
, "unable to map source\n");
1284 return ERR_PTR(-ENOMEM
);
1287 mapped_dst_nents
= dma_map_sg(qidev
, req
->dst
, dst_nents
,
1289 if (unlikely(!mapped_dst_nents
)) {
1290 dev_err(qidev
, "unable to map destination\n");
1291 dma_unmap_sg(qidev
, req
->src
, src_nents
, DMA_TO_DEVICE
);
1292 return ERR_PTR(-ENOMEM
);
1295 mapped_src_nents
= dma_map_sg(qidev
, req
->src
, src_nents
,
1297 if (unlikely(!mapped_src_nents
)) {
1298 dev_err(qidev
, "unable to map source\n");
1299 return ERR_PTR(-ENOMEM
);
1303 qm_sg_ents
= 1 + mapped_src_nents
;
1304 dst_sg_idx
= qm_sg_ents
;
1307 * Input, output HW S/G tables: [IV, src][dst, IV]
1308 * IV entries point to the same buffer
1309 * If src == dst, S/G entries are reused (S/G tables overlap)
1311 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
1312 * the end of the table by allocating more S/G entries.
1314 if (req
->src
!= req
->dst
)
1315 qm_sg_ents
+= pad_sg_nents(mapped_dst_nents
+ 1);
1317 qm_sg_ents
= 1 + pad_sg_nents(qm_sg_ents
);
1319 qm_sg_bytes
= qm_sg_ents
* sizeof(struct qm_sg_entry
);
1320 if (unlikely(offsetof(struct skcipher_edesc
, sgt
) + qm_sg_bytes
+
1321 ivsize
> CAAM_QI_MEMCACHE_SIZE
)) {
1322 dev_err(qidev
, "No space for %d S/G entries and/or %dB IV\n",
1323 qm_sg_ents
, ivsize
);
1324 caam_unmap(qidev
, req
->src
, req
->dst
, src_nents
, dst_nents
, 0,
1326 return ERR_PTR(-ENOMEM
);
1329 /* allocate space for base edesc, link tables and IV */
1330 edesc
= qi_cache_alloc(GFP_DMA
| flags
);
1331 if (unlikely(!edesc
)) {
1332 dev_err(qidev
, "could not allocate extended descriptor\n");
1333 caam_unmap(qidev
, req
->src
, req
->dst
, src_nents
, dst_nents
, 0,
1335 return ERR_PTR(-ENOMEM
);
1338 /* Make sure IV is located in a DMAable area */
1339 sg_table
= &edesc
->sgt
[0];
1340 iv
= (u8
*)(sg_table
+ qm_sg_ents
);
1341 memcpy(iv
, req
->iv
, ivsize
);
1343 iv_dma
= dma_map_single(qidev
, iv
, ivsize
, DMA_BIDIRECTIONAL
);
1344 if (dma_mapping_error(qidev
, iv_dma
)) {
1345 dev_err(qidev
, "unable to map IV\n");
1346 caam_unmap(qidev
, req
->src
, req
->dst
, src_nents
, dst_nents
, 0,
1348 qi_cache_free(edesc
);
1349 return ERR_PTR(-ENOMEM
);
1352 edesc
->src_nents
= src_nents
;
1353 edesc
->dst_nents
= dst_nents
;
1354 edesc
->iv_dma
= iv_dma
;
1355 edesc
->qm_sg_bytes
= qm_sg_bytes
;
1356 edesc
->drv_req
.app_ctx
= req
;
1357 edesc
->drv_req
.cbk
= skcipher_done
;
1358 edesc
->drv_req
.drv_ctx
= drv_ctx
;
1360 dma_to_qm_sg_one(sg_table
, iv_dma
, ivsize
, 0);
1361 sg_to_qm_sg(req
->src
, req
->cryptlen
, sg_table
+ 1, 0);
1363 if (req
->src
!= req
->dst
)
1364 sg_to_qm_sg(req
->dst
, req
->cryptlen
, sg_table
+ dst_sg_idx
, 0);
1366 dma_to_qm_sg_one(sg_table
+ dst_sg_idx
+ mapped_dst_nents
, iv_dma
,
1369 edesc
->qm_sg_dma
= dma_map_single(qidev
, sg_table
, edesc
->qm_sg_bytes
,
1371 if (dma_mapping_error(qidev
, edesc
->qm_sg_dma
)) {
1372 dev_err(qidev
, "unable to map S/G table\n");
1373 caam_unmap(qidev
, req
->src
, req
->dst
, src_nents
, dst_nents
,
1374 iv_dma
, ivsize
, DMA_BIDIRECTIONAL
, 0, 0);
1375 qi_cache_free(edesc
);
1376 return ERR_PTR(-ENOMEM
);
1379 fd_sgt
= &edesc
->drv_req
.fd_sgt
[0];
1381 dma_to_qm_sg_one_last_ext(&fd_sgt
[1], edesc
->qm_sg_dma
,
1382 ivsize
+ req
->cryptlen
, 0);
1384 if (req
->src
== req
->dst
)
1385 dma_to_qm_sg_one_ext(&fd_sgt
[0], edesc
->qm_sg_dma
+
1386 sizeof(*sg_table
), req
->cryptlen
+ ivsize
,
1389 dma_to_qm_sg_one_ext(&fd_sgt
[0], edesc
->qm_sg_dma
+ dst_sg_idx
*
1390 sizeof(*sg_table
), req
->cryptlen
+ ivsize
,
static inline bool xts_skcipher_ivsize(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);

	return !!get_unaligned((u64 *)(req->iv + (ivsize / 2)));
}
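/*
 * Note: the helper above reports whether the second half of the 16-byte XTS
 * IV (the upper 64 bits of the tweak) is non-zero. skcipher_crypt() below
 * uses it on era <= 8 hardware to route such requests to the software
 * fallback, presumably because those CAAM versions only handle 64-bit tweaks.
 */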
static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
{
	struct skcipher_edesc *edesc;
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
	int ret;

	/*
	 * XTS is expected to return an error even for input length = 0
	 * Note that the case input length < block size will be caught during
	 * HW offloading and return an error.
	 */
	if (!req->cryptlen && !ctx->fallback)
		return 0;

	if (ctx->fallback && ((ctrlpriv->era <= 8 && xts_skcipher_ivsize(req)) ||
			      ctx->xts_key_fallback)) {
		struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);

		skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
		skcipher_request_set_callback(&rctx->fallback_req,
					      req->base.flags,
					      req->base.complete,
					      req->base.data);
		skcipher_request_set_crypt(&rctx->fallback_req, req->src,
					   req->dst, req->cryptlen, req->iv);

		return encrypt ? crypto_skcipher_encrypt(&rctx->fallback_req) :
				 crypto_skcipher_decrypt(&rctx->fallback_req);
	}

	if (unlikely(caam_congested))
		return -EAGAIN;

	/* allocate extended descriptor */
	edesc = skcipher_edesc_alloc(req, encrypt);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		skcipher_unmap(ctx->qidev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}

static int skcipher_encrypt(struct skcipher_request *req)
{
	return skcipher_crypt(req, true);
}

static int skcipher_decrypt(struct skcipher_request *req)
{
	return skcipher_crypt(req, false);
}
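/*
 * The table below registers the QI-backed skcipher implementations with the
 * crypto API. A minimal sketch of how a kernel user would reach them (not
 * part of this driver; standard crypto API calls, error handling omitted):
 *
 *	struct crypto_skcipher *tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	DECLARE_CRYPTO_WAIT(wait);
 *	struct skcipher_request *req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *	struct scatterlist sg;
 *
 *	crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *	sg_init_one(&sg, buf, len);
 *	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
 *				      crypto_req_done, &wait);
 *	skcipher_request_set_crypt(req, &sg, &sg, len, iv);
 *	crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
 *
 * Whether "cbc-aes-caam-qi" actually services such a request depends on its
 * priority (CAAM_CRA_PRIORITY) relative to other registered providers.
 */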
static struct caam_skcipher_alg driver_algs[] = {
	{
		.skcipher = {
			.base = {
				.cra_name = "cbc(aes)",
				.cra_driver_name = "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aes_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
	},
1485 .cra_name
= "cbc(des3_ede)",
1486 .cra_driver_name
= "cbc-3des-caam-qi",
1487 .cra_blocksize
= DES3_EDE_BLOCK_SIZE
,
1489 .setkey
= des3_skcipher_setkey
,
1490 .encrypt
= skcipher_encrypt
,
1491 .decrypt
= skcipher_decrypt
,
1492 .min_keysize
= DES3_EDE_KEY_SIZE
,
1493 .max_keysize
= DES3_EDE_KEY_SIZE
,
1494 .ivsize
= DES3_EDE_BLOCK_SIZE
,
1496 .caam
.class1_alg_type
= OP_ALG_ALGSEL_3DES
| OP_ALG_AAI_CBC
,
1501 .cra_name
= "cbc(des)",
1502 .cra_driver_name
= "cbc-des-caam-qi",
1503 .cra_blocksize
= DES_BLOCK_SIZE
,
1505 .setkey
= des_skcipher_setkey
,
1506 .encrypt
= skcipher_encrypt
,
1507 .decrypt
= skcipher_decrypt
,
1508 .min_keysize
= DES_KEY_SIZE
,
1509 .max_keysize
= DES_KEY_SIZE
,
1510 .ivsize
= DES_BLOCK_SIZE
,
1512 .caam
.class1_alg_type
= OP_ALG_ALGSEL_DES
| OP_ALG_AAI_CBC
,
1517 .cra_name
= "ctr(aes)",
1518 .cra_driver_name
= "ctr-aes-caam-qi",
1521 .setkey
= ctr_skcipher_setkey
,
1522 .encrypt
= skcipher_encrypt
,
1523 .decrypt
= skcipher_decrypt
,
1524 .min_keysize
= AES_MIN_KEY_SIZE
,
1525 .max_keysize
= AES_MAX_KEY_SIZE
,
1526 .ivsize
= AES_BLOCK_SIZE
,
1527 .chunksize
= AES_BLOCK_SIZE
,
1529 .caam
.class1_alg_type
= OP_ALG_ALGSEL_AES
|
1530 OP_ALG_AAI_CTR_MOD128
,
1535 .cra_name
= "rfc3686(ctr(aes))",
1536 .cra_driver_name
= "rfc3686-ctr-aes-caam-qi",
1539 .setkey
= rfc3686_skcipher_setkey
,
1540 .encrypt
= skcipher_encrypt
,
1541 .decrypt
= skcipher_decrypt
,
1542 .min_keysize
= AES_MIN_KEY_SIZE
+
1543 CTR_RFC3686_NONCE_SIZE
,
1544 .max_keysize
= AES_MAX_KEY_SIZE
+
1545 CTR_RFC3686_NONCE_SIZE
,
1546 .ivsize
= CTR_RFC3686_IV_SIZE
,
1547 .chunksize
= AES_BLOCK_SIZE
,
1550 .class1_alg_type
= OP_ALG_ALGSEL_AES
|
1551 OP_ALG_AAI_CTR_MOD128
,
1558 .cra_name
= "xts(aes)",
1559 .cra_driver_name
= "xts-aes-caam-qi",
1560 .cra_flags
= CRYPTO_ALG_NEED_FALLBACK
,
1561 .cra_blocksize
= AES_BLOCK_SIZE
,
1563 .setkey
= xts_skcipher_setkey
,
1564 .encrypt
= skcipher_encrypt
,
1565 .decrypt
= skcipher_decrypt
,
1566 .min_keysize
= 2 * AES_MIN_KEY_SIZE
,
1567 .max_keysize
= 2 * AES_MAX_KEY_SIZE
,
1568 .ivsize
= AES_BLOCK_SIZE
,
1570 .caam
.class1_alg_type
= OP_ALG_ALGSEL_AES
| OP_ALG_AAI_XTS
,
1574 static struct caam_aead_alg driver_aeads
[] = {
1578 .cra_name
= "rfc4106(gcm(aes))",
1579 .cra_driver_name
= "rfc4106-gcm-aes-caam-qi",
1582 .setkey
= rfc4106_setkey
,
1583 .setauthsize
= rfc4106_setauthsize
,
1584 .encrypt
= ipsec_gcm_encrypt
,
1585 .decrypt
= ipsec_gcm_decrypt
,
1587 .maxauthsize
= AES_BLOCK_SIZE
,
1590 .class1_alg_type
= OP_ALG_ALGSEL_AES
| OP_ALG_AAI_GCM
,
1597 .cra_name
= "rfc4543(gcm(aes))",
1598 .cra_driver_name
= "rfc4543-gcm-aes-caam-qi",
1601 .setkey
= rfc4543_setkey
,
1602 .setauthsize
= rfc4543_setauthsize
,
1603 .encrypt
= ipsec_gcm_encrypt
,
1604 .decrypt
= ipsec_gcm_decrypt
,
1606 .maxauthsize
= AES_BLOCK_SIZE
,
1609 .class1_alg_type
= OP_ALG_ALGSEL_AES
| OP_ALG_AAI_GCM
,
1613 /* Galois Counter Mode */
1617 .cra_name
= "gcm(aes)",
1618 .cra_driver_name
= "gcm-aes-caam-qi",
1621 .setkey
= gcm_setkey
,
1622 .setauthsize
= gcm_setauthsize
,
1623 .encrypt
= aead_encrypt
,
1624 .decrypt
= aead_decrypt
,
1626 .maxauthsize
= AES_BLOCK_SIZE
,
1629 .class1_alg_type
= OP_ALG_ALGSEL_AES
| OP_ALG_AAI_GCM
,
1633 /* single-pass ipsec_esp descriptor */
1637 .cra_name
= "authenc(hmac(md5),cbc(aes))",
1638 .cra_driver_name
= "authenc-hmac-md5-"
1640 .cra_blocksize
= AES_BLOCK_SIZE
,
1642 .setkey
= aead_setkey
,
1643 .setauthsize
= aead_setauthsize
,
1644 .encrypt
= aead_encrypt
,
1645 .decrypt
= aead_decrypt
,
1646 .ivsize
= AES_BLOCK_SIZE
,
1647 .maxauthsize
= MD5_DIGEST_SIZE
,
1650 .class1_alg_type
= OP_ALG_ALGSEL_AES
| OP_ALG_AAI_CBC
,
1651 .class2_alg_type
= OP_ALG_ALGSEL_MD5
|
1652 OP_ALG_AAI_HMAC_PRECOMP
,
1658 .cra_name
= "echainiv(authenc(hmac(md5),"
1660 .cra_driver_name
= "echainiv-authenc-hmac-md5-"
1662 .cra_blocksize
= AES_BLOCK_SIZE
,
1664 .setkey
= aead_setkey
,
1665 .setauthsize
= aead_setauthsize
,
1666 .encrypt
= aead_encrypt
,
1667 .decrypt
= aead_decrypt
,
1668 .ivsize
= AES_BLOCK_SIZE
,
1669 .maxauthsize
= MD5_DIGEST_SIZE
,
1672 .class1_alg_type
= OP_ALG_ALGSEL_AES
| OP_ALG_AAI_CBC
,
1673 .class2_alg_type
= OP_ALG_ALGSEL_MD5
|
1674 OP_ALG_AAI_HMAC_PRECOMP
,
1681 .cra_name
= "authenc(hmac(sha1),cbc(aes))",
1682 .cra_driver_name
= "authenc-hmac-sha1-"
1684 .cra_blocksize
= AES_BLOCK_SIZE
,
1686 .setkey
= aead_setkey
,
1687 .setauthsize
= aead_setauthsize
,
1688 .encrypt
= aead_encrypt
,
1689 .decrypt
= aead_decrypt
,
1690 .ivsize
= AES_BLOCK_SIZE
,
1691 .maxauthsize
= SHA1_DIGEST_SIZE
,
1694 .class1_alg_type
= OP_ALG_ALGSEL_AES
| OP_ALG_AAI_CBC
,
1695 .class2_alg_type
= OP_ALG_ALGSEL_SHA1
|
1696 OP_ALG_AAI_HMAC_PRECOMP
,
1702 .cra_name
= "echainiv(authenc(hmac(sha1),"
1704 .cra_driver_name
= "echainiv-authenc-"
1705 "hmac-sha1-cbc-aes-caam-qi",
1706 .cra_blocksize
= AES_BLOCK_SIZE
,
1708 .setkey
= aead_setkey
,
1709 .setauthsize
= aead_setauthsize
,
1710 .encrypt
= aead_encrypt
,
1711 .decrypt
= aead_decrypt
,
1712 .ivsize
= AES_BLOCK_SIZE
,
1713 .maxauthsize
= SHA1_DIGEST_SIZE
,
1716 .class1_alg_type
= OP_ALG_ALGSEL_AES
| OP_ALG_AAI_CBC
,
1717 .class2_alg_type
= OP_ALG_ALGSEL_SHA1
|
1718 OP_ALG_AAI_HMAC_PRECOMP
,
1725 .cra_name
= "authenc(hmac(sha224),cbc(aes))",
1726 .cra_driver_name
= "authenc-hmac-sha224-"
1728 .cra_blocksize
= AES_BLOCK_SIZE
,
1730 .setkey
= aead_setkey
,
1731 .setauthsize
= aead_setauthsize
,
1732 .encrypt
= aead_encrypt
,
1733 .decrypt
= aead_decrypt
,
1734 .ivsize
= AES_BLOCK_SIZE
,
1735 .maxauthsize
= SHA224_DIGEST_SIZE
,
1738 .class1_alg_type
= OP_ALG_ALGSEL_AES
| OP_ALG_AAI_CBC
,
1739 .class2_alg_type
= OP_ALG_ALGSEL_SHA224
|
1740 OP_ALG_AAI_HMAC_PRECOMP
,
1746 .cra_name
= "echainiv(authenc(hmac(sha224),"
1748 .cra_driver_name
= "echainiv-authenc-"
1749 "hmac-sha224-cbc-aes-caam-qi",
1750 .cra_blocksize
= AES_BLOCK_SIZE
,
1752 .setkey
= aead_setkey
,
1753 .setauthsize
= aead_setauthsize
,
1754 .encrypt
= aead_encrypt
,
1755 .decrypt
= aead_decrypt
,
1756 .ivsize
= AES_BLOCK_SIZE
,
1757 .maxauthsize
= SHA224_DIGEST_SIZE
,
1760 .class1_alg_type
= OP_ALG_ALGSEL_AES
| OP_ALG_AAI_CBC
,
1761 .class2_alg_type
= OP_ALG_ALGSEL_SHA224
|
1762 OP_ALG_AAI_HMAC_PRECOMP
,
1769 .cra_name
= "authenc(hmac(sha256),cbc(aes))",
1770 .cra_driver_name
= "authenc-hmac-sha256-"
1772 .cra_blocksize
= AES_BLOCK_SIZE
,
1774 .setkey
= aead_setkey
,
1775 .setauthsize
= aead_setauthsize
,
1776 .encrypt
= aead_encrypt
,
1777 .decrypt
= aead_decrypt
,
1778 .ivsize
= AES_BLOCK_SIZE
,
1779 .maxauthsize
= SHA256_DIGEST_SIZE
,
1782 .class1_alg_type
= OP_ALG_ALGSEL_AES
| OP_ALG_AAI_CBC
,
1783 .class2_alg_type
= OP_ALG_ALGSEL_SHA256
|
1784 OP_ALG_AAI_HMAC_PRECOMP
,
1790 .cra_name
= "echainiv(authenc(hmac(sha256),"
1792 .cra_driver_name
= "echainiv-authenc-"
1793 "hmac-sha256-cbc-aes-"
1795 .cra_blocksize
= AES_BLOCK_SIZE
,
1797 .setkey
= aead_setkey
,
1798 .setauthsize
= aead_setauthsize
,
1799 .encrypt
= aead_encrypt
,
1800 .decrypt
= aead_decrypt
,
1801 .ivsize
= AES_BLOCK_SIZE
,
1802 .maxauthsize
= SHA256_DIGEST_SIZE
,
1805 .class1_alg_type
= OP_ALG_ALGSEL_AES
| OP_ALG_AAI_CBC
,
1806 .class2_alg_type
= OP_ALG_ALGSEL_SHA256
|
1807 OP_ALG_AAI_HMAC_PRECOMP
,
1814 .cra_name
= "authenc(hmac(sha384),cbc(aes))",
1815 .cra_driver_name
= "authenc-hmac-sha384-"
1817 .cra_blocksize
= AES_BLOCK_SIZE
,
1819 .setkey
= aead_setkey
,
1820 .setauthsize
= aead_setauthsize
,
1821 .encrypt
= aead_encrypt
,
1822 .decrypt
= aead_decrypt
,
1823 .ivsize
= AES_BLOCK_SIZE
,
1824 .maxauthsize
= SHA384_DIGEST_SIZE
,
1827 .class1_alg_type
= OP_ALG_ALGSEL_AES
| OP_ALG_AAI_CBC
,
1828 .class2_alg_type
= OP_ALG_ALGSEL_SHA384
|
1829 OP_ALG_AAI_HMAC_PRECOMP
,
1835 .cra_name
= "echainiv(authenc(hmac(sha384),"
1837 .cra_driver_name
= "echainiv-authenc-"
1838 "hmac-sha384-cbc-aes-"
1840 .cra_blocksize
= AES_BLOCK_SIZE
,
1842 .setkey
= aead_setkey
,
1843 .setauthsize
= aead_setauthsize
,
1844 .encrypt
= aead_encrypt
,
1845 .decrypt
= aead_decrypt
,
1846 .ivsize
= AES_BLOCK_SIZE
,
1847 .maxauthsize
= SHA384_DIGEST_SIZE
,
1850 .class1_alg_type
= OP_ALG_ALGSEL_AES
| OP_ALG_AAI_CBC
,
1851 .class2_alg_type
= OP_ALG_ALGSEL_SHA384
|
1852 OP_ALG_AAI_HMAC_PRECOMP
,
1859 .cra_name
= "authenc(hmac(sha512),cbc(aes))",
1860 .cra_driver_name
= "authenc-hmac-sha512-"
1862 .cra_blocksize
= AES_BLOCK_SIZE
,
1864 .setkey
= aead_setkey
,
1865 .setauthsize
= aead_setauthsize
,
1866 .encrypt
= aead_encrypt
,
1867 .decrypt
= aead_decrypt
,
1868 .ivsize
= AES_BLOCK_SIZE
,
1869 .maxauthsize
= SHA512_DIGEST_SIZE
,
1872 .class1_alg_type
= OP_ALG_ALGSEL_AES
| OP_ALG_AAI_CBC
,
1873 .class2_alg_type
= OP_ALG_ALGSEL_SHA512
|
1874 OP_ALG_AAI_HMAC_PRECOMP
,
1880 .cra_name
= "echainiv(authenc(hmac(sha512),"
1882 .cra_driver_name
= "echainiv-authenc-"
1883 "hmac-sha512-cbc-aes-"
1885 .cra_blocksize
= AES_BLOCK_SIZE
,
1887 .setkey
= aead_setkey
,
1888 .setauthsize
= aead_setauthsize
,
1889 .encrypt
= aead_encrypt
,
1890 .decrypt
= aead_decrypt
,
1891 .ivsize
= AES_BLOCK_SIZE
,
1892 .maxauthsize
= SHA512_DIGEST_SIZE
,
1895 .class1_alg_type
= OP_ALG_ALGSEL_AES
| OP_ALG_AAI_CBC
,
1896 .class2_alg_type
= OP_ALG_ALGSEL_SHA512
|
1897 OP_ALG_AAI_HMAC_PRECOMP
,
1904 .cra_name
= "authenc(hmac(md5),cbc(des3_ede))",
1905 .cra_driver_name
= "authenc-hmac-md5-"
1906 "cbc-des3_ede-caam-qi",
1907 .cra_blocksize
= DES3_EDE_BLOCK_SIZE
,
1909 .setkey
= des3_aead_setkey
,
1910 .setauthsize
= aead_setauthsize
,
1911 .encrypt
= aead_encrypt
,
1912 .decrypt
= aead_decrypt
,
1913 .ivsize
= DES3_EDE_BLOCK_SIZE
,
1914 .maxauthsize
= MD5_DIGEST_SIZE
,
1917 .class1_alg_type
= OP_ALG_ALGSEL_3DES
| OP_ALG_AAI_CBC
,
1918 .class2_alg_type
= OP_ALG_ALGSEL_MD5
|
1919 OP_ALG_AAI_HMAC_PRECOMP
,
1925 .cra_name
= "echainiv(authenc(hmac(md5),"
1927 .cra_driver_name
= "echainiv-authenc-hmac-md5-"
1928 "cbc-des3_ede-caam-qi",
1929 .cra_blocksize
= DES3_EDE_BLOCK_SIZE
,
1931 .setkey
= des3_aead_setkey
,
1932 .setauthsize
= aead_setauthsize
,
1933 .encrypt
= aead_encrypt
,
1934 .decrypt
= aead_decrypt
,
1935 .ivsize
= DES3_EDE_BLOCK_SIZE
,
1936 .maxauthsize
= MD5_DIGEST_SIZE
,
1939 .class1_alg_type
= OP_ALG_ALGSEL_3DES
| OP_ALG_AAI_CBC
,
1940 .class2_alg_type
= OP_ALG_ALGSEL_MD5
|
1941 OP_ALG_AAI_HMAC_PRECOMP
,
1948 .cra_name
= "authenc(hmac(sha1),"
1950 .cra_driver_name
= "authenc-hmac-sha1-"
1951 "cbc-des3_ede-caam-qi",
1952 .cra_blocksize
= DES3_EDE_BLOCK_SIZE
,
1954 .setkey
= des3_aead_setkey
,
1955 .setauthsize
= aead_setauthsize
,
1956 .encrypt
= aead_encrypt
,
1957 .decrypt
= aead_decrypt
,
1958 .ivsize
= DES3_EDE_BLOCK_SIZE
,
1959 .maxauthsize
= SHA1_DIGEST_SIZE
,
1962 .class1_alg_type
= OP_ALG_ALGSEL_3DES
| OP_ALG_AAI_CBC
,
1963 .class2_alg_type
= OP_ALG_ALGSEL_SHA1
|
1964 OP_ALG_AAI_HMAC_PRECOMP
,
1970 .cra_name
= "echainiv(authenc(hmac(sha1),"
1972 .cra_driver_name
= "echainiv-authenc-"
1974 "cbc-des3_ede-caam-qi",
1975 .cra_blocksize
= DES3_EDE_BLOCK_SIZE
,
1977 .setkey
= des3_aead_setkey
,
1978 .setauthsize
= aead_setauthsize
,
1979 .encrypt
= aead_encrypt
,
1980 .decrypt
= aead_decrypt
,
1981 .ivsize
= DES3_EDE_BLOCK_SIZE
,
1982 .maxauthsize
= SHA1_DIGEST_SIZE
,
1985 .class1_alg_type
= OP_ALG_ALGSEL_3DES
| OP_ALG_AAI_CBC
,
1986 .class2_alg_type
= OP_ALG_ALGSEL_SHA1
|
1987 OP_ALG_AAI_HMAC_PRECOMP
,
1994 .cra_name
= "authenc(hmac(sha224),"
1996 .cra_driver_name
= "authenc-hmac-sha224-"
1997 "cbc-des3_ede-caam-qi",
1998 .cra_blocksize
= DES3_EDE_BLOCK_SIZE
,
2000 .setkey
= des3_aead_setkey
,
2001 .setauthsize
= aead_setauthsize
,
2002 .encrypt
= aead_encrypt
,
2003 .decrypt
= aead_decrypt
,
2004 .ivsize
= DES3_EDE_BLOCK_SIZE
,
2005 .maxauthsize
= SHA224_DIGEST_SIZE
,
2008 .class1_alg_type
= OP_ALG_ALGSEL_3DES
| OP_ALG_AAI_CBC
,
2009 .class2_alg_type
= OP_ALG_ALGSEL_SHA224
|
2010 OP_ALG_AAI_HMAC_PRECOMP
,
2016 .cra_name
= "echainiv(authenc(hmac(sha224),"
2018 .cra_driver_name
= "echainiv-authenc-"
2020 "cbc-des3_ede-caam-qi",
2021 .cra_blocksize
= DES3_EDE_BLOCK_SIZE
,
2023 .setkey
= des3_aead_setkey
,
2024 .setauthsize
= aead_setauthsize
,
2025 .encrypt
= aead_encrypt
,
2026 .decrypt
= aead_decrypt
,
2027 .ivsize
= DES3_EDE_BLOCK_SIZE
,
2028 .maxauthsize
= SHA224_DIGEST_SIZE
,
2031 .class1_alg_type
= OP_ALG_ALGSEL_3DES
| OP_ALG_AAI_CBC
,
2032 .class2_alg_type
= OP_ALG_ALGSEL_SHA224
|
2033 OP_ALG_AAI_HMAC_PRECOMP
,
2040 .cra_name
= "authenc(hmac(sha256),"
2042 .cra_driver_name
= "authenc-hmac-sha256-"
2043 "cbc-des3_ede-caam-qi",
2044 .cra_blocksize
= DES3_EDE_BLOCK_SIZE
,
2046 .setkey
= des3_aead_setkey
,
2047 .setauthsize
= aead_setauthsize
,
2048 .encrypt
= aead_encrypt
,
2049 .decrypt
= aead_decrypt
,
2050 .ivsize
= DES3_EDE_BLOCK_SIZE
,
2051 .maxauthsize
= SHA256_DIGEST_SIZE
,
2054 .class1_alg_type
= OP_ALG_ALGSEL_3DES
| OP_ALG_AAI_CBC
,
2055 .class2_alg_type
= OP_ALG_ALGSEL_SHA256
|
2056 OP_ALG_AAI_HMAC_PRECOMP
,
2062 .cra_name
= "echainiv(authenc(hmac(sha256),"
2064 .cra_driver_name
= "echainiv-authenc-"
2066 "cbc-des3_ede-caam-qi",
2067 .cra_blocksize
= DES3_EDE_BLOCK_SIZE
,
2069 .setkey
= des3_aead_setkey
,
2070 .setauthsize
= aead_setauthsize
,
2071 .encrypt
= aead_encrypt
,
2072 .decrypt
= aead_decrypt
,
2073 .ivsize
= DES3_EDE_BLOCK_SIZE
,
2074 .maxauthsize
= SHA256_DIGEST_SIZE
,
2077 .class1_alg_type
= OP_ALG_ALGSEL_3DES
| OP_ALG_AAI_CBC
,
2078 .class2_alg_type
= OP_ALG_ALGSEL_SHA256
|
2079 OP_ALG_AAI_HMAC_PRECOMP
,
2086 .cra_name
= "authenc(hmac(sha384),"
2088 .cra_driver_name
= "authenc-hmac-sha384-"
2089 "cbc-des3_ede-caam-qi",
2090 .cra_blocksize
= DES3_EDE_BLOCK_SIZE
,
2092 .setkey
= des3_aead_setkey
,
2093 .setauthsize
= aead_setauthsize
,
2094 .encrypt
= aead_encrypt
,
2095 .decrypt
= aead_decrypt
,
2096 .ivsize
= DES3_EDE_BLOCK_SIZE
,
2097 .maxauthsize
= SHA384_DIGEST_SIZE
,
2100 .class1_alg_type
= OP_ALG_ALGSEL_3DES
| OP_ALG_AAI_CBC
,
2101 .class2_alg_type
= OP_ALG_ALGSEL_SHA384
|
2102 OP_ALG_AAI_HMAC_PRECOMP
,
2108 .cra_name
= "echainiv(authenc(hmac(sha384),"
2110 .cra_driver_name
= "echainiv-authenc-"
2112 "cbc-des3_ede-caam-qi",
2113 .cra_blocksize
= DES3_EDE_BLOCK_SIZE
,
2115 .setkey
= des3_aead_setkey
,
2116 .setauthsize
= aead_setauthsize
,
2117 .encrypt
= aead_encrypt
,
2118 .decrypt
= aead_decrypt
,
2119 .ivsize
= DES3_EDE_BLOCK_SIZE
,
2120 .maxauthsize
= SHA384_DIGEST_SIZE
,
2123 .class1_alg_type
= OP_ALG_ALGSEL_3DES
| OP_ALG_AAI_CBC
,
2124 .class2_alg_type
= OP_ALG_ALGSEL_SHA384
|
2125 OP_ALG_AAI_HMAC_PRECOMP
,
2132 .cra_name
= "authenc(hmac(sha512),"
2134 .cra_driver_name
= "authenc-hmac-sha512-"
2135 "cbc-des3_ede-caam-qi",
2136 .cra_blocksize
= DES3_EDE_BLOCK_SIZE
,
2138 .setkey
= des3_aead_setkey
,
2139 .setauthsize
= aead_setauthsize
,
2140 .encrypt
= aead_encrypt
,
2141 .decrypt
= aead_decrypt
,
2142 .ivsize
= DES3_EDE_BLOCK_SIZE
,
2143 .maxauthsize
= SHA512_DIGEST_SIZE
,
2146 .class1_alg_type
= OP_ALG_ALGSEL_3DES
| OP_ALG_AAI_CBC
,
2147 .class2_alg_type
= OP_ALG_ALGSEL_SHA512
|
2148 OP_ALG_AAI_HMAC_PRECOMP
,
2154 .cra_name
= "echainiv(authenc(hmac(sha512),"
2156 .cra_driver_name
= "echainiv-authenc-"
2158 "cbc-des3_ede-caam-qi",
2159 .cra_blocksize
= DES3_EDE_BLOCK_SIZE
,
2161 .setkey
= des3_aead_setkey
,
2162 .setauthsize
= aead_setauthsize
,
2163 .encrypt
= aead_encrypt
,
2164 .decrypt
= aead_decrypt
,
2165 .ivsize
= DES3_EDE_BLOCK_SIZE
,
2166 .maxauthsize
= SHA512_DIGEST_SIZE
,
2169 .class1_alg_type
= OP_ALG_ALGSEL_3DES
| OP_ALG_AAI_CBC
,
2170 .class2_alg_type
= OP_ALG_ALGSEL_SHA512
|
2171 OP_ALG_AAI_HMAC_PRECOMP
,
2178 .cra_name
= "authenc(hmac(md5),cbc(des))",
2179 .cra_driver_name
= "authenc-hmac-md5-"
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha1-cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha224-cbc-des-"
						   "caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha256-cbc-des-"
						   "caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha384-cbc-des-"
						   "caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha512-cbc-des-"
						   "caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
};
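
/*
 * caam_init_common() - transform-init work shared by the skcipher and AEAD
 * paths below: allocate a job ring device, DMA-map the key buffer and copy
 * the class 1/2 algorithm type templates from the matching caam_alg_entry.
 */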
static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
			    bool uses_dkp)
{
	struct caam_drv_private *priv;
	struct device *dev;

	/*
	 * distribute tfms across job rings to ensure in-order
	 * crypto request processing per tfm
	 */
	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}

	dev = ctx->jrdev->parent;
	priv = dev_get_drvdata(dev);
	/*
	 * On Era 6+ the split key is generated in place by DKP, which writes
	 * back into the key buffer, hence the bidirectional mapping.
	 */
	if (priv->era >= 6 && uses_dkp)
		ctx->dir = DMA_BIDIRECTIONAL;
	else
		ctx->dir = DMA_TO_DEVICE;

	ctx->key_dma = dma_map_single(dev, ctx->key, sizeof(ctx->key),
				      ctx->dir);
	if (dma_mapping_error(dev, ctx->key_dma)) {
		dev_err(dev, "unable to map key\n");
		caam_jr_free(ctx->jrdev);
		return -ENOMEM;
	}

	/* copy descriptor header template value */
	ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;

	spin_lock_init(&ctx->lock);
	ctx->drv_ctx[ENCRYPT] = NULL;
	ctx->drv_ctx[DECRYPT] = NULL;

	return 0;
}
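
/*
 * skcipher tfm init. For XTS a same-name software fallback tfm is allocated
 * up front; the xts handlers are expected to hand requests off to it when
 * the key or request cannot be processed by the CAAM directly (see
 * xts_key_fallback / fallback in struct caam_ctx).
 */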
static int caam_cra_init(struct crypto_skcipher *tfm)
{
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct caam_skcipher_alg *caam_alg =
		container_of(alg, typeof(*caam_alg), skcipher);
	struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
	u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
	int ret = 0;

	if (alg_aai == OP_ALG_AAI_XTS) {
		const char *tfm_name = crypto_tfm_alg_name(&tfm->base);
		struct crypto_skcipher *fallback;

		fallback = crypto_alloc_skcipher(tfm_name, 0,
						 CRYPTO_ALG_NEED_FALLBACK);
		if (IS_ERR(fallback)) {
			pr_err("Failed to allocate %s fallback: %ld\n",
			       tfm_name, PTR_ERR(fallback));
			return PTR_ERR(fallback);
		}

		ctx->fallback = fallback;
		crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_skcipher_req_ctx) +
					    crypto_skcipher_reqsize(fallback));
	}

	ret = caam_init_common(ctx, &caam_alg->caam, false);
	if (ret && ctx->fallback)
		crypto_free_skcipher(ctx->fallback);

	return ret;
}
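
/*
 * AEAD tfm init: DKP is used for every AEAD not flagged .nodkp, which in
 * turn selects the bidirectional key mapping in caam_init_common().
 */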
static int caam_aead_init(struct crypto_aead *tfm)
{
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
						      aead);
	struct caam_ctx *ctx = crypto_aead_ctx(tfm);

	return caam_init_common(ctx, &caam_alg->caam, !caam_alg->caam.nodkp);
}
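
/*
 * Common teardown: release the per-direction driver contexts, unmap the key
 * buffer and return the job ring device.
 */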
static void caam_exit_common(struct caam_ctx *ctx)
{
	caam_drv_ctx_rel(ctx->drv_ctx[ENCRYPT]);
	caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]);

	dma_unmap_single(ctx->jrdev->parent, ctx->key_dma, sizeof(ctx->key),
			 ctx->dir);

	caam_jr_free(ctx->jrdev);
}
static void caam_cra_exit(struct crypto_skcipher *tfm)
{
	struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);

	if (ctx->fallback)
		crypto_free_skcipher(ctx->fallback);
	caam_exit_common(ctx);
}
static void caam_aead_exit(struct crypto_aead *tfm)
{
	caam_exit_common(crypto_aead_ctx(tfm));
}
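
/* Unregister every algorithm that was successfully registered at init. */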
void caam_qi_algapi_exit(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;

		if (t_alg->registered)
			crypto_unregister_aead(&t_alg->aead);
	}

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		struct caam_skcipher_alg *t_alg = driver_algs + i;

		if (t_alg->registered)
			crypto_unregister_skcipher(&t_alg->skcipher);
	}
}
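
/*
 * Fill in the crypto_alg boilerplate (module, priority, context size, flags,
 * init/exit hooks) shared by all template entries.
 */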
static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
{
	struct skcipher_alg *alg = &t_alg->skcipher;

	alg->base.cra_module = THIS_MODULE;
	alg->base.cra_priority = CAAM_CRA_PRIORITY;
	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
	alg->base.cra_flags |= (CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
				CRYPTO_ALG_KERN_DRIVER_ONLY);

	alg->init = caam_cra_init;
	alg->exit = caam_cra_exit;
}
static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
{
	struct aead_alg *alg = &t_alg->aead;

	alg->base.cra_module = THIS_MODULE;
	alg->base.cra_priority = CAAM_CRA_PRIORITY;
	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
			      CRYPTO_ALG_KERN_DRIVER_ONLY;

	alg->init = caam_aead_init;
	alg->exit = caam_aead_exit;
}
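
/*
 * caam_qi_algapi_init() - registration entry point: detect which CHA blocks
 * (DES, AES, MD) the controller instantiates and register only the
 * algorithms the hardware can back.
 *
 * Illustrative usage sketch (not part of this driver): once registered, the
 * transforms are reached through the generic crypto API and are normally
 * picked by priority (CAAM_CRA_PRIORITY), e.g.:
 *
 *	struct crypto_aead *tfm;
 *
 *	tfm = crypto_alloc_aead("authenc(hmac(sha1),cbc(des))", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	...
 *	crypto_free_aead(tfm);
 */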
int caam_qi_algapi_init(struct device *ctrldev)
{
	struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
	int i = 0, err = 0;
	u32 aes_vid, aes_inst, des_inst, md_vid, md_inst;
	unsigned int md_limit = SHA512_DIGEST_SIZE;
	bool registered = false;

	/* Make sure this runs only on (DPAA 1.x) QI */
	if (!priv->qi_present || caam_dpaa2)
		return 0;

	/*
	 * Register crypto algorithms the device supports.
	 * First, detect presence and attributes of DES, AES, and MD blocks.
	 */
	if (priv->era < 10) {
		u32 cha_vid, cha_inst;

		cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
		aes_vid = cha_vid & CHA_ID_LS_AES_MASK;
		md_vid = (cha_vid & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;

		cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
		des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >>
			   CHA_ID_LS_DES_SHIFT;
		aes_inst = cha_inst & CHA_ID_LS_AES_MASK;
		md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
	} else {
		u32 aesa, mdha;

		aesa = rd_reg32(&priv->ctrl->vreg.aesa);
		mdha = rd_reg32(&priv->ctrl->vreg.mdha);

		aes_vid = (aesa & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
		md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;

		des_inst = rd_reg32(&priv->ctrl->vreg.desa) & CHA_VER_NUM_MASK;
		aes_inst = aesa & CHA_VER_NUM_MASK;
		md_inst = mdha & CHA_VER_NUM_MASK;
	}

	/* If MD is present, limit digest size based on LP256 */
	if (md_inst && md_vid == CHA_VER_VID_MD_LP256)
		md_limit = SHA256_DIGEST_SIZE;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		struct caam_skcipher_alg *t_alg = driver_algs + i;
		u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!des_inst &&
		    ((alg_sel == OP_ALG_ALGSEL_3DES) ||
		     (alg_sel == OP_ALG_ALGSEL_DES)))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
			continue;

		caam_skcipher_alg_init(t_alg);

		err = crypto_register_skcipher(&t_alg->skcipher);
		if (err) {
			dev_warn(ctrldev, "%s alg registration failed\n",
				 t_alg->skcipher.base.cra_driver_name);
			continue;
		}

		t_alg->registered = true;
		registered = true;
	}

	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;
		u32 c1_alg_sel = t_alg->caam.class1_alg_type &
				 OP_ALG_ALGSEL_MASK;
		u32 c2_alg_sel = t_alg->caam.class2_alg_type &
				 OP_ALG_ALGSEL_MASK;
		u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!des_inst &&
		    ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
		     (c1_alg_sel == OP_ALG_ALGSEL_DES)))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
			continue;

		/*
		 * Check support for AES algorithms not available
		 * on LP devices.
		 */
		if (aes_vid == CHA_VER_VID_AES_LP && alg_aai == OP_ALG_AAI_GCM)
			continue;

		/*
		 * Skip algorithms requiring message digests
		 * if MD or MD size is not supported by device.
		 */
		if (c2_alg_sel &&
		    (!md_inst || (t_alg->aead.maxauthsize > md_limit)))
			continue;

		caam_aead_alg_init(t_alg);

		err = crypto_register_aead(&t_alg->aead);
		if (err) {
			pr_warn("%s alg registration failed\n",
				t_alg->aead.base.cra_driver_name);
			continue;
		}

		t_alg->registered = true;
		registered = true;
	}

	if (registered)
		dev_info(ctrldev, "algorithms registered in /proc/crypto\n");
, "algorithms registered in /proc/crypto\n");