// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * Copyright 2015-2016 Freescale Semiconductor Inc.
 * Copyright 2017-2018 NXP
 */

#include "caamalg_qi2.h"
#include "dpseci_cmd.h"
#include "desc_constr.h"
#include "sg_sw_sec4.h"
#include "sg_sw_qm2.h"
#include "caamalg_desc.h"
#include "caamhash_desc.h"
#include <linux/fsl/mc.h>
#include <soc/fsl/dpaa2-io.h>
#include <soc/fsl/dpaa2-fd.h>

#define CAAM_CRA_PRIORITY	2000

/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
#define CAAM_MAX_KEY_SIZE	(AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE + \
				 SHA512_DIGEST_SIZE * 2)
/*
 * This is a cache of buffers, from which the users of the CAAM QI driver
 * can allocate short buffers. It's speedier than doing kmalloc on the hotpath.
 * NOTE: A more elegant solution would be to have some headroom in the frames
 *       being processed. This can be added by the dpaa2-eth driver. This would
 *       pose a problem for userspace application processing which cannot
 *       know of this limitation. So for now, this will work.
 * NOTE: The memcache is SMP-safe. No need to handle spinlocks in here.
 */
static struct kmem_cache *qi_cache;
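
/*
 * Illustrative sketch (not part of the code shown here): the cache above is
 * typically created once at probe time and sized so that a whole extended
 * descriptor plus its S/G table and IV fit in one object. The helper name
 * is hypothetical; CAAM_QI_MEMCACHE_SIZE is assumed to come from caamalg_qi2.h.
 */
static inline int qi_cache_create_example(void)
{
	/* objects must be DMA-able, hence SLAB_CACHE_DMA */
	qi_cache = kmem_cache_create("dpaa2_caamqicache",
				     CAAM_QI_MEMCACHE_SIZE, 0,
				     SLAB_CACHE_DMA, NULL);
	return qi_cache ? 0 : -ENOMEM;
}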
struct caam_alg_entry {
	struct device *dev;
	int class1_alg_type;
	int class2_alg_type;
	bool rfc3686;
	bool geniv;
	bool nodkp;
};

struct caam_aead_alg {
	struct aead_alg aead;
	struct caam_alg_entry caam;
	bool registered;
};

struct caam_skcipher_alg {
	struct skcipher_alg skcipher;
	struct caam_alg_entry caam;
	bool registered;
};
/**
 * caam_ctx - per-session context
 * @flc: Flow Contexts array
 * @key: [authentication key], encryption key
 * @flc_dma: I/O virtual addresses of the Flow Contexts
 * @key_dma: I/O virtual address of the key
 * @dir: DMA direction for mapping key and Flow Contexts
 * @dev: dpseci device
 * @adata: authentication algorithm details
 * @cdata: encryption algorithm details
 * @authsize: authentication tag (a.k.a. ICV / MAC) size
 */
struct caam_ctx {
	struct caam_flc flc[NUM_OP];
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t flc_dma[NUM_OP];
	dma_addr_t key_dma;
	enum dma_data_direction dir;
	struct device *dev;
	struct alginfo adata;
	struct alginfo cdata;
	unsigned int authsize;
};
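
/*
 * Illustrative layout sketch derived from caam_cra_init() below (assuming the
 * usual two operations, ENCRYPT and DECRYPT): flc[] and key[] are mapped to
 * the device as one contiguous region, and the per-operation I/O virtual
 * addresses are simple offsets into that region:
 *
 *   dma_addr --> +----------------+ <-- flc_dma[ENCRYPT]
 *                | flc[ENCRYPT]   |
 *                +----------------+ <-- flc_dma[DECRYPT]
 *                | flc[DECRYPT]   |
 *                +----------------+ <-- key_dma
 *                | key[]          |
 *                +----------------+
 */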
static void *dpaa2_caam_iova_to_virt(struct dpaa2_caam_priv *priv,
				     dma_addr_t iova_addr)
{
	phys_addr_t phys_addr;

	phys_addr = priv->domain ? iommu_iova_to_phys(priv->domain, iova_addr) :
				   iova_addr;

	return phys_to_virt(phys_addr);
}
/**
 * qi_cache_zalloc - Allocate buffers from CAAM-QI cache
 *
 * Allocate data on the hotpath. Instead of using kzalloc, one can use the
 * services of the CAAM QI memory cache (backed by kmem_cache). The buffers
 * will have a size of CAAM_QI_MEMCACHE_SIZE, which should be sufficient for
 * hosting 16 SG entries.
 *
 * @flags - flags that would be used for the equivalent kmalloc(..) call
 *
 * Returns a pointer to a retrieved buffer on success or NULL on failure.
 */
static inline void *qi_cache_zalloc(gfp_t flags)
{
	return kmem_cache_zalloc(qi_cache, flags);
}

/**
 * qi_cache_free - Frees buffers allocated from CAAM-QI cache
 *
 * @obj - buffer previously allocated by qi_cache_zalloc
 *
 * No checking is being done, the call is a passthrough call to
 * kmem_cache_free(...)
 */
static inline void qi_cache_free(void *obj)
{
	kmem_cache_free(qi_cache, obj);
}
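
/*
 * Minimal usage sketch (illustrative only, hypothetical helper name): a
 * hotpath allocation of a short, DMA-able scratch buffer from the cache
 * above, mirroring how the *_edesc_alloc() helpers below use it.
 */
static void *qi_cache_usage_example(gfp_t req_flags)
{
	/* may run in softirq context, so the caller passes GFP_ATOMIC there */
	void *buf = qi_cache_zalloc(GFP_DMA | req_flags);

	if (unlikely(!buf))
		return ERR_PTR(-ENOMEM);

	/* ... build S/G table and IV inside the CAAM_QI_MEMCACHE_SIZE buffer ... */

	return buf;	/* the caller later releases it with qi_cache_free() */
}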
static struct caam_request *to_caam_req(struct crypto_async_request *areq)
{
	switch (crypto_tfm_alg_type(areq->tfm)) {
	case CRYPTO_ALG_TYPE_SKCIPHER:
		return skcipher_request_ctx(skcipher_request_cast(areq));
	case CRYPTO_ALG_TYPE_AEAD:
		return aead_request_ctx(container_of(areq, struct aead_request,
						     base));
	case CRYPTO_ALG_TYPE_AHASH:
		return ahash_request_ctx(ahash_request_cast(areq));
	default:
		return ERR_PTR(-EINVAL);
	}
}
static void caam_unmap(struct device *dev, struct scatterlist *src,
		       struct scatterlist *dst, int src_nents,
		       int dst_nents, dma_addr_t iv_dma, int ivsize,
		       dma_addr_t qm_sg_dma, int qm_sg_bytes)
{
	if (dst != src) {
		if (src_nents)
			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
		if (dst_nents)
			dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
	} else {
		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
	}

	if (iv_dma)
		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);

	if (qm_sg_bytes)
		dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
}
static int aead_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 typeof(*alg), aead);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct device *dev = ctx->dev;
	struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
	struct caam_flc *flc;
	u32 *desc;
	u32 ctx1_iv_off = 0;
	u32 *nonce = NULL;
	unsigned int data_len[2];
	u32 inl_mask;
	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = alg->caam.rfc3686;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128 bits (16 bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 */
	if (is_rfc3686) {
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
		nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
				ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
	}

	data_len[0] = ctx->adata.keylen_pad;
	data_len[1] = ctx->cdata.keylen;

	/* aead_encrypt shared descriptor */
	if (desc_inline_query((alg->caam.geniv ? DESC_QI_AEAD_GIVENC_LEN :
						 DESC_QI_AEAD_ENC_LEN) +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	if (inl_mask & 1)
		ctx->adata.key_virt = ctx->key;
	else
		ctx->adata.key_dma = ctx->key_dma;

	if (inl_mask & 2)
		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	else
		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;

	if (alg->caam.geniv)
		cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata,
					  ivsize, ctx->authsize, is_rfc3686,
					  nonce, ctx1_iv_off, true,
					  priv->sec_attr.era);
	else
		cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata,
				       ivsize, ctx->authsize, is_rfc3686, nonce,
				       ctx1_iv_off, true, priv->sec_attr.era);

	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/* aead_decrypt shared descriptor */
	if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	if (inl_mask & 1)
		ctx->adata.key_virt = ctx->key;
	else
		ctx->adata.key_dma = ctx->key_dma;

	if (inl_mask & 2)
		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	else
		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata,
			       ivsize, ctx->authsize, alg->caam.geniv,
			       is_rfc3686, nonce, ctx1_iv_off, true,
			       priv->sec_attr.era);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}
static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	aead_set_sh_desc(authenc);

	return 0;
}
static int aead_setkey(struct crypto_aead *aead, const u8 *key,
		       unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	struct crypto_authenc_keys keys;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	dev_dbg(dev, "keylen %d enckeylen %d authkeylen %d\n",
		keys.authkeylen + keys.enckeylen, keys.enckeylen,
		keys.authkeylen);
	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	ctx->adata.keylen = keys.authkeylen;
	ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
					      OP_ALG_ALGSEL_MASK);

	if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
		goto badkey;

	memcpy(ctx->key, keys.authkey, keys.authkeylen);
	memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
	dma_sync_single_for_device(dev, ctx->key_dma, ctx->adata.keylen_pad +
				   keys.enckeylen, ctx->dir);
	print_hex_dump_debug("ctx.key@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
			     ctx->adata.keylen_pad + keys.enckeylen, 1);

	ctx->cdata.keylen = keys.enckeylen;

	memzero_explicit(&keys, sizeof(keys));
	return aead_set_sh_desc(aead);
badkey:
	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
}
static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
			    unsigned int keylen)
{
	struct crypto_authenc_keys keys;
	u32 flags;
	int err;

	err = crypto_authenc_extractkeys(&keys, key, keylen);
	if (unlikely(err))
		goto badkey;

	err = -EINVAL;
	if (keys.enckeylen != DES3_EDE_KEY_SIZE)
		goto badkey;

	flags = crypto_aead_get_flags(aead);
	err = __des3_verify_key(&flags, keys.enckey);
	if (unlikely(err)) {
		crypto_aead_set_flags(aead, flags);
		goto out;
	}

	err = aead_setkey(aead, key, keylen);

out:
	memzero_explicit(&keys, sizeof(keys));
	return err;

badkey:
	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
	goto out;
}
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
					   bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_request *req_ctx = aead_request_ctx(req);
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 typeof(*alg), aead);
	struct device *dev = ctx->dev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	struct aead_edesc *edesc;
	dma_addr_t qm_sg_dma, iv_dma = 0;
	int ivsize = 0;
	unsigned int authsize = ctx->authsize;
	int qm_sg_index = 0, qm_sg_nents = 0, qm_sg_bytes;
	int in_len, out_len;
	struct dpaa2_sg_entry *sg_table;

	/* allocate space for base edesc, link tables and IV */
	edesc = qi_cache_zalloc(GFP_DMA | flags);
	if (unlikely(!edesc)) {
		dev_err(dev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	if (unlikely(req->dst != req->src)) {
		src_nents = sg_nents_for_len(req->src, req->assoclen +
					     req->cryptlen);
		if (unlikely(src_nents < 0)) {
			dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
				req->assoclen + req->cryptlen);
			qi_cache_free(edesc);
			return ERR_PTR(src_nents);
		}

		dst_nents = sg_nents_for_len(req->dst, req->assoclen +
					     req->cryptlen +
					     (encrypt ? authsize :
							(-authsize)));
		if (unlikely(dst_nents < 0)) {
			dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
				req->assoclen + req->cryptlen +
				(encrypt ? authsize : (-authsize)));
			qi_cache_free(edesc);
			return ERR_PTR(dst_nents);
		}

		if (src_nents) {
			mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
						      DMA_TO_DEVICE);
			if (unlikely(!mapped_src_nents)) {
				dev_err(dev, "unable to map source\n");
				qi_cache_free(edesc);
				return ERR_PTR(-ENOMEM);
			}
		} else {
			mapped_src_nents = 0;
		}

		if (dst_nents) {
			mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
						      DMA_FROM_DEVICE);
			if (unlikely(!mapped_dst_nents)) {
				dev_err(dev, "unable to map destination\n");
				dma_unmap_sg(dev, req->src, src_nents,
					     DMA_TO_DEVICE);
				qi_cache_free(edesc);
				return ERR_PTR(-ENOMEM);
			}
		} else {
			mapped_dst_nents = 0;
		}
	} else {
		src_nents = sg_nents_for_len(req->src, req->assoclen +
					     req->cryptlen +
					     (encrypt ? authsize : 0));
		if (unlikely(src_nents < 0)) {
			dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
				req->assoclen + req->cryptlen +
				(encrypt ? authsize : 0));
			qi_cache_free(edesc);
			return ERR_PTR(src_nents);
		}

		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(dev, "unable to map source\n");
			qi_cache_free(edesc);
			return ERR_PTR(-ENOMEM);
		}
	}

	if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv)
		ivsize = crypto_aead_ivsize(aead);

	/*
	 * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
	 * Input is not contiguous.
	 */
	qm_sg_nents = 1 + !!ivsize + mapped_src_nents +
		      (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
	sg_table = &edesc->sgt[0];
	qm_sg_bytes = qm_sg_nents * sizeof(*sg_table);
	if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
		     CAAM_QI_MEMCACHE_SIZE)) {
		dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
			qm_sg_nents, ivsize);
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	if (ivsize) {
		u8 *iv = (u8 *)(sg_table + qm_sg_nents);

		/* Make sure IV is located in a DMAable area */
		memcpy(iv, req->iv, ivsize);

		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, iv_dma)) {
			dev_err(dev, "unable to map IV\n");
			caam_unmap(dev, req->src, req->dst, src_nents,
				   dst_nents, 0, 0, 0, 0);
			qi_cache_free(edesc);
			return ERR_PTR(-ENOMEM);
		}
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;

	if ((alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK) ==
	    OP_ALG_ALGSEL_CHACHA20 && ivsize != CHACHAPOLY_IV_SIZE)
		/*
		 * The associated data comes already with the IV but we need
		 * to skip it when we authenticate or encrypt...
		 */
		edesc->assoclen = cpu_to_caam32(req->assoclen - ivsize);
	else
		edesc->assoclen = cpu_to_caam32(req->assoclen);
	edesc->assoclen_dma = dma_map_single(dev, &edesc->assoclen, 4,
					     DMA_TO_DEVICE);
	if (dma_mapping_error(dev, edesc->assoclen_dma)) {
		dev_err(dev, "unable to map assoclen\n");
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
	qm_sg_index++;
	if (ivsize) {
		dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
		qm_sg_index++;
	}
	sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
	qm_sg_index += mapped_src_nents;

	if (mapped_dst_nents > 1)
		sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
				 qm_sg_index, 0);

	qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, qm_sg_dma)) {
		dev_err(dev, "unable to map S/G table\n");
		dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->qm_sg_dma = qm_sg_dma;
	edesc->qm_sg_bytes = qm_sg_bytes;

	out_len = req->assoclen + req->cryptlen +
		  (encrypt ? ctx->authsize : (-ctx->authsize));
	in_len = 4 + ivsize + req->assoclen + req->cryptlen;

	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
	dpaa2_fl_set_final(in_fle, true);
	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
	dpaa2_fl_set_addr(in_fle, qm_sg_dma);
	dpaa2_fl_set_len(in_fle, in_len);

	if (req->dst == req->src) {
		if (mapped_src_nents == 1) {
			dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
			dpaa2_fl_set_addr(out_fle, sg_dma_address(req->src));
		} else {
			dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
			dpaa2_fl_set_addr(out_fle, qm_sg_dma +
					  (1 + !!ivsize) * sizeof(*sg_table));
		}
	} else if (mapped_dst_nents == 1) {
		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
		dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
	} else {
		dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
		dpaa2_fl_set_addr(out_fle, qm_sg_dma + qm_sg_index *
				  sizeof(*sg_table));
	}

	dpaa2_fl_set_len(out_fle, out_len);

	return edesc;
}
static int chachapoly_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct device *dev = ctx->dev;
	struct caam_flc *flc;
	u32 *desc;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
			       ctx->authsize, true, true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
			       ctx->authsize, false, true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}

static int chachapoly_setauthsize(struct crypto_aead *aead,
				  unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);

	if (authsize != POLY1305_DIGEST_SIZE)
		return -EINVAL;

	ctx->authsize = authsize;
	return chachapoly_set_sh_desc(aead);
}
static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
			     unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;

	if (keylen != CHACHA_KEY_SIZE + saltlen) {
		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	ctx->cdata.key_virt = key;
	ctx->cdata.keylen = keylen - saltlen;

	return chachapoly_set_sh_desc(aead);
}
static int gcm_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_flc *flc;
	u32 *desc;
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * AES GCM encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}

static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	gcm_set_sh_desc(authenc);

	return 0;
}
static int gcm_setkey(struct crypto_aead *aead,
		      const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;

	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	memcpy(ctx->key, key, keylen);
	dma_sync_single_for_device(dev, ctx->key_dma, keylen, ctx->dir);
	ctx->cdata.keylen = keylen;

	return gcm_set_sh_desc(aead);
}
static int rfc4106_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_flc *flc;
	u32 *desc;
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	ctx->cdata.key_virt = ctx->key;

	/*
	 * RFC4106 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
				  true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
				  true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}

static int rfc4106_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	rfc4106_set_sh_desc(authenc);

	return 0;
}
static int rfc4106_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;

	if (keylen < 4)
		return -EINVAL;

	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	memcpy(ctx->key, key, keylen);
	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->cdata.keylen = keylen - 4;
	dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
				   ctx->dir);

	return rfc4106_set_sh_desc(aead);
}
static int rfc4543_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_flc *flc;
	u32 *desc;
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	ctx->cdata.key_virt = ctx->key;

	/*
	 * RFC4543 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
				  true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
				  true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}

static int rfc4543_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	rfc4543_set_sh_desc(authenc);

	return 0;
}
static int rfc4543_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;

	if (keylen < 4)
		return -EINVAL;

	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	memcpy(ctx->key, key, keylen);
	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->cdata.keylen = keylen - 4;
	dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
				   ctx->dir);

	return rfc4543_set_sh_desc(aead);
}
static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
			   unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct caam_skcipher_alg *alg =
		container_of(crypto_skcipher_alg(skcipher),
			     struct caam_skcipher_alg, skcipher);
	struct device *dev = ctx->dev;
	struct caam_flc *flc;
	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
	u32 *desc;
	u32 ctx1_iv_off = 0;
	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128) &&
			      ((ctx->cdata.algtype & OP_ALG_ALGSEL_MASK) !=
			       OP_ALG_ALGSEL_CHACHA20);
	const bool is_rfc3686 = alg->caam.rfc3686;

	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128 bits (16 bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	| CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 *	| *key = {KEY, NONCE}
	 */
	if (is_rfc3686) {
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
		keylen -= CTR_RFC3686_NONCE_SIZE;
	}

	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = key;
	ctx->cdata.key_inline = true;

	/* skcipher_encrypt shared descriptor */
	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_skcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
				   ctx1_iv_off);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/* skcipher_decrypt shared descriptor */
	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_skcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
				   ctx1_iv_off);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}
static int des3_skcipher_setkey(struct crypto_skcipher *skcipher,
				const u8 *key, unsigned int keylen)
{
	return unlikely(des3_verify_key(skcipher, key)) ?:
	       skcipher_setkey(skcipher, key, keylen);
}
static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
			       unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct device *dev = ctx->dev;
	struct caam_flc *flc;
	u32 *desc;

	if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
		dev_err(dev, "key size mismatch\n");
		crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = key;
	ctx->cdata.key_inline = true;

	/* xts_skcipher_encrypt shared descriptor */
	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_xts_skcipher_encap(desc, &ctx->cdata);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/* xts_skcipher_decrypt shared descriptor */
	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_xts_skcipher_decap(desc, &ctx->cdata);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}
static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_request *req_ctx = skcipher_request_ctx(req);
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct device *dev = ctx->dev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	struct skcipher_edesc *edesc;
	dma_addr_t iv_dma;
	u8 *iv;
	int ivsize = crypto_skcipher_ivsize(skcipher);
	int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
	struct dpaa2_sg_entry *sg_table;

	src_nents = sg_nents_for_len(req->src, req->cryptlen);
	if (unlikely(src_nents < 0)) {
		dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
			req->cryptlen);
		return ERR_PTR(src_nents);
	}

	if (unlikely(req->dst != req->src)) {
		dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
		if (unlikely(dst_nents < 0)) {
			dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
				req->cryptlen);
			return ERR_PTR(dst_nents);
		}

		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
					      DMA_TO_DEVICE);
		if (unlikely(!mapped_src_nents)) {
			dev_err(dev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}

		mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
					      DMA_FROM_DEVICE);
		if (unlikely(!mapped_dst_nents)) {
			dev_err(dev, "unable to map destination\n");
			dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(dev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}
	}

	qm_sg_ents = 1 + mapped_src_nents;
	dst_sg_idx = qm_sg_ents;

	qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
	qm_sg_bytes = qm_sg_ents * sizeof(struct dpaa2_sg_entry);
	if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
		     ivsize > CAAM_QI_MEMCACHE_SIZE)) {
		dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
			qm_sg_ents, ivsize);
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	/* allocate space for base edesc, link tables and IV */
	edesc = qi_cache_zalloc(GFP_DMA | flags);
	if (unlikely(!edesc)) {
		dev_err(dev, "could not allocate extended descriptor\n");
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	/* Make sure IV is located in a DMAable area */
	sg_table = &edesc->sgt[0];
	iv = (u8 *)(sg_table + qm_sg_ents);
	memcpy(iv, req->iv, ivsize);

	iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, iv_dma)) {
		dev_err(dev, "unable to map IV\n");
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	edesc->qm_sg_bytes = qm_sg_bytes;

	dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
	sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);

	if (mapped_dst_nents > 1)
		sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
				 dst_sg_idx, 0);

	edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, edesc->qm_sg_dma)) {
		dev_err(dev, "unable to map S/G table\n");
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
	dpaa2_fl_set_final(in_fle, true);
	dpaa2_fl_set_len(in_fle, req->cryptlen + ivsize);
	dpaa2_fl_set_len(out_fle, req->cryptlen);

	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);

	if (req->src == req->dst) {
		dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
		dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma +
				  sizeof(*sg_table));
	} else if (mapped_dst_nents > 1) {
		dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
		dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx *
				  sizeof(*sg_table));
	} else {
		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
		dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
	}

	return edesc;
}
static void aead_unmap(struct device *dev, struct aead_edesc *edesc,
		       struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	int ivsize = crypto_aead_ivsize(aead);

	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
	dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
}

static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
			   struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	int ivsize = crypto_skcipher_ivsize(skcipher);

	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
}
static void aead_encrypt_done(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct aead_request *req = container_of(areq, struct aead_request,
						base);
	struct caam_request *req_ctx = to_caam_req(areq);
	struct aead_edesc *edesc = req_ctx->edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ecode = 0;

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

	if (unlikely(status)) {
		caam_qi2_strstatus(ctx->dev, status);
		ecode = -EIO;
	}

	aead_unmap(ctx->dev, edesc, req);
	qi_cache_free(edesc);
	aead_request_complete(req, ecode);
}

static void aead_decrypt_done(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct aead_request *req = container_of(areq, struct aead_request,
						base);
	struct caam_request *req_ctx = to_caam_req(areq);
	struct aead_edesc *edesc = req_ctx->edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ecode = 0;

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

	if (unlikely(status)) {
		caam_qi2_strstatus(ctx->dev, status);
		/*
		 * verify hw auth check passed else return -EBADMSG
		 */
		if ((status & JRSTA_CCBERR_ERRID_MASK) ==
		     JRSTA_CCBERR_ERRID_ICVCHK)
			ecode = -EBADMSG;
		else
			ecode = -EIO;
	}

	aead_unmap(ctx->dev, edesc, req);
	qi_cache_free(edesc);
	aead_request_complete(req, ecode);
}
static int aead_encrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct caam_request *caam_req = aead_request_ctx(req);
	int ret;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	caam_req->flc = &ctx->flc[ENCRYPT];
	caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
	caam_req->cbk = aead_encrypt_done;
	caam_req->ctx = &req->base;
	caam_req->edesc = edesc;
	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
	if (ret != -EINPROGRESS &&
	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
		aead_unmap(ctx->dev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}

static int aead_decrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct caam_request *caam_req = aead_request_ctx(req);
	int ret;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	caam_req->flc = &ctx->flc[DECRYPT];
	caam_req->flc_dma = ctx->flc_dma[DECRYPT];
	caam_req->cbk = aead_decrypt_done;
	caam_req->ctx = &req->base;
	caam_req->edesc = edesc;
	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
	if (ret != -EINPROGRESS &&
	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
		aead_unmap(ctx->dev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}
static int ipsec_gcm_encrypt(struct aead_request *req)
{
	if (req->assoclen < 8)
		return -EINVAL;

	return aead_encrypt(req);
}

static int ipsec_gcm_decrypt(struct aead_request *req)
{
	if (req->assoclen < 8)
		return -EINVAL;

	return aead_decrypt(req);
}
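
/*
 * Illustrative caller-side sketch (not part of this driver): once the
 * "gcm(aes)" template below is registered, a kernel user exercises the
 * aead_encrypt()/aead_decrypt() paths above through the generic crypto API.
 * Buffer setup is elided and the _example suffix marks a hypothetical helper.
 */
static int gcm_aes_usage_example(struct scatterlist *sg, unsigned int assoclen,
				 unsigned int cryptlen, u8 *iv, const u8 *key,
				 unsigned int keylen)
{
	struct crypto_aead *tfm;
	struct aead_request *req;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_aead_setkey(tfm, key, keylen);
	if (!ret)
		ret = crypto_aead_setauthsize(tfm, 16);
	if (ret)
		goto out_free_tfm;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	/* sg holds assoclen bytes of AAD followed by cryptlen bytes of data */
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  crypto_req_done, &wait);
	aead_request_set_ad(req, assoclen);
	aead_request_set_crypt(req, sg, sg, cryptlen, iv);

	ret = crypto_wait_req(crypto_aead_encrypt(req), &wait);

	aead_request_free(req);
out_free_tfm:
	crypto_free_aead(tfm);
	return ret;
}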
static void skcipher_encrypt_done(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct skcipher_request *req = skcipher_request_cast(areq);
	struct caam_request *req_ctx = to_caam_req(areq);
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct skcipher_edesc *edesc = req_ctx->edesc;
	int ecode = 0;
	int ivsize = crypto_skcipher_ivsize(skcipher);

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

	if (unlikely(status)) {
		caam_qi2_strstatus(ctx->dev, status);
		ecode = -EIO;
	}

	print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
			     edesc->src_nents > 1 ? 100 : ivsize, 1);
	caam_dump_sg(KERN_DEBUG, "dst @" __stringify(__LINE__)": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);

	skcipher_unmap(ctx->dev, edesc, req);

	/*
	 * The crypto API expects us to set the IV (req->iv) to the last
	 * ciphertext block. This is used e.g. by the CTS mode.
	 */
	scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen - ivsize,
				 ivsize, 0);

	qi_cache_free(edesc);
	skcipher_request_complete(req, ecode);
}

static void skcipher_decrypt_done(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct skcipher_request *req = skcipher_request_cast(areq);
	struct caam_request *req_ctx = to_caam_req(areq);
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct skcipher_edesc *edesc = req_ctx->edesc;
	int ecode = 0;
	int ivsize = crypto_skcipher_ivsize(skcipher);

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

	if (unlikely(status)) {
		caam_qi2_strstatus(ctx->dev, status);
		ecode = -EIO;
	}

	print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
			     edesc->src_nents > 1 ? 100 : ivsize, 1);
	caam_dump_sg(KERN_DEBUG, "dst @" __stringify(__LINE__)": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);

	skcipher_unmap(ctx->dev, edesc, req);
	qi_cache_free(edesc);
	skcipher_request_complete(req, ecode);
}
static int skcipher_encrypt(struct skcipher_request *req)
{
	struct skcipher_edesc *edesc;
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct caam_request *caam_req = skcipher_request_ctx(req);
	int ret;

	/* allocate extended descriptor */
	edesc = skcipher_edesc_alloc(req);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	caam_req->flc = &ctx->flc[ENCRYPT];
	caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
	caam_req->cbk = skcipher_encrypt_done;
	caam_req->ctx = &req->base;
	caam_req->edesc = edesc;
	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
	if (ret != -EINPROGRESS &&
	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
		skcipher_unmap(ctx->dev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}

static int skcipher_decrypt(struct skcipher_request *req)
{
	struct skcipher_edesc *edesc;
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct caam_request *caam_req = skcipher_request_ctx(req);
	int ivsize = crypto_skcipher_ivsize(skcipher);
	int ret;

	/* allocate extended descriptor */
	edesc = skcipher_edesc_alloc(req);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/*
	 * The crypto API expects us to set the IV (req->iv) to the last
	 * ciphertext block.
	 */
	scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen - ivsize,
				 ivsize, 0);

	caam_req->flc = &ctx->flc[DECRYPT];
	caam_req->flc_dma = ctx->flc_dma[DECRYPT];
	caam_req->cbk = skcipher_decrypt_done;
	caam_req->ctx = &req->base;
	caam_req->edesc = edesc;
	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
	if (ret != -EINPROGRESS &&
	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
		skcipher_unmap(ctx->dev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}
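
/*
 * Illustrative caller-side sketch (not part of this driver): a synchronous
 * user of one of the skciphers registered from driver_algs[] below, e.g.
 * "cbc(aes)" served by cbc-aes-caam-qi2. Error unwinding is shortened and
 * the _example suffix marks a hypothetical helper.
 */
static int cbc_aes_usage_example(struct scatterlist *sg, unsigned int len,
				 u8 *iv, const u8 *key, unsigned int keylen)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_skcipher_setkey(tfm, key, keylen);
	if (ret)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, sg, sg, len, iv);

	/* for this driver, completion runs in skcipher_encrypt_done() above */
	ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return ret;
}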
static int caam_cra_init(struct caam_ctx *ctx, struct caam_alg_entry *caam,
			 bool uses_dkp)
{
	dma_addr_t dma_addr;
	int i;

	/* copy descriptor header template value */
	ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;

	ctx->dev = caam->dev;
	ctx->dir = uses_dkp ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc,
					offsetof(struct caam_ctx, flc_dma),
					ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(ctx->dev, dma_addr)) {
		dev_err(ctx->dev, "unable to map key, shared descriptors\n");
		return -ENOMEM;
	}

	for (i = 0; i < NUM_OP; i++)
		ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
	ctx->key_dma = dma_addr + NUM_OP * sizeof(ctx->flc[0]);

	return 0;
}
static int caam_cra_init_skcipher(struct crypto_skcipher *tfm)
{
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct caam_skcipher_alg *caam_alg =
		container_of(alg, typeof(*caam_alg), skcipher);

	crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_request));
	return caam_cra_init(crypto_skcipher_ctx(tfm), &caam_alg->caam, false);
}

static int caam_cra_init_aead(struct crypto_aead *tfm)
{
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
						      aead);

	crypto_aead_set_reqsize(tfm, sizeof(struct caam_request));
	return caam_cra_init(crypto_aead_ctx(tfm), &caam_alg->caam,
			     !caam_alg->caam.nodkp);
}

static void caam_exit_common(struct caam_ctx *ctx)
{
	dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0],
			       offsetof(struct caam_ctx, flc_dma), ctx->dir,
			       DMA_ATTR_SKIP_CPU_SYNC);
}

static void caam_cra_exit(struct crypto_skcipher *tfm)
{
	caam_exit_common(crypto_skcipher_ctx(tfm));
}

static void caam_cra_exit_aead(struct crypto_aead *tfm)
{
	caam_exit_common(crypto_aead_ctx(tfm));
}
static struct caam_skcipher_alg driver_algs[] = {
	{
		.skcipher = {
			.base = {
				.cra_name = "cbc(aes)",
				.cra_driver_name = "cbc-aes-caam-qi2",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "cbc(des3_ede)",
				.cra_driver_name = "cbc-3des-caam-qi2",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "cbc(des)",
				.cra_driver_name = "cbc-des-caam-qi2",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "ctr(aes)",
				.cra_driver_name = "ctr-aes-caam-qi2",
			},
			.setkey = skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.chunksize = AES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_AES |
					OP_ALG_AAI_CTR_MOD128,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "rfc3686(ctr(aes))",
				.cra_driver_name = "rfc3686-ctr-aes-caam-qi2",
			},
			.setkey = skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE +
				       CTR_RFC3686_NONCE_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE +
				       CTR_RFC3686_NONCE_SIZE,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.chunksize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.rfc3686 = true,
		},
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "xts(aes)",
				.cra_driver_name = "xts-aes-caam-qi2",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = xts_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = 2 * AES_MIN_KEY_SIZE,
			.max_keysize = 2 * AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "chacha20",
				.cra_driver_name = "chacha20-caam-qi2",
			},
			.setkey = skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = CHACHA_KEY_SIZE,
			.max_keysize = CHACHA_KEY_SIZE,
			.ivsize = CHACHA_IV_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_CHACHA20,
	},
};
static struct caam_aead_alg driver_aeads[] = {
	{
		.aead = {
			.base = {
				.cra_name = "rfc4106(gcm(aes))",
				.cra_driver_name = "rfc4106-gcm-aes-caam-qi2",
			},
			.setkey = rfc4106_setkey,
			.setauthsize = rfc4106_setauthsize,
			.encrypt = ipsec_gcm_encrypt,
			.decrypt = ipsec_gcm_decrypt,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "rfc4543(gcm(aes))",
				.cra_driver_name = "rfc4543-gcm-aes-caam-qi2",
			},
			.setkey = rfc4543_setkey,
			.setauthsize = rfc4543_setauthsize,
			.encrypt = ipsec_gcm_encrypt,
			.decrypt = ipsec_gcm_decrypt,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
		},
	},
	/* Galois Counter Mode */
	{
		.aead = {
			.base = {
				.cra_name = "gcm(aes)",
				.cra_driver_name = "gcm-aes-caam-qi2",
			},
			.setkey = gcm_setkey,
			.setauthsize = gcm_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
		},
	},
	/* single-pass ipsec_esp descriptor */
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(aes))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-aes-caam-qi2",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-"
						   "cbc-aes-caam-qi2",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-aes-caam-qi2",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha1-cbc-aes-caam-qi2",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-aes-caam-qi2",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha224-cbc-aes-caam-qi2",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-aes-caam-qi2",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha256-cbc-aes-"
						   "caam-qi2",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-aes-caam-qi2",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha384-cbc-aes-"
						   "caam-qi2",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-aes-caam-qi2",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha512-cbc-aes-"
						   "caam-qi2",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
1958 .cra_name
= "authenc(hmac(md5),cbc(des3_ede))",
1959 .cra_driver_name
= "authenc-hmac-md5-"
1960 "cbc-des3_ede-caam-qi2",
1961 .cra_blocksize
= DES3_EDE_BLOCK_SIZE
,
1963 .setkey
= des3_aead_setkey
,
1964 .setauthsize
= aead_setauthsize
,
1965 .encrypt
= aead_encrypt
,
1966 .decrypt
= aead_decrypt
,
1967 .ivsize
= DES3_EDE_BLOCK_SIZE
,
1968 .maxauthsize
= MD5_DIGEST_SIZE
,
1971 .class1_alg_type
= OP_ALG_ALGSEL_3DES
| OP_ALG_AAI_CBC
,
1972 .class2_alg_type
= OP_ALG_ALGSEL_MD5
|
1973 OP_ALG_AAI_HMAC_PRECOMP
,
1979 .cra_name
= "echainiv(authenc(hmac(md5),"
1981 .cra_driver_name
= "echainiv-authenc-hmac-md5-"
1982 "cbc-des3_ede-caam-qi2",
1983 .cra_blocksize
= DES3_EDE_BLOCK_SIZE
,
1985 .setkey
= des3_aead_setkey
,
1986 .setauthsize
= aead_setauthsize
,
1987 .encrypt
= aead_encrypt
,
1988 .decrypt
= aead_decrypt
,
1989 .ivsize
= DES3_EDE_BLOCK_SIZE
,
1990 .maxauthsize
= MD5_DIGEST_SIZE
,
1993 .class1_alg_type
= OP_ALG_ALGSEL_3DES
| OP_ALG_AAI_CBC
,
1994 .class2_alg_type
= OP_ALG_ALGSEL_MD5
|
1995 OP_ALG_AAI_HMAC_PRECOMP
,
2002 .cra_name
= "authenc(hmac(sha1),"
2004 .cra_driver_name
= "authenc-hmac-sha1-"
2005 "cbc-des3_ede-caam-qi2",
2006 .cra_blocksize
= DES3_EDE_BLOCK_SIZE
,
2008 .setkey
= des3_aead_setkey
,
2009 .setauthsize
= aead_setauthsize
,
2010 .encrypt
= aead_encrypt
,
2011 .decrypt
= aead_decrypt
,
2012 .ivsize
= DES3_EDE_BLOCK_SIZE
,
2013 .maxauthsize
= SHA1_DIGEST_SIZE
,
2016 .class1_alg_type
= OP_ALG_ALGSEL_3DES
| OP_ALG_AAI_CBC
,
2017 .class2_alg_type
= OP_ALG_ALGSEL_SHA1
|
2018 OP_ALG_AAI_HMAC_PRECOMP
,
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-hmac-sha1-cbc-des3_ede-caam-qi2",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha224-cbc-des3_ede-caam-qi2",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-hmac-sha224-cbc-des3_ede-caam-qi2",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha256-cbc-des3_ede-caam-qi2",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-hmac-sha256-cbc-des3_ede-caam-qi2",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha384-cbc-des3_ede-caam-qi2",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-hmac-sha384-cbc-des3_ede-caam-qi2",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha512-cbc-des3_ede-caam-qi2",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-hmac-sha512-cbc-des3_ede-caam-qi2",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(des))",
				.cra_driver_name = "authenc-hmac-md5-cbc-des-caam-qi2",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),cbc(des)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-cbc-des-caam-qi2",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha1-cbc-des-caam-qi2",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),cbc(des)))",
				.cra_driver_name = "echainiv-authenc-hmac-sha1-cbc-des-caam-qi2",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha224-cbc-des-caam-qi2",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),cbc(des)))",
				.cra_driver_name = "echainiv-authenc-hmac-sha224-cbc-des-caam-qi2",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha256-cbc-des-caam-qi2",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),cbc(des)))",
				.cra_driver_name = "echainiv-authenc-hmac-sha256-cbc-des-caam-qi2",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha384-cbc-des-caam-qi2",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),cbc(des)))",
				.cra_driver_name = "echainiv-authenc-hmac-sha384-cbc-des-caam-qi2",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha512-cbc-des-caam-qi2",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),cbc(des)))",
				.cra_driver_name = "echainiv-authenc-hmac-sha512-cbc-des-caam-qi2",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),rfc3686(ctr(aes)))",
				.cra_driver_name = "authenc-hmac-md5-rfc3686-ctr-aes-caam-qi2",
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "seqiv(authenc(hmac(md5),rfc3686(ctr(aes))))",
				.cra_driver_name = "seqiv-authenc-hmac-md5-rfc3686-ctr-aes-caam-qi2",
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
				.cra_driver_name = "authenc-hmac-sha1-rfc3686-ctr-aes-caam-qi2",
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "seqiv(authenc(hmac(sha1),rfc3686(ctr(aes))))",
				.cra_driver_name = "seqiv-authenc-hmac-sha1-rfc3686-ctr-aes-caam-qi2",
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
				.cra_driver_name = "authenc-hmac-sha224-rfc3686-ctr-aes-caam-qi2",
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "seqiv(authenc(hmac(sha224),rfc3686(ctr(aes))))",
				.cra_driver_name = "seqiv-authenc-hmac-sha224-rfc3686-ctr-aes-caam-qi2",
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
				.cra_driver_name = "authenc-hmac-sha256-rfc3686-ctr-aes-caam-qi2",
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "seqiv(authenc(hmac(sha256),rfc3686(ctr(aes))))",
				.cra_driver_name = "seqiv-authenc-hmac-sha256-rfc3686-ctr-aes-caam-qi2",
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
				.cra_driver_name = "authenc-hmac-sha384-rfc3686-ctr-aes-caam-qi2",
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "seqiv(authenc(hmac(sha384),rfc3686(ctr(aes))))",
				.cra_driver_name = "seqiv-authenc-hmac-sha384-rfc3686-ctr-aes-caam-qi2",
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "rfc7539(chacha20,poly1305)",
				.cra_driver_name = "rfc7539-chacha20-poly1305-caam-qi2",
			},
			.setkey = chachapoly_setkey,
			.setauthsize = chachapoly_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CHACHAPOLY_IV_SIZE,
			.maxauthsize = POLY1305_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_CHACHA20 | OP_ALG_AAI_AEAD,
			.class2_alg_type = OP_ALG_ALGSEL_POLY1305 | OP_ALG_AAI_AEAD,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "rfc7539esp(chacha20,poly1305)",
				.cra_driver_name = "rfc7539esp-chacha20-poly1305-caam-qi2",
			},
			.setkey = chachapoly_setkey,
			.setauthsize = chachapoly_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = 8,
			.maxauthsize = POLY1305_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_CHACHA20 | OP_ALG_AAI_AEAD,
			.class2_alg_type = OP_ALG_ALGSEL_POLY1305 | OP_ALG_AAI_AEAD,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
				.cra_driver_name = "authenc-hmac-sha512-rfc3686-ctr-aes-caam-qi2",
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "seqiv(authenc(hmac(sha512),rfc3686(ctr(aes))))",
				.cra_driver_name = "seqiv-authenc-hmac-sha512-rfc3686-ctr-aes-caam-qi2",
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
			.geniv = true,
		},
	},
};
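/*
 * Fill in the crypto API boilerplate shared by every algorithm exposed by
 * this driver: module owner, registration priority, per-tfm context size,
 * the asynchronous / hardware-only flags and the init/exit hooks.
 */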
static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
{
	struct skcipher_alg *alg = &t_alg->skcipher;

	alg->base.cra_module = THIS_MODULE;
	alg->base.cra_priority = CAAM_CRA_PRIORITY;
	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;

	alg->init = caam_cra_init_skcipher;
	alg->exit = caam_cra_exit;
}

static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
{
	struct aead_alg *alg = &t_alg->aead;

	alg->base.cra_module = THIS_MODULE;
	alg->base.cra_priority = CAAM_CRA_PRIORITY;
	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;

	alg->init = caam_cra_init_aead;
	alg->exit = caam_cra_exit_aead;
}
/* max hash key is max split key size */
#define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)

#define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE

/* caam context sizes for hashes: running digest + 8 */
#define HASH_MSG_LEN			8
#define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)

/**
 * caam_hash_ctx - ahash per-session context
 * @flc: Flow Contexts array
 * @flc_dma: I/O virtual addresses of the Flow Contexts
 * @dev: dpseci device
 * @ctx_len: size of Context Register
 * @adata: hashing algorithm details
 */
struct caam_hash_ctx {
	struct caam_flc flc[HASH_NUM_OP];
	dma_addr_t flc_dma[HASH_NUM_OP];
	struct device *dev;
	int ctx_len;
	struct alginfo adata;
};

/* ahash state */
struct caam_hash_state {
	struct caam_request caam_req;
	dma_addr_t buf_dma;
	dma_addr_t ctx_dma;
	int ctx_dma_len;
	u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_0;
	u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_1;
	u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
	int current_buf;
};

struct caam_export_state {
	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
	u8 caam_ctx[MAX_CTX_LEN];
	int buflen;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
};
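/*
 * The hash state keeps two bounce buffers (buf_0/buf_1) and ping-pongs
 * between them: one holds the partial block carried over from the previous
 * ->update() call while the other collects the tail of the current request.
 * current_buf selects which of the two is active.
 */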
static inline void switch_buf(struct caam_hash_state *state)
{
	state->current_buf ^= 1;
}

static inline u8 *current_buf(struct caam_hash_state *state)
{
	return state->current_buf ? state->buf_1 : state->buf_0;
}

static inline u8 *alt_buf(struct caam_hash_state *state)
{
	return state->current_buf ? state->buf_0 : state->buf_1;
}

static inline int *current_buflen(struct caam_hash_state *state)
{
	return state->current_buf ? &state->buflen_1 : &state->buflen_0;
}

static inline int *alt_buflen(struct caam_hash_state *state)
{
	return state->current_buf ? &state->buflen_0 : &state->buflen_1;
}

/* Map current buffer in state (if length > 0) and put it in link table */
static inline int buf_map_to_qm_sg(struct device *dev,
				   struct dpaa2_sg_entry *qm_sg,
				   struct caam_hash_state *state)
{
	int buflen = *current_buflen(state);

	if (!buflen)
		return 0;

	state->buf_dma = dma_map_single(dev, current_buf(state), buflen,
					DMA_TO_DEVICE);
	if (dma_mapping_error(dev, state->buf_dma)) {
		dev_err(dev, "unable to map buf\n");
		state->buf_dma = 0;
		return -ENOMEM;
	}

	dma_to_qm_sg_one(qm_sg, state->buf_dma, buflen, 0);

	return 0;
}

/* Map state->caam_ctx, and add it to link table */
static inline int ctx_map_to_qm_sg(struct device *dev,
				   struct caam_hash_state *state, int ctx_len,
				   struct dpaa2_sg_entry *qm_sg, u32 flag)
{
	state->ctx_dma_len = ctx_len;
	state->ctx_dma = dma_map_single(dev, state->caam_ctx, ctx_len, flag);
	if (dma_mapping_error(dev, state->ctx_dma)) {
		dev_err(dev, "unable to map ctx\n");
		state->ctx_dma = 0;
		return -ENOMEM;
	}

	dma_to_qm_sg_one(qm_sg, state->ctx_dma, ctx_len, 0);

	return 0;
}
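/*
 * Build the four shared descriptors used on the hash path - UPDATE,
 * UPDATE_FIRST, FINALIZE and DIGEST - and sync each one to the device,
 * since the flow contexts are DMA mapped once at tfm init time.
 */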
static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
	struct caam_flc *flc;
	u32 *desc;

	/* ahash_update shared descriptor */
	flc = &ctx->flc[UPDATE];
	desc = flc->sh_desc;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
			  ctx->ctx_len, true, priv->sec_attr.era);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE],
				   desc_bytes(desc), DMA_BIDIRECTIONAL);
	print_hex_dump_debug("ahash update shdesc@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	/* ahash_update_first shared descriptor */
	flc = &ctx->flc[UPDATE_FIRST];
	desc = flc->sh_desc;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
			  ctx->ctx_len, false, priv->sec_attr.era);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE_FIRST],
				   desc_bytes(desc), DMA_BIDIRECTIONAL);
	print_hex_dump_debug("ahash update first shdesc@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	/* ahash_final shared descriptor */
	flc = &ctx->flc[FINALIZE];
	desc = flc->sh_desc;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
			  ctx->ctx_len, true, priv->sec_attr.era);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(ctx->dev, ctx->flc_dma[FINALIZE],
				   desc_bytes(desc), DMA_BIDIRECTIONAL);
	print_hex_dump_debug("ahash final shdesc@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	/* ahash_digest shared descriptor */
	flc = &ctx->flc[DIGEST];
	desc = flc->sh_desc;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
			  ctx->ctx_len, false, priv->sec_attr.era);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(ctx->dev, ctx->flc_dma[DIGEST],
				   desc_bytes(desc), DMA_BIDIRECTIONAL);
	print_hex_dump_debug("ahash digest shdesc@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	return 0;
}
struct split_key_sh_result {
	struct completion completion;
	int err;
	struct device *dev;
};

static void split_key_sh_done(void *cbk_ctx, u32 err)
{
	struct split_key_sh_result *res = cbk_ctx;

	dev_dbg(res->dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

	if (err)
		caam_qi2_strstatus(res->dev, err);

	res->err = err;
	complete(&res->completion);
}
/* Digest hash size if it is too large */
static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
			   u32 digestsize)
{
	struct caam_request *req_ctx;
	u32 *desc;
	struct split_key_sh_result result;
	dma_addr_t key_dma;
	struct caam_flc *flc;
	dma_addr_t flc_dma;
	int ret = -ENOMEM;
	struct dpaa2_fl_entry *in_fle, *out_fle;

	req_ctx = kzalloc(sizeof(*req_ctx), GFP_KERNEL | GFP_DMA);
	if (!req_ctx)
		return -ENOMEM;

	in_fle = &req_ctx->fd_flt[1];
	out_fle = &req_ctx->fd_flt[0];

	flc = kzalloc(sizeof(*flc), GFP_KERNEL | GFP_DMA);
	if (!flc)
		goto err_flc;

	key_dma = dma_map_single(ctx->dev, key, *keylen, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(ctx->dev, key_dma)) {
		dev_err(ctx->dev, "unable to map key memory\n");
		goto err_key_dma;
	}

	desc = flc->sh_desc;

	init_sh_desc(desc, 0);

	/* descriptor to perform unkeyed hash on key_in */
	append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
			 OP_ALG_AS_INITFINAL);
	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	flc_dma = dma_map_single(ctx->dev, flc, sizeof(flc->flc) +
				 desc_bytes(desc), DMA_TO_DEVICE);
	if (dma_mapping_error(ctx->dev, flc_dma)) {
		dev_err(ctx->dev, "unable to map shared descriptor\n");
		goto err_flc_dma;
	}

	dpaa2_fl_set_final(in_fle, true);
	dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
	dpaa2_fl_set_addr(in_fle, key_dma);
	dpaa2_fl_set_len(in_fle, *keylen);
	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
	dpaa2_fl_set_addr(out_fle, key_dma);
	dpaa2_fl_set_len(out_fle, digestsize);

	print_hex_dump_debug("key_in@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
	print_hex_dump_debug("shdesc@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	result.err = 0;
	init_completion(&result.completion);
	result.dev = ctx->dev;

	req_ctx->flc = flc;
	req_ctx->flc_dma = flc_dma;
	req_ctx->cbk = split_key_sh_done;
	req_ctx->ctx = &result;

	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
	if (ret == -EINPROGRESS) {
		/* in progress */
		wait_for_completion(&result.completion);
		ret = result.err;
		print_hex_dump_debug("digested key@" __stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, key,
				     digestsize, 1);
	}

	dma_unmap_single(ctx->dev, flc_dma, sizeof(flc->flc) + desc_bytes(desc),
			 DMA_TO_DEVICE);
err_flc_dma:
	dma_unmap_single(ctx->dev, key_dma, *keylen, DMA_BIDIRECTIONAL);
err_key_dma:
	kfree(flc);
err_flc:
	kfree(req_ctx);

	*keylen = digestsize;

	return ret;
}
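/*
 * HMAC keys longer than the algorithm block size are first reduced to a
 * digest by hash_digest_key(), as required by the HMAC construction; the
 * (possibly shortened) key is then referenced from the shared descriptors.
 */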
static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
			unsigned int keylen)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	unsigned int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
	unsigned int digestsize = crypto_ahash_digestsize(ahash);
	int ret;
	u8 *hashed_key = NULL;

	dev_dbg(ctx->dev, "keylen %d blocksize %d\n", keylen, blocksize);

	if (keylen > blocksize) {
		hashed_key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
		if (!hashed_key)
			return -ENOMEM;
		ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize);
		if (ret)
			goto bad_free_key;
		key = hashed_key;
	}

	ctx->adata.keylen = keylen;
	ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
					      OP_ALG_ALGSEL_MASK);
	if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
		goto bad_free_key;

	ctx->adata.key_virt = key;
	ctx->adata.key_inline = true;

	ret = ahash_set_sh_desc(ahash);
	kfree(hashed_key);
	return ret;
bad_free_key:
	kfree(hashed_key);
	crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
			       struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	if (edesc->src_nents)
		dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);

	if (edesc->qm_sg_bytes)
		dma_unmap_single(dev, edesc->qm_sg_dma, edesc->qm_sg_bytes,
				 DMA_TO_DEVICE);

	if (state->buf_dma) {
		dma_unmap_single(dev, state->buf_dma, *current_buflen(state),
				 DMA_TO_DEVICE);
		state->buf_dma = 0;
	}
}

static inline void ahash_unmap_ctx(struct device *dev,
				   struct ahash_edesc *edesc,
				   struct ahash_request *req, u32 flag)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	if (state->ctx_dma) {
		dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag);
		state->ctx_dma = 0;
	}

	ahash_unmap(dev, edesc, req);
}
static void ahash_done(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct ahash_request *req = ahash_request_cast(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct ahash_edesc *edesc = state->caam_req.edesc;
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	int ecode = 0;

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

	if (unlikely(status)) {
		caam_qi2_strstatus(ctx->dev, status);
		ecode = -EIO;
	}

	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
	memcpy(req->result, state->caam_ctx, digestsize);
	qi_cache_free(edesc);

	print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
			     ctx->ctx_len, 1);

	req->base.complete(&req->base, ecode);
}
static void ahash_done_bi(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct ahash_request *req = ahash_request_cast(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct ahash_edesc *edesc = state->caam_req.edesc;
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int ecode = 0;

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

	if (unlikely(status)) {
		caam_qi2_strstatus(ctx->dev, status);
		ecode = -EIO;
	}

	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
	switch_buf(state);
	qi_cache_free(edesc);

	print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
			     ctx->ctx_len, 1);
	print_hex_dump_debug("result@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			     crypto_ahash_digestsize(ahash), 1);

	req->base.complete(&req->base, ecode);
}
static void ahash_done_ctx_src(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct ahash_request *req = ahash_request_cast(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct ahash_edesc *edesc = state->caam_req.edesc;
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	int ecode = 0;

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

	if (unlikely(status)) {
		caam_qi2_strstatus(ctx->dev, status);
		ecode = -EIO;
	}

	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
	memcpy(req->result, state->caam_ctx, digestsize);
	qi_cache_free(edesc);

	print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
			     ctx->ctx_len, 1);

	req->base.complete(&req->base, ecode);
}
static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct ahash_request *req = ahash_request_cast(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct ahash_edesc *edesc = state->caam_req.edesc;
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int ecode = 0;

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

	if (unlikely(status)) {
		caam_qi2_strstatus(ctx->dev, status);
		ecode = -EIO;
	}

	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
	switch_buf(state);
	qi_cache_free(edesc);

	print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
			     ctx->ctx_len, 1);
	print_hex_dump_debug("result@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			     crypto_ahash_digestsize(ahash), 1);

	req->base.complete(&req->base, ecode);
}
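/*
 * The *_ctx request functions below operate on a running context: the
 * intermediate digest kept in state->caam_ctx is DMA mapped and handed to
 * the engine together with the new data, and the updated context (or the
 * final digest) is written back into the same buffer on completion.
 */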
3358 static int ahash_update_ctx(struct ahash_request
*req
)
3360 struct crypto_ahash
*ahash
= crypto_ahash_reqtfm(req
);
3361 struct caam_hash_ctx
*ctx
= crypto_ahash_ctx(ahash
);
3362 struct caam_hash_state
*state
= ahash_request_ctx(req
);
3363 struct caam_request
*req_ctx
= &state
->caam_req
;
3364 struct dpaa2_fl_entry
*in_fle
= &req_ctx
->fd_flt
[1];
3365 struct dpaa2_fl_entry
*out_fle
= &req_ctx
->fd_flt
[0];
3366 gfp_t flags
= (req
->base
.flags
& CRYPTO_TFM_REQ_MAY_SLEEP
) ?
3367 GFP_KERNEL
: GFP_ATOMIC
;
3368 u8
*buf
= current_buf(state
);
3369 int *buflen
= current_buflen(state
);
3370 u8
*next_buf
= alt_buf(state
);
3371 int *next_buflen
= alt_buflen(state
), last_buflen
;
3372 int in_len
= *buflen
+ req
->nbytes
, to_hash
;
3373 int src_nents
, mapped_nents
, qm_sg_bytes
, qm_sg_src_index
;
3374 struct ahash_edesc
*edesc
;
3377 last_buflen
= *next_buflen
;
3378 *next_buflen
= in_len
& (crypto_tfm_alg_blocksize(&ahash
->base
) - 1);
3379 to_hash
= in_len
- *next_buflen
;
3382 struct dpaa2_sg_entry
*sg_table
;
3384 src_nents
= sg_nents_for_len(req
->src
,
3385 req
->nbytes
- (*next_buflen
));
3386 if (src_nents
< 0) {
3387 dev_err(ctx
->dev
, "Invalid number of src SG.\n");
3392 mapped_nents
= dma_map_sg(ctx
->dev
, req
->src
, src_nents
,
3394 if (!mapped_nents
) {
3395 dev_err(ctx
->dev
, "unable to DMA map source\n");
3402 /* allocate space for base edesc and link tables */
3403 edesc
= qi_cache_zalloc(GFP_DMA
| flags
);
3405 dma_unmap_sg(ctx
->dev
, req
->src
, src_nents
,
3410 edesc
->src_nents
= src_nents
;
3411 qm_sg_src_index
= 1 + (*buflen
? 1 : 0);
3412 qm_sg_bytes
= (qm_sg_src_index
+ mapped_nents
) *
3414 sg_table
= &edesc
->sgt
[0];
3416 ret
= ctx_map_to_qm_sg(ctx
->dev
, state
, ctx
->ctx_len
, sg_table
,
3421 ret
= buf_map_to_qm_sg(ctx
->dev
, sg_table
+ 1, state
);
3426 sg_to_qm_sg_last(req
->src
, mapped_nents
,
3427 sg_table
+ qm_sg_src_index
, 0);
3429 scatterwalk_map_and_copy(next_buf
, req
->src
,
3433 dpaa2_sg_set_final(sg_table
+ qm_sg_src_index
- 1,
3437 edesc
->qm_sg_dma
= dma_map_single(ctx
->dev
, sg_table
,
3438 qm_sg_bytes
, DMA_TO_DEVICE
);
3439 if (dma_mapping_error(ctx
->dev
, edesc
->qm_sg_dma
)) {
3440 dev_err(ctx
->dev
, "unable to map S/G table\n");
3444 edesc
->qm_sg_bytes
= qm_sg_bytes
;
3446 memset(&req_ctx
->fd_flt
, 0, sizeof(req_ctx
->fd_flt
));
3447 dpaa2_fl_set_final(in_fle
, true);
3448 dpaa2_fl_set_format(in_fle
, dpaa2_fl_sg
);
3449 dpaa2_fl_set_addr(in_fle
, edesc
->qm_sg_dma
);
3450 dpaa2_fl_set_len(in_fle
, ctx
->ctx_len
+ to_hash
);
3451 dpaa2_fl_set_format(out_fle
, dpaa2_fl_single
);
3452 dpaa2_fl_set_addr(out_fle
, state
->ctx_dma
);
3453 dpaa2_fl_set_len(out_fle
, ctx
->ctx_len
);
3455 req_ctx
->flc
= &ctx
->flc
[UPDATE
];
3456 req_ctx
->flc_dma
= ctx
->flc_dma
[UPDATE
];
3457 req_ctx
->cbk
= ahash_done_bi
;
3458 req_ctx
->ctx
= &req
->base
;
3459 req_ctx
->edesc
= edesc
;
3461 ret
= dpaa2_caam_enqueue(ctx
->dev
, req_ctx
);
3462 if (ret
!= -EINPROGRESS
&&
3464 req
->base
.flags
& CRYPTO_TFM_REQ_MAY_BACKLOG
))
3466 } else if (*next_buflen
) {
3467 scatterwalk_map_and_copy(buf
+ *buflen
, req
->src
, 0,
3469 *buflen
= *next_buflen
;
3470 *next_buflen
= last_buflen
;
3473 print_hex_dump_debug("buf@" __stringify(__LINE__
)": ",
3474 DUMP_PREFIX_ADDRESS
, 16, 4, buf
, *buflen
, 1);
3475 print_hex_dump_debug("next buf@" __stringify(__LINE__
)": ",
3476 DUMP_PREFIX_ADDRESS
, 16, 4, next_buf
, *next_buflen
,
3481 ahash_unmap_ctx(ctx
->dev
, edesc
, req
, DMA_BIDIRECTIONAL
);
3482 qi_cache_free(edesc
);
3486 static int ahash_final_ctx(struct ahash_request
*req
)
3488 struct crypto_ahash
*ahash
= crypto_ahash_reqtfm(req
);
3489 struct caam_hash_ctx
*ctx
= crypto_ahash_ctx(ahash
);
3490 struct caam_hash_state
*state
= ahash_request_ctx(req
);
3491 struct caam_request
*req_ctx
= &state
->caam_req
;
3492 struct dpaa2_fl_entry
*in_fle
= &req_ctx
->fd_flt
[1];
3493 struct dpaa2_fl_entry
*out_fle
= &req_ctx
->fd_flt
[0];
3494 gfp_t flags
= (req
->base
.flags
& CRYPTO_TFM_REQ_MAY_SLEEP
) ?
3495 GFP_KERNEL
: GFP_ATOMIC
;
3496 int buflen
= *current_buflen(state
);
3497 int qm_sg_bytes
, qm_sg_src_index
;
3498 int digestsize
= crypto_ahash_digestsize(ahash
);
3499 struct ahash_edesc
*edesc
;
3500 struct dpaa2_sg_entry
*sg_table
;
3503 /* allocate space for base edesc and link tables */
3504 edesc
= qi_cache_zalloc(GFP_DMA
| flags
);
3508 qm_sg_src_index
= 1 + (buflen
? 1 : 0);
3509 qm_sg_bytes
= qm_sg_src_index
* sizeof(*sg_table
);
3510 sg_table
= &edesc
->sgt
[0];
3512 ret
= ctx_map_to_qm_sg(ctx
->dev
, state
, ctx
->ctx_len
, sg_table
,
3517 ret
= buf_map_to_qm_sg(ctx
->dev
, sg_table
+ 1, state
);
3521 dpaa2_sg_set_final(sg_table
+ qm_sg_src_index
- 1, true);
3523 edesc
->qm_sg_dma
= dma_map_single(ctx
->dev
, sg_table
, qm_sg_bytes
,
3525 if (dma_mapping_error(ctx
->dev
, edesc
->qm_sg_dma
)) {
3526 dev_err(ctx
->dev
, "unable to map S/G table\n");
3530 edesc
->qm_sg_bytes
= qm_sg_bytes
;
3532 memset(&req_ctx
->fd_flt
, 0, sizeof(req_ctx
->fd_flt
));
3533 dpaa2_fl_set_final(in_fle
, true);
3534 dpaa2_fl_set_format(in_fle
, dpaa2_fl_sg
);
3535 dpaa2_fl_set_addr(in_fle
, edesc
->qm_sg_dma
);
3536 dpaa2_fl_set_len(in_fle
, ctx
->ctx_len
+ buflen
);
3537 dpaa2_fl_set_format(out_fle
, dpaa2_fl_single
);
3538 dpaa2_fl_set_addr(out_fle
, state
->ctx_dma
);
3539 dpaa2_fl_set_len(out_fle
, digestsize
);
3541 req_ctx
->flc
= &ctx
->flc
[FINALIZE
];
3542 req_ctx
->flc_dma
= ctx
->flc_dma
[FINALIZE
];
3543 req_ctx
->cbk
= ahash_done_ctx_src
;
3544 req_ctx
->ctx
= &req
->base
;
3545 req_ctx
->edesc
= edesc
;
3547 ret
= dpaa2_caam_enqueue(ctx
->dev
, req_ctx
);
3548 if (ret
== -EINPROGRESS
||
3549 (ret
== -EBUSY
&& req
->base
.flags
& CRYPTO_TFM_REQ_MAY_BACKLOG
))
3553 ahash_unmap_ctx(ctx
->dev
, edesc
, req
, DMA_BIDIRECTIONAL
);
3554 qi_cache_free(edesc
);
3558 static int ahash_finup_ctx(struct ahash_request
*req
)
3560 struct crypto_ahash
*ahash
= crypto_ahash_reqtfm(req
);
3561 struct caam_hash_ctx
*ctx
= crypto_ahash_ctx(ahash
);
3562 struct caam_hash_state
*state
= ahash_request_ctx(req
);
3563 struct caam_request
*req_ctx
= &state
->caam_req
;
3564 struct dpaa2_fl_entry
*in_fle
= &req_ctx
->fd_flt
[1];
3565 struct dpaa2_fl_entry
*out_fle
= &req_ctx
->fd_flt
[0];
3566 gfp_t flags
= (req
->base
.flags
& CRYPTO_TFM_REQ_MAY_SLEEP
) ?
3567 GFP_KERNEL
: GFP_ATOMIC
;
3568 int buflen
= *current_buflen(state
);
3569 int qm_sg_bytes
, qm_sg_src_index
;
3570 int src_nents
, mapped_nents
;
3571 int digestsize
= crypto_ahash_digestsize(ahash
);
3572 struct ahash_edesc
*edesc
;
3573 struct dpaa2_sg_entry
*sg_table
;
3576 src_nents
= sg_nents_for_len(req
->src
, req
->nbytes
);
3577 if (src_nents
< 0) {
3578 dev_err(ctx
->dev
, "Invalid number of src SG.\n");
3583 mapped_nents
= dma_map_sg(ctx
->dev
, req
->src
, src_nents
,
3585 if (!mapped_nents
) {
3586 dev_err(ctx
->dev
, "unable to DMA map source\n");
3593 /* allocate space for base edesc and link tables */
3594 edesc
= qi_cache_zalloc(GFP_DMA
| flags
);
3596 dma_unmap_sg(ctx
->dev
, req
->src
, src_nents
, DMA_TO_DEVICE
);
3600 edesc
->src_nents
= src_nents
;
3601 qm_sg_src_index
= 1 + (buflen
? 1 : 0);
3602 qm_sg_bytes
= (qm_sg_src_index
+ mapped_nents
) * sizeof(*sg_table
);
3603 sg_table
= &edesc
->sgt
[0];
3605 ret
= ctx_map_to_qm_sg(ctx
->dev
, state
, ctx
->ctx_len
, sg_table
,
3610 ret
= buf_map_to_qm_sg(ctx
->dev
, sg_table
+ 1, state
);
3614 sg_to_qm_sg_last(req
->src
, mapped_nents
, sg_table
+ qm_sg_src_index
, 0);
3616 edesc
->qm_sg_dma
= dma_map_single(ctx
->dev
, sg_table
, qm_sg_bytes
,
3618 if (dma_mapping_error(ctx
->dev
, edesc
->qm_sg_dma
)) {
3619 dev_err(ctx
->dev
, "unable to map S/G table\n");
3623 edesc
->qm_sg_bytes
= qm_sg_bytes
;
3625 memset(&req_ctx
->fd_flt
, 0, sizeof(req_ctx
->fd_flt
));
3626 dpaa2_fl_set_final(in_fle
, true);
3627 dpaa2_fl_set_format(in_fle
, dpaa2_fl_sg
);
3628 dpaa2_fl_set_addr(in_fle
, edesc
->qm_sg_dma
);
3629 dpaa2_fl_set_len(in_fle
, ctx
->ctx_len
+ buflen
+ req
->nbytes
);
3630 dpaa2_fl_set_format(out_fle
, dpaa2_fl_single
);
3631 dpaa2_fl_set_addr(out_fle
, state
->ctx_dma
);
3632 dpaa2_fl_set_len(out_fle
, digestsize
);
3634 req_ctx
->flc
= &ctx
->flc
[FINALIZE
];
3635 req_ctx
->flc_dma
= ctx
->flc_dma
[FINALIZE
];
3636 req_ctx
->cbk
= ahash_done_ctx_src
;
3637 req_ctx
->ctx
= &req
->base
;
3638 req_ctx
->edesc
= edesc
;
3640 ret
= dpaa2_caam_enqueue(ctx
->dev
, req_ctx
);
3641 if (ret
== -EINPROGRESS
||
3642 (ret
== -EBUSY
&& req
->base
.flags
& CRYPTO_TFM_REQ_MAY_BACKLOG
))
3646 ahash_unmap_ctx(ctx
->dev
, edesc
, req
, DMA_BIDIRECTIONAL
);
3647 qi_cache_free(edesc
);
3651 static int ahash_digest(struct ahash_request
*req
)
3653 struct crypto_ahash
*ahash
= crypto_ahash_reqtfm(req
);
3654 struct caam_hash_ctx
*ctx
= crypto_ahash_ctx(ahash
);
3655 struct caam_hash_state
*state
= ahash_request_ctx(req
);
3656 struct caam_request
*req_ctx
= &state
->caam_req
;
3657 struct dpaa2_fl_entry
*in_fle
= &req_ctx
->fd_flt
[1];
3658 struct dpaa2_fl_entry
*out_fle
= &req_ctx
->fd_flt
[0];
3659 gfp_t flags
= (req
->base
.flags
& CRYPTO_TFM_REQ_MAY_SLEEP
) ?
3660 GFP_KERNEL
: GFP_ATOMIC
;
3661 int digestsize
= crypto_ahash_digestsize(ahash
);
3662 int src_nents
, mapped_nents
;
3663 struct ahash_edesc
*edesc
;
3668 src_nents
= sg_nents_for_len(req
->src
, req
->nbytes
);
3669 if (src_nents
< 0) {
3670 dev_err(ctx
->dev
, "Invalid number of src SG.\n");
3675 mapped_nents
= dma_map_sg(ctx
->dev
, req
->src
, src_nents
,
3677 if (!mapped_nents
) {
3678 dev_err(ctx
->dev
, "unable to map source for DMA\n");
3685 /* allocate space for base edesc and link tables */
3686 edesc
= qi_cache_zalloc(GFP_DMA
| flags
);
3688 dma_unmap_sg(ctx
->dev
, req
->src
, src_nents
, DMA_TO_DEVICE
);
3692 edesc
->src_nents
= src_nents
;
3693 memset(&req_ctx
->fd_flt
, 0, sizeof(req_ctx
->fd_flt
));
3695 if (mapped_nents
> 1) {
3697 struct dpaa2_sg_entry
*sg_table
= &edesc
->sgt
[0];
3699 qm_sg_bytes
= mapped_nents
* sizeof(*sg_table
);
3700 sg_to_qm_sg_last(req
->src
, mapped_nents
, sg_table
, 0);
3701 edesc
->qm_sg_dma
= dma_map_single(ctx
->dev
, sg_table
,
3702 qm_sg_bytes
, DMA_TO_DEVICE
);
3703 if (dma_mapping_error(ctx
->dev
, edesc
->qm_sg_dma
)) {
3704 dev_err(ctx
->dev
, "unable to map S/G table\n");
3707 edesc
->qm_sg_bytes
= qm_sg_bytes
;
3708 dpaa2_fl_set_format(in_fle
, dpaa2_fl_sg
);
3709 dpaa2_fl_set_addr(in_fle
, edesc
->qm_sg_dma
);
3711 dpaa2_fl_set_format(in_fle
, dpaa2_fl_single
);
3712 dpaa2_fl_set_addr(in_fle
, sg_dma_address(req
->src
));
3715 state
->ctx_dma_len
= digestsize
;
3716 state
->ctx_dma
= dma_map_single(ctx
->dev
, state
->caam_ctx
, digestsize
,
3718 if (dma_mapping_error(ctx
->dev
, state
->ctx_dma
)) {
3719 dev_err(ctx
->dev
, "unable to map ctx\n");
3724 dpaa2_fl_set_final(in_fle
, true);
3725 dpaa2_fl_set_len(in_fle
, req
->nbytes
);
3726 dpaa2_fl_set_format(out_fle
, dpaa2_fl_single
);
3727 dpaa2_fl_set_addr(out_fle
, state
->ctx_dma
);
3728 dpaa2_fl_set_len(out_fle
, digestsize
);
3730 req_ctx
->flc
= &ctx
->flc
[DIGEST
];
3731 req_ctx
->flc_dma
= ctx
->flc_dma
[DIGEST
];
3732 req_ctx
->cbk
= ahash_done
;
3733 req_ctx
->ctx
= &req
->base
;
3734 req_ctx
->edesc
= edesc
;
3735 ret
= dpaa2_caam_enqueue(ctx
->dev
, req_ctx
);
3736 if (ret
== -EINPROGRESS
||
3737 (ret
== -EBUSY
&& req
->base
.flags
& CRYPTO_TFM_REQ_MAY_BACKLOG
))
3741 ahash_unmap_ctx(ctx
->dev
, edesc
, req
, DMA_FROM_DEVICE
);
3742 qi_cache_free(edesc
);
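/*
 * The *_no_ctx variants are used while no running context has been
 * established yet, i.e. when everything seen so far still fits in the
 * bounce buffer; they issue a one-shot DIGEST or an initial UPDATE_FIRST
 * job instead of an UPDATE on an existing context.
 */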
3746 static int ahash_final_no_ctx(struct ahash_request
*req
)
3748 struct crypto_ahash
*ahash
= crypto_ahash_reqtfm(req
);
3749 struct caam_hash_ctx
*ctx
= crypto_ahash_ctx(ahash
);
3750 struct caam_hash_state
*state
= ahash_request_ctx(req
);
3751 struct caam_request
*req_ctx
= &state
->caam_req
;
3752 struct dpaa2_fl_entry
*in_fle
= &req_ctx
->fd_flt
[1];
3753 struct dpaa2_fl_entry
*out_fle
= &req_ctx
->fd_flt
[0];
3754 gfp_t flags
= (req
->base
.flags
& CRYPTO_TFM_REQ_MAY_SLEEP
) ?
3755 GFP_KERNEL
: GFP_ATOMIC
;
3756 u8
*buf
= current_buf(state
);
3757 int buflen
= *current_buflen(state
);
3758 int digestsize
= crypto_ahash_digestsize(ahash
);
3759 struct ahash_edesc
*edesc
;
3762 /* allocate space for base edesc and link tables */
3763 edesc
= qi_cache_zalloc(GFP_DMA
| flags
);
3768 state
->buf_dma
= dma_map_single(ctx
->dev
, buf
, buflen
,
3770 if (dma_mapping_error(ctx
->dev
, state
->buf_dma
)) {
3771 dev_err(ctx
->dev
, "unable to map src\n");
3776 state
->ctx_dma_len
= digestsize
;
3777 state
->ctx_dma
= dma_map_single(ctx
->dev
, state
->caam_ctx
, digestsize
,
3779 if (dma_mapping_error(ctx
->dev
, state
->ctx_dma
)) {
3780 dev_err(ctx
->dev
, "unable to map ctx\n");
3785 memset(&req_ctx
->fd_flt
, 0, sizeof(req_ctx
->fd_flt
));
3786 dpaa2_fl_set_final(in_fle
, true);
3788 * crypto engine requires the input entry to be present when
3789 * "frame list" FD is used.
3790 * Since engine does not support FMT=2'b11 (unused entry type), leaving
3791 * in_fle zeroized (except for "Final" flag) is the best option.
3794 dpaa2_fl_set_format(in_fle
, dpaa2_fl_single
);
3795 dpaa2_fl_set_addr(in_fle
, state
->buf_dma
);
3796 dpaa2_fl_set_len(in_fle
, buflen
);
3798 dpaa2_fl_set_format(out_fle
, dpaa2_fl_single
);
3799 dpaa2_fl_set_addr(out_fle
, state
->ctx_dma
);
3800 dpaa2_fl_set_len(out_fle
, digestsize
);
3802 req_ctx
->flc
= &ctx
->flc
[DIGEST
];
3803 req_ctx
->flc_dma
= ctx
->flc_dma
[DIGEST
];
3804 req_ctx
->cbk
= ahash_done
;
3805 req_ctx
->ctx
= &req
->base
;
3806 req_ctx
->edesc
= edesc
;
3808 ret
= dpaa2_caam_enqueue(ctx
->dev
, req_ctx
);
3809 if (ret
== -EINPROGRESS
||
3810 (ret
== -EBUSY
&& req
->base
.flags
& CRYPTO_TFM_REQ_MAY_BACKLOG
))
3814 ahash_unmap_ctx(ctx
->dev
, edesc
, req
, DMA_FROM_DEVICE
);
3815 qi_cache_free(edesc
);
3819 static int ahash_update_no_ctx(struct ahash_request
*req
)
3821 struct crypto_ahash
*ahash
= crypto_ahash_reqtfm(req
);
3822 struct caam_hash_ctx
*ctx
= crypto_ahash_ctx(ahash
);
3823 struct caam_hash_state
*state
= ahash_request_ctx(req
);
3824 struct caam_request
*req_ctx
= &state
->caam_req
;
3825 struct dpaa2_fl_entry
*in_fle
= &req_ctx
->fd_flt
[1];
3826 struct dpaa2_fl_entry
*out_fle
= &req_ctx
->fd_flt
[0];
3827 gfp_t flags
= (req
->base
.flags
& CRYPTO_TFM_REQ_MAY_SLEEP
) ?
3828 GFP_KERNEL
: GFP_ATOMIC
;
3829 u8
*buf
= current_buf(state
);
3830 int *buflen
= current_buflen(state
);
3831 u8
*next_buf
= alt_buf(state
);
3832 int *next_buflen
= alt_buflen(state
);
3833 int in_len
= *buflen
+ req
->nbytes
, to_hash
;
3834 int qm_sg_bytes
, src_nents
, mapped_nents
;
3835 struct ahash_edesc
*edesc
;
3838 *next_buflen
= in_len
& (crypto_tfm_alg_blocksize(&ahash
->base
) - 1);
3839 to_hash
= in_len
- *next_buflen
;
3842 struct dpaa2_sg_entry
*sg_table
;
3844 src_nents
= sg_nents_for_len(req
->src
,
3845 req
->nbytes
- *next_buflen
);
3846 if (src_nents
< 0) {
3847 dev_err(ctx
->dev
, "Invalid number of src SG.\n");
3852 mapped_nents
= dma_map_sg(ctx
->dev
, req
->src
, src_nents
,
3854 if (!mapped_nents
) {
3855 dev_err(ctx
->dev
, "unable to DMA map source\n");
3862 /* allocate space for base edesc and link tables */
3863 edesc
= qi_cache_zalloc(GFP_DMA
| flags
);
3865 dma_unmap_sg(ctx
->dev
, req
->src
, src_nents
,
3870 edesc
->src_nents
= src_nents
;
3871 qm_sg_bytes
= (1 + mapped_nents
) * sizeof(*sg_table
);
3872 sg_table
= &edesc
->sgt
[0];
3874 ret
= buf_map_to_qm_sg(ctx
->dev
, sg_table
, state
);
3878 sg_to_qm_sg_last(req
->src
, mapped_nents
, sg_table
+ 1, 0);
3881 scatterwalk_map_and_copy(next_buf
, req
->src
,
3885 edesc
->qm_sg_dma
= dma_map_single(ctx
->dev
, sg_table
,
3886 qm_sg_bytes
, DMA_TO_DEVICE
);
3887 if (dma_mapping_error(ctx
->dev
, edesc
->qm_sg_dma
)) {
3888 dev_err(ctx
->dev
, "unable to map S/G table\n");
3892 edesc
->qm_sg_bytes
= qm_sg_bytes
;
3894 state
->ctx_dma_len
= ctx
->ctx_len
;
3895 state
->ctx_dma
= dma_map_single(ctx
->dev
, state
->caam_ctx
,
3896 ctx
->ctx_len
, DMA_FROM_DEVICE
);
3897 if (dma_mapping_error(ctx
->dev
, state
->ctx_dma
)) {
3898 dev_err(ctx
->dev
, "unable to map ctx\n");
3904 memset(&req_ctx
->fd_flt
, 0, sizeof(req_ctx
->fd_flt
));
3905 dpaa2_fl_set_final(in_fle
, true);
3906 dpaa2_fl_set_format(in_fle
, dpaa2_fl_sg
);
3907 dpaa2_fl_set_addr(in_fle
, edesc
->qm_sg_dma
);
3908 dpaa2_fl_set_len(in_fle
, to_hash
);
3909 dpaa2_fl_set_format(out_fle
, dpaa2_fl_single
);
3910 dpaa2_fl_set_addr(out_fle
, state
->ctx_dma
);
3911 dpaa2_fl_set_len(out_fle
, ctx
->ctx_len
);
3913 req_ctx
->flc
= &ctx
->flc
[UPDATE_FIRST
];
3914 req_ctx
->flc_dma
= ctx
->flc_dma
[UPDATE_FIRST
];
3915 req_ctx
->cbk
= ahash_done_ctx_dst
;
3916 req_ctx
->ctx
= &req
->base
;
3917 req_ctx
->edesc
= edesc
;
3919 ret
= dpaa2_caam_enqueue(ctx
->dev
, req_ctx
);
3920 if (ret
!= -EINPROGRESS
&&
3922 req
->base
.flags
& CRYPTO_TFM_REQ_MAY_BACKLOG
))
3925 state
->update
= ahash_update_ctx
;
3926 state
->finup
= ahash_finup_ctx
;
3927 state
->final
= ahash_final_ctx
;
3928 } else if (*next_buflen
) {
3929 scatterwalk_map_and_copy(buf
+ *buflen
, req
->src
, 0,
3931 *buflen
= *next_buflen
;
3935 print_hex_dump_debug("buf@" __stringify(__LINE__
)": ",
3936 DUMP_PREFIX_ADDRESS
, 16, 4, buf
, *buflen
, 1);
3937 print_hex_dump_debug("next buf@" __stringify(__LINE__
)": ",
3938 DUMP_PREFIX_ADDRESS
, 16, 4, next_buf
, *next_buflen
,
3943 ahash_unmap_ctx(ctx
->dev
, edesc
, req
, DMA_TO_DEVICE
);
3944 qi_cache_free(edesc
);
3948 static int ahash_finup_no_ctx(struct ahash_request
*req
)
3950 struct crypto_ahash
*ahash
= crypto_ahash_reqtfm(req
);
3951 struct caam_hash_ctx
*ctx
= crypto_ahash_ctx(ahash
);
3952 struct caam_hash_state
*state
= ahash_request_ctx(req
);
3953 struct caam_request
*req_ctx
= &state
->caam_req
;
3954 struct dpaa2_fl_entry
*in_fle
= &req_ctx
->fd_flt
[1];
3955 struct dpaa2_fl_entry
*out_fle
= &req_ctx
->fd_flt
[0];
3956 gfp_t flags
= (req
->base
.flags
& CRYPTO_TFM_REQ_MAY_SLEEP
) ?
3957 GFP_KERNEL
: GFP_ATOMIC
;
3958 int buflen
= *current_buflen(state
);
3959 int qm_sg_bytes
, src_nents
, mapped_nents
;
3960 int digestsize
= crypto_ahash_digestsize(ahash
);
3961 struct ahash_edesc
*edesc
;
3962 struct dpaa2_sg_entry
*sg_table
;
3965 src_nents
= sg_nents_for_len(req
->src
, req
->nbytes
);
3966 if (src_nents
< 0) {
3967 dev_err(ctx
->dev
, "Invalid number of src SG.\n");
3972 mapped_nents
= dma_map_sg(ctx
->dev
, req
->src
, src_nents
,
3974 if (!mapped_nents
) {
3975 dev_err(ctx
->dev
, "unable to DMA map source\n");
3982 /* allocate space for base edesc and link tables */
3983 edesc
= qi_cache_zalloc(GFP_DMA
| flags
);
3985 dma_unmap_sg(ctx
->dev
, req
->src
, src_nents
, DMA_TO_DEVICE
);
3989 edesc
->src_nents
= src_nents
;
3990 qm_sg_bytes
= (2 + mapped_nents
) * sizeof(*sg_table
);
3991 sg_table
= &edesc
->sgt
[0];
3993 ret
= buf_map_to_qm_sg(ctx
->dev
, sg_table
, state
);
3997 sg_to_qm_sg_last(req
->src
, mapped_nents
, sg_table
+ 1, 0);
3999 edesc
->qm_sg_dma
= dma_map_single(ctx
->dev
, sg_table
, qm_sg_bytes
,
4001 if (dma_mapping_error(ctx
->dev
, edesc
->qm_sg_dma
)) {
4002 dev_err(ctx
->dev
, "unable to map S/G table\n");
4006 edesc
->qm_sg_bytes
= qm_sg_bytes
;
4008 state
->ctx_dma_len
= digestsize
;
4009 state
->ctx_dma
= dma_map_single(ctx
->dev
, state
->caam_ctx
, digestsize
,
4011 if (dma_mapping_error(ctx
->dev
, state
->ctx_dma
)) {
4012 dev_err(ctx
->dev
, "unable to map ctx\n");
4018 memset(&req_ctx
->fd_flt
, 0, sizeof(req_ctx
->fd_flt
));
4019 dpaa2_fl_set_final(in_fle
, true);
4020 dpaa2_fl_set_format(in_fle
, dpaa2_fl_sg
);
4021 dpaa2_fl_set_addr(in_fle
, edesc
->qm_sg_dma
);
4022 dpaa2_fl_set_len(in_fle
, buflen
+ req
->nbytes
);
4023 dpaa2_fl_set_format(out_fle
, dpaa2_fl_single
);
4024 dpaa2_fl_set_addr(out_fle
, state
->ctx_dma
);
4025 dpaa2_fl_set_len(out_fle
, digestsize
);
4027 req_ctx
->flc
= &ctx
->flc
[DIGEST
];
4028 req_ctx
->flc_dma
= ctx
->flc_dma
[DIGEST
];
4029 req_ctx
->cbk
= ahash_done
;
4030 req_ctx
->ctx
= &req
->base
;
4031 req_ctx
->edesc
= edesc
;
4032 ret
= dpaa2_caam_enqueue(ctx
->dev
, req_ctx
);
4033 if (ret
!= -EINPROGRESS
&&
4034 !(ret
== -EBUSY
&& req
->base
.flags
& CRYPTO_TFM_REQ_MAY_BACKLOG
))
4039 ahash_unmap_ctx(ctx
->dev
, edesc
, req
, DMA_FROM_DEVICE
);
4040 qi_cache_free(edesc
);
4044 static int ahash_update_first(struct ahash_request
*req
)
4046 struct crypto_ahash
*ahash
= crypto_ahash_reqtfm(req
);
4047 struct caam_hash_ctx
*ctx
= crypto_ahash_ctx(ahash
);
4048 struct caam_hash_state
*state
= ahash_request_ctx(req
);
4049 struct caam_request
*req_ctx
= &state
->caam_req
;
4050 struct dpaa2_fl_entry
*in_fle
= &req_ctx
->fd_flt
[1];
4051 struct dpaa2_fl_entry
*out_fle
= &req_ctx
->fd_flt
[0];
4052 gfp_t flags
= (req
->base
.flags
& CRYPTO_TFM_REQ_MAY_SLEEP
) ?
4053 GFP_KERNEL
: GFP_ATOMIC
;
4054 u8
*next_buf
= alt_buf(state
);
4055 int *next_buflen
= alt_buflen(state
);
4057 int src_nents
, mapped_nents
;
4058 struct ahash_edesc
*edesc
;
4061 *next_buflen
= req
->nbytes
& (crypto_tfm_alg_blocksize(&ahash
->base
) -
4063 to_hash
= req
->nbytes
- *next_buflen
;
4066 struct dpaa2_sg_entry
*sg_table
;
4068 src_nents
= sg_nents_for_len(req
->src
,
4069 req
->nbytes
- (*next_buflen
));
4070 if (src_nents
< 0) {
4071 dev_err(ctx
->dev
, "Invalid number of src SG.\n");
4076 mapped_nents
= dma_map_sg(ctx
->dev
, req
->src
, src_nents
,
4078 if (!mapped_nents
) {
4079 dev_err(ctx
->dev
, "unable to map source for DMA\n");
4086 /* allocate space for base edesc and link tables */
4087 edesc
= qi_cache_zalloc(GFP_DMA
| flags
);
4089 dma_unmap_sg(ctx
->dev
, req
->src
, src_nents
,
4094 edesc
->src_nents
= src_nents
;
4095 sg_table
= &edesc
->sgt
[0];
4097 memset(&req_ctx
->fd_flt
, 0, sizeof(req_ctx
->fd_flt
));
4098 dpaa2_fl_set_final(in_fle
, true);
4099 dpaa2_fl_set_len(in_fle
, to_hash
);
4101 if (mapped_nents
> 1) {
4104 sg_to_qm_sg_last(req
->src
, mapped_nents
, sg_table
, 0);
4105 qm_sg_bytes
= mapped_nents
* sizeof(*sg_table
);
4106 edesc
->qm_sg_dma
= dma_map_single(ctx
->dev
, sg_table
,
4109 if (dma_mapping_error(ctx
->dev
, edesc
->qm_sg_dma
)) {
4110 dev_err(ctx
->dev
, "unable to map S/G table\n");
4114 edesc
->qm_sg_bytes
= qm_sg_bytes
;
4115 dpaa2_fl_set_format(in_fle
, dpaa2_fl_sg
);
4116 dpaa2_fl_set_addr(in_fle
, edesc
->qm_sg_dma
);
4118 dpaa2_fl_set_format(in_fle
, dpaa2_fl_single
);
4119 dpaa2_fl_set_addr(in_fle
, sg_dma_address(req
->src
));
4123 scatterwalk_map_and_copy(next_buf
, req
->src
, to_hash
,
4126 state
->ctx_dma_len
= ctx
->ctx_len
;
4127 state
->ctx_dma
= dma_map_single(ctx
->dev
, state
->caam_ctx
,
4128 ctx
->ctx_len
, DMA_FROM_DEVICE
);
4129 if (dma_mapping_error(ctx
->dev
, state
->ctx_dma
)) {
4130 dev_err(ctx
->dev
, "unable to map ctx\n");
4136 dpaa2_fl_set_format(out_fle
, dpaa2_fl_single
);
4137 dpaa2_fl_set_addr(out_fle
, state
->ctx_dma
);
4138 dpaa2_fl_set_len(out_fle
, ctx
->ctx_len
);
4140 req_ctx
->flc
= &ctx
->flc
[UPDATE_FIRST
];
4141 req_ctx
->flc_dma
= ctx
->flc_dma
[UPDATE_FIRST
];
4142 req_ctx
->cbk
= ahash_done_ctx_dst
;
4143 req_ctx
->ctx
= &req
->base
;
4144 req_ctx
->edesc
= edesc
;
4146 ret
= dpaa2_caam_enqueue(ctx
->dev
, req_ctx
);
4147 if (ret
!= -EINPROGRESS
&&
4148 !(ret
== -EBUSY
&& req
->base
.flags
&
4149 CRYPTO_TFM_REQ_MAY_BACKLOG
))
4152 state
->update
= ahash_update_ctx
;
4153 state
->finup
= ahash_finup_ctx
;
4154 state
->final
= ahash_final_ctx
;
4155 } else if (*next_buflen
) {
4156 state
->update
= ahash_update_no_ctx
;
4157 state
->finup
= ahash_finup_no_ctx
;
4158 state
->final
= ahash_final_no_ctx
;
4159 scatterwalk_map_and_copy(next_buf
, req
->src
, 0,
4164 print_hex_dump_debug("next buf@" __stringify(__LINE__
)": ",
4165 DUMP_PREFIX_ADDRESS
, 16, 4, next_buf
, *next_buflen
,
4170 ahash_unmap_ctx(ctx
->dev
, edesc
, req
, DMA_TO_DEVICE
);
4171 qi_cache_free(edesc
);
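/*
 * ahash_init() seeds the per-request function pointers with the "first"
 * variants; once a running context exists they are switched to the *_ctx
 * versions, so ahash_update()/ahash_finup()/ahash_final() simply dispatch
 * through state->update/finup/final.
 */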
static int ahash_finup_first(struct ahash_request *req)
{
	return ahash_digest(req);
}

static int ahash_init(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	state->update = ahash_update_first;
	state->finup = ahash_finup_first;
	state->final = ahash_final_no_ctx;

	state->ctx_dma = 0;
	state->ctx_dma_len = 0;
	state->current_buf = 0;
	state->buf_dma = 0;
	state->buflen_0 = 0;
	state->buflen_1 = 0;

	return 0;
}

static int ahash_update(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->update(req);
}

static int ahash_finup(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->finup(req);
}

static int ahash_final(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->final(req);
}
static int ahash_export(struct ahash_request *req, void *out)
{
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct caam_export_state *export = out;
	u8 *buf;
	int len;

	if (state->current_buf) {
		buf = state->buf_1;
		len = state->buflen_1;
	} else {
		buf = state->buf_0;
		len = state->buflen_0;
	}

	memcpy(export->buf, buf, len);
	memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
	export->buflen = len;
	export->update = state->update;
	export->final = state->final;
	export->finup = state->finup;

	return 0;
}

static int ahash_import(struct ahash_request *req, const void *in)
{
	struct caam_hash_state *state = ahash_request_ctx(req);
	const struct caam_export_state *export = in;

	memset(state, 0, sizeof(*state));
	memcpy(state->buf_0, export->buf, export->buflen);
	memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
	state->buflen_0 = export->buflen;
	state->update = export->update;
	state->final = export->final;
	state->finup = export->finup;

	return 0;
}
struct caam_hash_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	struct ahash_alg template_ahash;
	u32 alg_type;
};
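/*
 * Each template below is meant to be registered twice - once as the plain
 * (unkeyed) hash and once as its hmac() counterpart - via the keyed flag
 * passed to caam_hash_alloc().
 */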
/* ahash descriptors */
static struct caam_hash_template driver_hash[] = {
	{
		.name = "sha1",
		.driver_name = "sha1-caam-qi2",
		.hmac_name = "hmac(sha1)",
		.hmac_driver_name = "hmac-sha1-caam-qi2",
		.blocksize = SHA1_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA1_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA1,
	},
	{
		.name = "sha224",
		.driver_name = "sha224-caam-qi2",
		.hmac_name = "hmac(sha224)",
		.hmac_driver_name = "hmac-sha224-caam-qi2",
		.blocksize = SHA224_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA224_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA224,
	},
	{
		.name = "sha256",
		.driver_name = "sha256-caam-qi2",
		.hmac_name = "hmac(sha256)",
		.hmac_driver_name = "hmac-sha256-caam-qi2",
		.blocksize = SHA256_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA256_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA256,
	},
	{
		.name = "sha384",
		.driver_name = "sha384-caam-qi2",
		.hmac_name = "hmac(sha384)",
		.hmac_driver_name = "hmac-sha384-caam-qi2",
		.blocksize = SHA384_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA384_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA384,
	},
	{
		.name = "sha512",
		.driver_name = "sha512-caam-qi2",
		.hmac_name = "hmac(sha512)",
		.hmac_driver_name = "hmac-sha512-caam-qi2",
		.blocksize = SHA512_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA512_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA512,
	},
	{
		.name = "md5",
		.driver_name = "md5-caam-qi2",
		.hmac_name = "hmac(md5)",
		.hmac_driver_name = "hmac-md5-caam-qi2",
		.blocksize = MD5_BLOCK_WORDS * 4,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = MD5_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_MD5,
	},
};

struct caam_hash_alg {
	struct list_head entry;
	struct device *dev;
	int alg_type;
	struct ahash_alg ahash_alg;
};

static int caam_hash_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct crypto_alg *base = tfm->__crt_alg;
	struct hash_alg_common *halg =
		 container_of(base, struct hash_alg_common, base);
	struct ahash_alg *alg =
		 container_of(halg, struct ahash_alg, halg);
	struct caam_hash_alg *caam_hash =
		 container_of(alg, struct caam_hash_alg, ahash_alg);
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
					 HASH_MSG_LEN + 32,
					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
					 HASH_MSG_LEN + 64,
					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
	dma_addr_t dma_addr;
	int i;

	ctx->dev = caam_hash->dev;

	dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc, sizeof(ctx->flc),
					DMA_BIDIRECTIONAL,
					DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(ctx->dev, dma_addr)) {
		dev_err(ctx->dev, "unable to map shared descriptors\n");
		return -ENOMEM;
	}

	for (i = 0; i < HASH_NUM_OP; i++)
		ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);

	/* copy descriptor header template value */
	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;

	ctx->ctx_len = runninglen[(ctx->adata.algtype &
				   OP_ALG_ALGSEL_SUBMASK) >>
				  OP_ALG_ALGSEL_SHIFT];

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct caam_hash_state));

	return ahash_set_sh_desc(ahash);
}
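
/*
 * Note on the runninglen[] lookup above: the MDHA algorithm selectors
 * (MD5, SHA1, SHA224, SHA256, SHA384, SHA512) occupy consecutive values, so
 * masking with OP_ALG_ALGSEL_SUBMASK and shifting by OP_ALG_ALGSEL_SHIFT
 * yields an index 0..5 into runninglen[]. Worked example, assuming the
 * SHA256 selector is the fourth entry in that family:
 *
 *	idx = (OP_ALG_ALGSEL_SHA256 & OP_ALG_ALGSEL_SUBMASK) >>
 *	      OP_ALG_ALGSEL_SHIFT;	// idx == 3
 *	ctx_len = runninglen[idx];	// HASH_MSG_LEN + SHA256_DIGEST_SIZE
 */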

static void caam_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0], sizeof(ctx->flc),
			       DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC);
}

static struct caam_hash_alg *caam_hash_alloc(struct device *dev,
	 struct caam_hash_template *template, bool keyed)
{
	struct caam_hash_alg *t_alg;
	struct ahash_alg *halg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
	if (!t_alg)
		return ERR_PTR(-ENOMEM);

	t_alg->ahash_alg = template->template_ahash;
	halg = &t_alg->ahash_alg;
	alg = &halg->halg.base;

	if (keyed) {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_driver_name);
	} else {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->driver_name);
		t_alg->ahash_alg.setkey = NULL;
	}

	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_hash_cra_init;
	alg->cra_exit = caam_hash_cra_exit;
	alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_flags = CRYPTO_ALG_ASYNC;

	t_alg->alg_type = template->alg_type;
	t_alg->dev = dev;

	return t_alg;
}

static void dpaa2_caam_fqdan_cb(struct dpaa2_io_notification_ctx *nctx)
{
	struct dpaa2_caam_priv_per_cpu *ppriv;

	ppriv = container_of(nctx, struct dpaa2_caam_priv_per_cpu, nctx);
	napi_schedule_irqoff(&ppriv->napi);
}

static int __cold dpaa2_dpseci_dpio_setup(struct dpaa2_caam_priv *priv)
{
	struct device *dev = priv->dev;
	struct dpaa2_io_notification_ctx *nctx;
	struct dpaa2_caam_priv_per_cpu *ppriv;
	int err, i = 0, cpu;

	for_each_online_cpu(cpu) {
		ppriv = per_cpu_ptr(priv->ppriv, cpu);
		ppriv->priv = priv;
		nctx = &ppriv->nctx;
		nctx->is_cdan = 0;
		nctx->id = ppriv->rsp_fqid;
		nctx->desired_cpu = cpu;
		nctx->cb = dpaa2_caam_fqdan_cb;

		/* Register notification callbacks */
		ppriv->dpio = dpaa2_io_service_select(cpu);
		err = dpaa2_io_service_register(ppriv->dpio, nctx, dev);
		if (unlikely(err)) {
			dev_dbg(dev, "No affine DPIO for cpu %d\n", cpu);
			nctx->cb = NULL;
			/*
			 * If no affine DPIO for this core, there's probably
			 * none available for next cores either. Signal we want
			 * to retry later, in case the DPIO devices weren't
			 * probed yet.
			 */
			err = -EPROBE_DEFER;
			goto err;
		}

		ppriv->store = dpaa2_io_store_create(DPAA2_CAAM_STORE_SIZE,
						     dev);
		if (unlikely(!ppriv->store)) {
			dev_err(dev, "dpaa2_io_store_create() failed\n");
			err = -ENOMEM;
			goto err;
		}

		if (++i == priv->num_pairs)
			break;
	}

	return 0;

err:
	for_each_online_cpu(cpu) {
		ppriv = per_cpu_ptr(priv->ppriv, cpu);
		if (!ppriv->nctx.cb)
			break;
		dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx, dev);
	}

	for_each_online_cpu(cpu) {
		ppriv = per_cpu_ptr(priv->ppriv, cpu);
		if (!ppriv->store)
			break;
		dpaa2_io_store_destroy(ppriv->store);
	}

	return err;
}

static void __cold dpaa2_dpseci_dpio_free(struct dpaa2_caam_priv *priv)
{
	struct dpaa2_caam_priv_per_cpu *ppriv;
	int i = 0, cpu;

	for_each_online_cpu(cpu) {
		ppriv = per_cpu_ptr(priv->ppriv, cpu);
		dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx,
					    priv->dev);
		dpaa2_io_store_destroy(ppriv->store);

		if (++i == priv->num_pairs)
			return;
	}
}

static int dpaa2_dpseci_bind(struct dpaa2_caam_priv *priv)
{
	struct dpseci_rx_queue_cfg rx_queue_cfg;
	struct device *dev = priv->dev;
	struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
	struct dpaa2_caam_priv_per_cpu *ppriv;
	int err = 0, i = 0, cpu;

	/* Configure Rx queues */
	for_each_online_cpu(cpu) {
		ppriv = per_cpu_ptr(priv->ppriv, cpu);

		rx_queue_cfg.options = DPSECI_QUEUE_OPT_DEST |
				       DPSECI_QUEUE_OPT_USER_CTX;
		rx_queue_cfg.order_preservation_en = 0;
		rx_queue_cfg.dest_cfg.dest_type = DPSECI_DEST_DPIO;
		rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
		/*
		 * Rx priority (WQ) doesn't really matter, since we use
		 * pull mode, i.e. volatile dequeues from specific FQs
		 */
		rx_queue_cfg.dest_cfg.priority = 0;
		rx_queue_cfg.user_ctx = ppriv->nctx.qman64;

		err = dpseci_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
					  &rx_queue_cfg);
		if (err) {
			dev_err(dev, "dpseci_set_rx_queue() failed with err %d\n",
				err);
			return err;
		}

		if (++i == priv->num_pairs)
			break;
	}

	return err;
}

static void dpaa2_dpseci_congestion_free(struct dpaa2_caam_priv *priv)
{
	struct device *dev = priv->dev;

	if (!priv->cscn_mem)
		return;

	dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
	kfree(priv->cscn_mem);
}

static void dpaa2_dpseci_free(struct dpaa2_caam_priv *priv)
{
	struct device *dev = priv->dev;
	struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);

	dpaa2_dpseci_congestion_free(priv);
	dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
}

static void dpaa2_caam_process_fd(struct dpaa2_caam_priv *priv,
				  const struct dpaa2_fd *fd)
{
	struct caam_request *req;
	u32 fd_err;

	if (dpaa2_fd_get_format(fd) != dpaa2_fd_list) {
		dev_err(priv->dev, "Only Frame List FD format is supported!\n");
		return;
	}

	fd_err = dpaa2_fd_get_ctrl(fd) & FD_CTRL_ERR_MASK;
	if (unlikely(fd_err))
		dev_err(priv->dev, "FD error: %08x\n", fd_err);

	/*
	 * FD[ADDR] is guaranteed to be valid, irrespective of errors reported
	 * in FD[ERR] or FD[FRC].
	 */
	req = dpaa2_caam_iova_to_virt(priv, dpaa2_fd_get_addr(fd));
	dma_unmap_single(priv->dev, req->fd_flt_dma, sizeof(req->fd_flt),
			 DMA_BIDIRECTIONAL);
	req->cbk(req->ctx, dpaa2_fd_get_frc(fd));
}

static int dpaa2_caam_pull_fq(struct dpaa2_caam_priv_per_cpu *ppriv)
{
	int err;

	/* Retry while portal is busy */
	do {
		err = dpaa2_io_service_pull_fq(ppriv->dpio, ppriv->rsp_fqid,
					       ppriv->store);
	} while (err == -EBUSY);

	if (unlikely(err))
		dev_err(ppriv->priv->dev, "dpaa2_io_service_pull err %d", err);

	return err;
}

static int dpaa2_caam_store_consume(struct dpaa2_caam_priv_per_cpu *ppriv)
{
	struct dpaa2_dq *dq;
	int cleaned = 0, is_last;

	do {
		dq = dpaa2_io_store_next(ppriv->store, &is_last);
		if (unlikely(!dq)) {
			if (unlikely(!is_last)) {
				dev_dbg(ppriv->priv->dev,
					"FQ %d returned no valid frames\n",
					ppriv->rsp_fqid);
				/*
				 * MUST retry until we get some sort of
				 * valid response token (be it "empty dequeue"
				 * or a valid frame).
				 */
				continue;
			}
			break;
		}

		/* Process FD */
		dpaa2_caam_process_fd(ppriv->priv, dpaa2_dq_fd(dq));
		cleaned++;
	} while (!is_last);

	return cleaned;
}

static int dpaa2_dpseci_poll(struct napi_struct *napi, int budget)
{
	struct dpaa2_caam_priv_per_cpu *ppriv;
	struct dpaa2_caam_priv *priv;
	int err, cleaned = 0, store_cleaned;

	ppriv = container_of(napi, struct dpaa2_caam_priv_per_cpu, napi);
	priv = ppriv->priv;

	if (unlikely(dpaa2_caam_pull_fq(ppriv)))
		return 0;

	do {
		store_cleaned = dpaa2_caam_store_consume(ppriv);
		cleaned += store_cleaned;

		if (store_cleaned == 0 ||
		    cleaned > budget - DPAA2_CAAM_STORE_SIZE)
			break;

		/* Try to dequeue some more */
		err = dpaa2_caam_pull_fq(ppriv);
		if (unlikely(err))
			break;
	} while (1);

	if (cleaned < budget) {
		napi_complete_done(napi, cleaned);
		err = dpaa2_io_service_rearm(ppriv->dpio, &ppriv->nctx);
		if (unlikely(err))
			dev_err(priv->dev, "Notification rearm failed: %d\n",
				err);
	}

	return cleaned;
}
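
/*
 * Note on the budget check above: the loop stops re-pulling once another
 * full store could overshoot the NAPI budget. As a worked example, assuming
 * an illustrative budget of 64 and a DPAA2_CAAM_STORE_SIZE of 16, dequeuing
 * continues only while cleaned <= 48, so one further store of at most 16
 * frames can never push cleaned past the budget, and napi_complete_done()
 * runs only when we finish strictly under budget.
 */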

static int dpaa2_dpseci_congestion_setup(struct dpaa2_caam_priv *priv,
					 u16 token)
{
	struct dpseci_congestion_notification_cfg cong_notif_cfg = { 0 };
	struct device *dev = priv->dev;
	int err;

	/*
	 * Congestion group feature supported starting with DPSECI API v5.1
	 * and only when object has been created with this capability.
	 */
	if ((DPSECI_VER(priv->major_ver, priv->minor_ver) < DPSECI_VER(5, 1)) ||
	    !(priv->dpseci_attr.options & DPSECI_OPT_HAS_CG))
		return 0;

	priv->cscn_mem = kzalloc(DPAA2_CSCN_SIZE + DPAA2_CSCN_ALIGN,
				 GFP_KERNEL | GFP_DMA);
	if (!priv->cscn_mem)
		return -ENOMEM;

	priv->cscn_mem_aligned = PTR_ALIGN(priv->cscn_mem, DPAA2_CSCN_ALIGN);
	priv->cscn_dma = dma_map_single(dev, priv->cscn_mem_aligned,
					DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, priv->cscn_dma)) {
		dev_err(dev, "Error mapping CSCN memory area\n");
		err = -ENOMEM;
		goto err_dma_map;
	}

	cong_notif_cfg.units = DPSECI_CONGESTION_UNIT_BYTES;
	cong_notif_cfg.threshold_entry = DPAA2_SEC_CONG_ENTRY_THRESH;
	cong_notif_cfg.threshold_exit = DPAA2_SEC_CONG_EXIT_THRESH;
	cong_notif_cfg.message_ctx = (uintptr_t)priv;
	cong_notif_cfg.message_iova = priv->cscn_dma;
	cong_notif_cfg.notification_mode = DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER |
					DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT |
					DPSECI_CGN_MODE_COHERENT_WRITE;

	err = dpseci_set_congestion_notification(priv->mc_io, 0, token,
						 &cong_notif_cfg);
	if (err) {
		dev_err(dev, "dpseci_set_congestion_notification failed\n");
		goto err_set_cong;
	}

	return 0;

err_set_cong:
	dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
err_dma_map:
	kfree(priv->cscn_mem);

	return err;
}

static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
{
	struct device *dev = &ls_dev->dev;
	struct dpaa2_caam_priv *priv;
	struct dpaa2_caam_priv_per_cpu *ppriv;
	int err, cpu;
	u8 i;

	priv = dev_get_drvdata(dev);

	priv->dev = dev;
	priv->dpsec_id = ls_dev->obj_desc.id;

	/* Get a handle for the DPSECI this interface is associate with */
	err = dpseci_open(priv->mc_io, 0, priv->dpsec_id, &ls_dev->mc_handle);
	if (err) {
		dev_err(dev, "dpseci_open() failed: %d\n", err);
		goto err_open;
	}

	err = dpseci_get_api_version(priv->mc_io, 0, &priv->major_ver,
				     &priv->minor_ver);
	if (err) {
		dev_err(dev, "dpseci_get_api_version() failed\n");
		goto err_get_vers;
	}

	dev_info(dev, "dpseci v%d.%d\n", priv->major_ver, priv->minor_ver);

	err = dpseci_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
				    &priv->dpseci_attr);
	if (err) {
		dev_err(dev, "dpseci_get_attributes() failed\n");
		goto err_get_vers;
	}

	err = dpseci_get_sec_attr(priv->mc_io, 0, ls_dev->mc_handle,
				  &priv->sec_attr);
	if (err) {
		dev_err(dev, "dpseci_get_sec_attr() failed\n");
		goto err_get_vers;
	}

	err = dpaa2_dpseci_congestion_setup(priv, ls_dev->mc_handle);
	if (err) {
		dev_err(dev, "setup_congestion() failed\n");
		goto err_get_vers;
	}

	priv->num_pairs = min(priv->dpseci_attr.num_rx_queues,
			      priv->dpseci_attr.num_tx_queues);
	if (priv->num_pairs > num_online_cpus()) {
		dev_warn(dev, "%d queues won't be used\n",
			 priv->num_pairs - num_online_cpus());
		priv->num_pairs = num_online_cpus();
	}

	for (i = 0; i < priv->dpseci_attr.num_rx_queues; i++) {
		err = dpseci_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
					  &priv->rx_queue_attr[i]);
		if (err) {
			dev_err(dev, "dpseci_get_rx_queue() failed\n");
			goto err_get_rx_queue;
		}
	}

	for (i = 0; i < priv->dpseci_attr.num_tx_queues; i++) {
		err = dpseci_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
					  &priv->tx_queue_attr[i]);
		if (err) {
			dev_err(dev, "dpseci_get_tx_queue() failed\n");
			goto err_get_rx_queue;
		}
	}

	i = 0;
	for_each_online_cpu(cpu) {
		u8 j;

		j = i % priv->num_pairs;

		ppriv = per_cpu_ptr(priv->ppriv, cpu);
		ppriv->req_fqid = priv->tx_queue_attr[j].fqid;

		/*
		 * Allow all cores to enqueue, while only some of them
		 * will take part in dequeuing.
		 */
		if (++i > priv->num_pairs)
			continue;

		ppriv->rsp_fqid = priv->rx_queue_attr[j].fqid;

		dev_dbg(dev, "pair %d: rx queue %d, tx queue %d\n", j,
			priv->rx_queue_attr[j].fqid,
			priv->tx_queue_attr[j].fqid);

		ppriv->net_dev.dev = *dev;
		INIT_LIST_HEAD(&ppriv->net_dev.napi_list);
		netif_napi_add(&ppriv->net_dev, &ppriv->napi, dpaa2_dpseci_poll,
			       DPAA2_CAAM_NAPI_WEIGHT);
	}

	return 0;

err_get_rx_queue:
	dpaa2_dpseci_congestion_free(priv);
err_get_vers:
	dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
err_open:
	return err;
}

static int dpaa2_dpseci_enable(struct dpaa2_caam_priv *priv)
{
	struct device *dev = priv->dev;
	struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
	struct dpaa2_caam_priv_per_cpu *ppriv;
	int i;

	for (i = 0; i < priv->num_pairs; i++) {
		ppriv = per_cpu_ptr(priv->ppriv, i);
		napi_enable(&ppriv->napi);
	}

	return dpseci_enable(priv->mc_io, 0, ls_dev->mc_handle);
}

static int __cold dpaa2_dpseci_disable(struct dpaa2_caam_priv *priv)
{
	struct device *dev = priv->dev;
	struct dpaa2_caam_priv_per_cpu *ppriv;
	struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
	int i, err = 0, enabled;

	err = dpseci_disable(priv->mc_io, 0, ls_dev->mc_handle);
	if (err) {
		dev_err(dev, "dpseci_disable() failed\n");
		return err;
	}

	err = dpseci_is_enabled(priv->mc_io, 0, ls_dev->mc_handle, &enabled);
	if (err) {
		dev_err(dev, "dpseci_is_enabled() failed\n");
		return err;
	}

	dev_dbg(dev, "disable: %s\n", enabled ? "false" : "true");

	for (i = 0; i < priv->num_pairs; i++) {
		ppriv = per_cpu_ptr(priv->ppriv, i);
		napi_disable(&ppriv->napi);
		netif_napi_del(&ppriv->napi);
	}

	return 0;
}

static struct list_head hash_list;

static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev)
{
	struct device *dev;
	struct dpaa2_caam_priv *priv;
	int i, err = 0;
	bool registered = false;

	/*
	 * There is no way to get CAAM endianness - there is no direct register
	 * space access and MC f/w does not provide this attribute.
	 * All DPAA2-based SoCs have little endian CAAM, thus hard-code this
	 * property.
	 */
	caam_little_end = true;

	dev = &dpseci_dev->dev;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	dev_set_drvdata(dev, priv);

	priv->domain = iommu_get_domain_for_dev(dev);

	qi_cache = kmem_cache_create("dpaa2_caamqicache", CAAM_QI_MEMCACHE_SIZE,
				     0, SLAB_CACHE_DMA, NULL);
	if (!qi_cache) {
		dev_err(dev, "Can't allocate SEC cache\n");
		return -ENOMEM;
	}

	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49));
	if (err) {
		dev_err(dev, "dma_set_mask_and_coherent() failed\n");
		goto err_dma_mask;
	}

	/* Obtain a MC portal */
	err = fsl_mc_portal_allocate(dpseci_dev, 0, &priv->mc_io);
	if (err) {
		if (err == -ENXIO)
			err = -EPROBE_DEFER;
		else
			dev_err(dev, "MC portal allocation failed\n");

		goto err_dma_mask;
	}

	priv->ppriv = alloc_percpu(*priv->ppriv);
	if (!priv->ppriv) {
		dev_err(dev, "alloc_percpu() failed\n");
		err = -ENOMEM;
		goto err_alloc_ppriv;
	}

	/* DPSECI initialization */
	err = dpaa2_dpseci_setup(dpseci_dev);
	if (err) {
		dev_err(dev, "dpaa2_dpseci_setup() failed\n");
		goto err_dpseci_setup;
	}

	/* DPIO */
	err = dpaa2_dpseci_dpio_setup(priv);
	if (err) {
		if (err != -EPROBE_DEFER)
			dev_err(dev, "dpaa2_dpseci_dpio_setup() failed\n");
		goto err_dpio_setup;
	}

	/* DPSECI binding to DPIO */
	err = dpaa2_dpseci_bind(priv);
	if (err) {
		dev_err(dev, "dpaa2_dpseci_bind() failed\n");
		goto err_bind;
	}

	/* DPSECI enable */
	err = dpaa2_dpseci_enable(priv);
	if (err) {
		dev_err(dev, "dpaa2_dpseci_enable() failed\n");
		goto err_bind;
	}

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		struct caam_skcipher_alg *t_alg = driver_algs + i;
		u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!priv->sec_attr.des_acc_num &&
		    (alg_sel == OP_ALG_ALGSEL_3DES ||
		     alg_sel == OP_ALG_ALGSEL_DES))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!priv->sec_attr.aes_acc_num &&
		    alg_sel == OP_ALG_ALGSEL_AES)
			continue;

		/* Skip CHACHA20 algorithms if not supported by device */
		if (alg_sel == OP_ALG_ALGSEL_CHACHA20 &&
		    !priv->sec_attr.ccha_acc_num)
			continue;

		t_alg->caam.dev = dev;
		caam_skcipher_alg_init(t_alg);

		err = crypto_register_skcipher(&t_alg->skcipher);
		if (err) {
			dev_warn(dev, "%s alg registration failed: %d\n",
				 t_alg->skcipher.base.cra_driver_name, err);
			continue;
		}

		t_alg->registered = true;
		registered = true;
	}

	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;
		u32 c1_alg_sel = t_alg->caam.class1_alg_type &
				 OP_ALG_ALGSEL_MASK;
		u32 c2_alg_sel = t_alg->caam.class2_alg_type &
				 OP_ALG_ALGSEL_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!priv->sec_attr.des_acc_num &&
		    (c1_alg_sel == OP_ALG_ALGSEL_3DES ||
		     c1_alg_sel == OP_ALG_ALGSEL_DES))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!priv->sec_attr.aes_acc_num &&
		    c1_alg_sel == OP_ALG_ALGSEL_AES)
			continue;

		/* Skip CHACHA20 algorithms if not supported by device */
		if (c1_alg_sel == OP_ALG_ALGSEL_CHACHA20 &&
		    !priv->sec_attr.ccha_acc_num)
			continue;

		/* Skip POLY1305 algorithms if not supported by device */
		if (c2_alg_sel == OP_ALG_ALGSEL_POLY1305 &&
		    !priv->sec_attr.ptha_acc_num)
			continue;

		/*
		 * Skip algorithms requiring message digests
		 * if MD not supported by device.
		 */
		if ((c2_alg_sel & ~OP_ALG_ALGSEL_SUBMASK) == 0x40 &&
		    !priv->sec_attr.md_acc_num)
			continue;

		t_alg->caam.dev = dev;
		caam_aead_alg_init(t_alg);

		err = crypto_register_aead(&t_alg->aead);
		if (err) {
			dev_warn(dev, "%s alg registration failed: %d\n",
				 t_alg->aead.base.cra_driver_name, err);
			continue;
		}

		t_alg->registered = true;
		registered = true;
	}
	if (registered)
		dev_info(dev, "algorithms registered in /proc/crypto\n");

	/* register hash algorithms the device supports */
	INIT_LIST_HEAD(&hash_list);

	/*
	 * Skip registration of any hashing algorithms if MD block
	 * is not present.
	 */
	if (!priv->sec_attr.md_acc_num)
		return 0;

	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
		struct caam_hash_alg *t_alg;
		struct caam_hash_template *alg = driver_hash + i;

		/* register hmac version */
		t_alg = caam_hash_alloc(dev, alg, true);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			dev_warn(dev, "%s hash alg allocation failed: %d\n",
				 alg->driver_name, err);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			dev_warn(dev, "%s alg registration failed: %d\n",
				 t_alg->ahash_alg.halg.base.cra_driver_name,
				 err);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);

		/* register unkeyed version */
		t_alg = caam_hash_alloc(dev, alg, false);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			dev_warn(dev, "%s alg allocation failed: %d\n",
				 alg->driver_name, err);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			dev_warn(dev, "%s alg registration failed: %d\n",
				 t_alg->ahash_alg.halg.base.cra_driver_name,
				 err);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);
	}
	if (!list_empty(&hash_list))
		dev_info(dev, "hash algorithms registered in /proc/crypto\n");

	return err;

err_bind:
	dpaa2_dpseci_dpio_free(priv);
err_dpio_setup:
	dpaa2_dpseci_free(priv);
err_dpseci_setup:
	free_percpu(priv->ppriv);
err_alloc_ppriv:
	fsl_mc_portal_free(priv->mc_io);
err_dma_mask:
	kmem_cache_destroy(qi_cache);

	return err;
}

static int __cold dpaa2_caam_remove(struct fsl_mc_device *ls_dev)
{
	struct device *dev;
	struct dpaa2_caam_priv *priv;
	int i;

	dev = &ls_dev->dev;
	priv = dev_get_drvdata(dev);

	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;

		if (t_alg->registered)
			crypto_unregister_aead(&t_alg->aead);
	}

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		struct caam_skcipher_alg *t_alg = driver_algs + i;

		if (t_alg->registered)
			crypto_unregister_skcipher(&t_alg->skcipher);
	}

	if (hash_list.next) {
		struct caam_hash_alg *t_hash_alg, *p;

		list_for_each_entry_safe(t_hash_alg, p, &hash_list, entry) {
			crypto_unregister_ahash(&t_hash_alg->ahash_alg);
			list_del(&t_hash_alg->entry);
			kfree(t_hash_alg);
		}
	}

	dpaa2_dpseci_disable(priv);
	dpaa2_dpseci_dpio_free(priv);
	dpaa2_dpseci_free(priv);
	free_percpu(priv->ppriv);
	fsl_mc_portal_free(priv->mc_io);
	kmem_cache_destroy(qi_cache);

	return 0;
}

int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req)
{
	struct dpaa2_fd fd;
	struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
	struct dpaa2_caam_priv_per_cpu *ppriv;
	int err = 0, i;

	if (IS_ERR(req))
		return PTR_ERR(req);

	if (priv->cscn_mem) {
		dma_sync_single_for_cpu(priv->dev, priv->cscn_dma,
					DPAA2_CSCN_SIZE,
					DMA_FROM_DEVICE);
		if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem_aligned))) {
			dev_dbg_ratelimited(dev, "Dropping request\n");
			return -EBUSY;
		}
	}

	dpaa2_fl_set_flc(&req->fd_flt[1], req->flc_dma);

	req->fd_flt_dma = dma_map_single(dev, req->fd_flt, sizeof(req->fd_flt),
					 DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, req->fd_flt_dma)) {
		dev_err(dev, "DMA mapping error for QI enqueue request\n");
		goto err_out;
	}

	memset(&fd, 0, sizeof(fd));
	dpaa2_fd_set_format(&fd, dpaa2_fd_list);
	dpaa2_fd_set_addr(&fd, req->fd_flt_dma);
	dpaa2_fd_set_len(&fd, dpaa2_fl_get_len(&req->fd_flt[1]));
	dpaa2_fd_set_flc(&fd, req->flc_dma);

	ppriv = this_cpu_ptr(priv->ppriv);
	for (i = 0; i < (priv->dpseci_attr.num_tx_queues << 1); i++) {
		err = dpaa2_io_service_enqueue_fq(ppriv->dpio, ppriv->req_fqid,
						  &fd);
		if (err != -EBUSY)
			break;

		cpu_relax();
	}

	if (unlikely(err)) {
		dev_err_ratelimited(dev, "Error enqueuing frame: %d\n", err);
		goto err_out;
	}

	return -EINPROGRESS;

err_out:
	dma_unmap_single(dev, req->fd_flt_dma, sizeof(req->fd_flt),
			 DMA_BIDIRECTIONAL);
	return -EIO;
}
EXPORT_SYMBOL(dpaa2_caam_enqueue);
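
/*
 * Caller sketch (illustrative only, names partly hypothetical): the
 * algorithm-side code in this driver builds the frame list in req->fd_flt[],
 * points ->flc_dma at the per-operation flow context, sets a completion
 * callback and hands the request to dpaa2_caam_enqueue(). -EINPROGRESS means
 * the frame was enqueued and the callback will later be invoked from
 * dpaa2_caam_process_fd() with the frame's FRC as status:
 *
 *	req->flc = &ctx->flc[ENCRYPT];
 *	req->flc_dma = ctx->flc_dma[ENCRYPT];
 *	req->cbk = my_done_cb;		// void (*)(void *ctx, u32 status)
 *	req->ctx = &areq->base;
 *
 *	ret = dpaa2_caam_enqueue(ctx->dev, req);
 *	if (ret != -EINPROGRESS)
 *		goto unmap_and_fail;	// e.g. -EBUSY when congested
 */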

static const struct fsl_mc_device_id dpaa2_caam_match_id_table[] = {
	{
		.vendor = FSL_MC_VENDOR_FREESCALE,
		.obj_type = "dpseci",
	},
	{ .vendor = 0x0 }
};

static struct fsl_mc_driver dpaa2_caam_driver = {
	.driver = {
		.name		= KBUILD_MODNAME,
		.owner		= THIS_MODULE,
	},
	.probe		= dpaa2_caam_probe,
	.remove		= dpaa2_caam_remove,
	.match_id_table = dpaa2_caam_match_id_table
};

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Freescale DPAA2 CAAM Driver");

module_fsl_mc_driver(dpaa2_caam_driver);