// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * Copyright 2015-2016 Freescale Semiconductor Inc.
 * Copyright 2017-2018 NXP
 */

#include "compat.h"
#include "regs.h"
#include "caamalg_qi2.h"
#include "dpseci_cmd.h"
#include "desc_constr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "sg_sw_qm2.h"
#include "key_gen.h"
#include "caamalg_desc.h"
#include "caamhash_desc.h"
#include <linux/fsl/mc.h>
#include <soc/fsl/dpaa2-io.h>
#include <soc/fsl/dpaa2-fd.h>

#define CAAM_CRA_PRIORITY	2000

/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
#define CAAM_MAX_KEY_SIZE	(AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE + \
				 SHA512_DIGEST_SIZE * 2)

/*
 * This is a cache of buffers, from which the users of CAAM QI driver
 * can allocate short buffers. It's speedier than doing kmalloc on the hotpath.
 * NOTE: A more elegant solution would be to have some headroom in the frames
 *       being processed. This can be added by the dpaa2-eth driver. This would
 *       pose a problem for userspace application processing which cannot
 *       know of this limitation. So for now, this will work.
 * NOTE: The memcache is SMP-safe. No need to handle spinlocks in-here
 */
static struct kmem_cache *qi_cache;

struct caam_alg_entry {
	struct device *dev;
	int class1_alg_type;
	int class2_alg_type;
	bool rfc3686;
	bool geniv;
};

struct caam_aead_alg {
	struct aead_alg aead;
	struct caam_alg_entry caam;
	bool registered;
};

struct caam_skcipher_alg {
	struct skcipher_alg skcipher;
	struct caam_alg_entry caam;
	bool registered;
};

/**
 * caam_ctx - per-session context
 * @flc: Flow Contexts array
 * @key: [authentication key], encryption key
 * @flc_dma: I/O virtual addresses of the Flow Contexts
 * @key_dma: I/O virtual address of the key
 * @dir: DMA direction for mapping key and Flow Contexts
 * @dev: dpseci device
 * @adata: authentication algorithm details
 * @cdata: encryption algorithm details
 * @authsize: authentication tag (a.k.a. ICV / MAC) size
 */
struct caam_ctx {
	struct caam_flc flc[NUM_OP];
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t flc_dma[NUM_OP];
	dma_addr_t key_dma;
	enum dma_data_direction dir;
	struct device *dev;
	struct alginfo adata;
	struct alginfo cdata;
	unsigned int authsize;
};

static void *dpaa2_caam_iova_to_virt(struct dpaa2_caam_priv *priv,
				     dma_addr_t iova_addr)
{
	phys_addr_t phys_addr;

	phys_addr = priv->domain ? iommu_iova_to_phys(priv->domain, iova_addr) :
				   iova_addr;

	return phys_to_virt(phys_addr);
}

/*
 * qi_cache_zalloc - Allocate buffers from CAAM-QI cache
 *
 * Allocate data on the hotpath. Instead of using kzalloc, one can use the
 * services of the CAAM QI memory cache (backed by kmem_cache). The buffers
 * will have a size of CAAM_QI_MEMCACHE_SIZE, which should be sufficient for
 * hosting 16 SG entries.
 *
 * @flags - flags that would be used for the equivalent kzalloc(..) call
 *
 * Returns a pointer to a retrieved buffer on success or NULL on failure.
 */
static inline void *qi_cache_zalloc(gfp_t flags)
{
	return kmem_cache_zalloc(qi_cache, flags);
}

/*
 * qi_cache_free - Frees buffers allocated from CAAM-QI cache
 *
 * @obj - buffer previously allocated by qi_cache_zalloc
 *
 * No checking is being done, the call is a passthrough call to
 * kmem_cache_free(...)
 */
static inline void qi_cache_free(void *obj)
{
	kmem_cache_free(qi_cache, obj);
}

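/*
 * Usage sketch (illustrative only, mirroring how the edesc-allocation paths
 * below use the cache; not additional driver code): hotpath allocations pair
 * qi_cache_zalloc() with qi_cache_free() and return an error pointer when
 * the cache is exhausted:
 *
 *	struct aead_edesc *edesc;
 *
 *	edesc = qi_cache_zalloc(GFP_DMA | GFP_ATOMIC);
 *	if (unlikely(!edesc))
 *		return ERR_PTR(-ENOMEM);
 *	...
 *	qi_cache_free(edesc);
 */
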
static struct caam_request *to_caam_req(struct crypto_async_request *areq)
{
	switch (crypto_tfm_alg_type(areq->tfm)) {
	case CRYPTO_ALG_TYPE_SKCIPHER:
		return skcipher_request_ctx(skcipher_request_cast(areq));
	case CRYPTO_ALG_TYPE_AEAD:
		return aead_request_ctx(container_of(areq, struct aead_request,
						     base));
	case CRYPTO_ALG_TYPE_AHASH:
		return ahash_request_ctx(ahash_request_cast(areq));
	default:
		return ERR_PTR(-EINVAL);
	}
}

static void caam_unmap(struct device *dev, struct scatterlist *src,
		       struct scatterlist *dst, int src_nents,
		       int dst_nents, dma_addr_t iv_dma, int ivsize,
		       dma_addr_t qm_sg_dma, int qm_sg_bytes)
{
	if (dst != src) {
		if (src_nents)
			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
		if (dst_nents)
			dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
	} else {
		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
	}

	if (iv_dma)
		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);

	if (qm_sg_bytes)
		dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
}

static int aead_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 typeof(*alg), aead);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct device *dev = ctx->dev;
	struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
	struct caam_flc *flc;
	u32 *desc;
	u32 ctx1_iv_off = 0;
	u32 *nonce = NULL;
	unsigned int data_len[2];
	u32 inl_mask;
	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = alg->caam.rfc3686;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 */
	if (is_rfc3686) {
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
		nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
				ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
	}

	data_len[0] = ctx->adata.keylen_pad;
	data_len[1] = ctx->cdata.keylen;

	/* aead_encrypt shared descriptor */
	if (desc_inline_query((alg->caam.geniv ? DESC_QI_AEAD_GIVENC_LEN :
						 DESC_QI_AEAD_ENC_LEN) +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	if (inl_mask & 1)
		ctx->adata.key_virt = ctx->key;
	else
		ctx->adata.key_dma = ctx->key_dma;

	if (inl_mask & 2)
		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	else
		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;

	if (alg->caam.geniv)
		cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata,
					  ivsize, ctx->authsize, is_rfc3686,
					  nonce, ctx1_iv_off, true,
					  priv->sec_attr.era);
	else
		cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata,
				       ivsize, ctx->authsize, is_rfc3686, nonce,
				       ctx1_iv_off, true, priv->sec_attr.era);

	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/* aead_decrypt shared descriptor */
	if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	if (inl_mask & 1)
		ctx->adata.key_virt = ctx->key;
	else
		ctx->adata.key_dma = ctx->key_dma;

	if (inl_mask & 2)
		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	else
		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata,
			       ivsize, ctx->authsize, alg->caam.geniv,
			       is_rfc3686, nonce, ctx1_iv_off, true,
			       priv->sec_attr.era);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}

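/*
 * Layout sketch of ctx->key as indexed by the inline-query logic above
 * (illustrative only): the split authentication key is stored first, padded
 * to adata.keylen_pad, and the encryption key follows it, which is why the
 * cdata key_virt/key_dma are simply offset by adata.keylen_pad. For RFC3686
 * the nonce occupies the trailing CTR_RFC3686_NONCE_SIZE bytes of the
 * encryption key region:
 *
 *	ctx->key: [ split auth key + pad | enc key [| nonce] ]
 *	          |<-- adata.keylen_pad ->|<-- cdata.keylen -->|
 */
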
static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	aead_set_sh_desc(authenc);

	return 0;
}

static int aead_setkey(struct crypto_aead *aead, const u8 *key,
		       unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	struct crypto_authenc_keys keys;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	dev_dbg(dev, "keylen %d enckeylen %d authkeylen %d\n",
		keys.authkeylen + keys.enckeylen, keys.enckeylen,
		keys.authkeylen);
	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	ctx->adata.keylen = keys.authkeylen;
	ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
					      OP_ALG_ALGSEL_MASK);

	if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
		goto badkey;

	memcpy(ctx->key, keys.authkey, keys.authkeylen);
	memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
	dma_sync_single_for_device(dev, ctx->key_dma, ctx->adata.keylen_pad +
				   keys.enckeylen, ctx->dir);
	print_hex_dump_debug("ctx.key@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
			     ctx->adata.keylen_pad + keys.enckeylen, 1);

	ctx->cdata.keylen = keys.enckeylen;

	memzero_explicit(&keys, sizeof(keys));
	return aead_set_sh_desc(aead);
badkey:
	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
}

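/*
 * For reference (crypto API convention, not driver-specific): the key blob
 * parsed by crypto_authenc_extractkeys() above is an rtattr header carrying
 * the encryption key length, followed by the authentication key and then
 * the encryption key:
 *
 *	{ rtattr(CRYPTO_AUTHENC_KEYA_PARAM: enckeylen) | authkey | enckey }
 */
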
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
					   bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_request *req_ctx = aead_request_ctx(req);
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 typeof(*alg), aead);
	struct device *dev = ctx->dev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	struct aead_edesc *edesc;
	dma_addr_t qm_sg_dma, iv_dma = 0;
	int ivsize = 0;
	unsigned int authsize = ctx->authsize;
	int qm_sg_index = 0, qm_sg_nents = 0, qm_sg_bytes;
	int in_len, out_len;
	struct dpaa2_sg_entry *sg_table;

	/* allocate space for base edesc, link tables and IV */
	edesc = qi_cache_zalloc(GFP_DMA | flags);
	if (unlikely(!edesc)) {
		dev_err(dev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	if (unlikely(req->dst != req->src)) {
		src_nents = sg_nents_for_len(req->src, req->assoclen +
					     req->cryptlen);
		if (unlikely(src_nents < 0)) {
			dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
				req->assoclen + req->cryptlen);
			qi_cache_free(edesc);
			return ERR_PTR(src_nents);
		}

		dst_nents = sg_nents_for_len(req->dst, req->assoclen +
					     req->cryptlen +
					     (encrypt ? authsize :
							(-authsize)));
		if (unlikely(dst_nents < 0)) {
			dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
				req->assoclen + req->cryptlen +
				(encrypt ? authsize : (-authsize)));
			qi_cache_free(edesc);
			return ERR_PTR(dst_nents);
		}

		if (src_nents) {
			mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
						      DMA_TO_DEVICE);
			if (unlikely(!mapped_src_nents)) {
				dev_err(dev, "unable to map source\n");
				qi_cache_free(edesc);
				return ERR_PTR(-ENOMEM);
			}
		} else {
			mapped_src_nents = 0;
		}

		if (dst_nents) {
			mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
						      DMA_FROM_DEVICE);
			if (unlikely(!mapped_dst_nents)) {
				dev_err(dev, "unable to map destination\n");
				dma_unmap_sg(dev, req->src, src_nents,
					     DMA_TO_DEVICE);
				qi_cache_free(edesc);
				return ERR_PTR(-ENOMEM);
			}
		} else {
			mapped_dst_nents = 0;
		}
	} else {
		src_nents = sg_nents_for_len(req->src, req->assoclen +
					     req->cryptlen +
					     (encrypt ? authsize : 0));
		if (unlikely(src_nents < 0)) {
			dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
				req->assoclen + req->cryptlen +
				(encrypt ? authsize : 0));
			qi_cache_free(edesc);
			return ERR_PTR(src_nents);
		}

		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(dev, "unable to map source\n");
			qi_cache_free(edesc);
			return ERR_PTR(-ENOMEM);
		}
	}

	if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv)
		ivsize = crypto_aead_ivsize(aead);

	/*
	 * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
	 * Input is not contiguous.
	 */
	qm_sg_nents = 1 + !!ivsize + mapped_src_nents +
		      (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
	sg_table = &edesc->sgt[0];
	qm_sg_bytes = qm_sg_nents * sizeof(*sg_table);
	if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
		     CAAM_QI_MEMCACHE_SIZE)) {
		dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
			qm_sg_nents, ivsize);
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	if (ivsize) {
		u8 *iv = (u8 *)(sg_table + qm_sg_nents);

		/* Make sure IV is located in a DMAable area */
		memcpy(iv, req->iv, ivsize);

		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, iv_dma)) {
			dev_err(dev, "unable to map IV\n");
			caam_unmap(dev, req->src, req->dst, src_nents,
				   dst_nents, 0, 0, 0, 0);
			qi_cache_free(edesc);
			return ERR_PTR(-ENOMEM);
		}
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;

	if ((alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK) ==
	    OP_ALG_ALGSEL_CHACHA20 && ivsize != CHACHAPOLY_IV_SIZE)
		/*
		 * The associated data comes already with the IV but we need
		 * to skip it when we authenticate or encrypt...
		 */
		edesc->assoclen = cpu_to_caam32(req->assoclen - ivsize);
	else
		edesc->assoclen = cpu_to_caam32(req->assoclen);
	edesc->assoclen_dma = dma_map_single(dev, &edesc->assoclen, 4,
					     DMA_TO_DEVICE);
	if (dma_mapping_error(dev, edesc->assoclen_dma)) {
		dev_err(dev, "unable to map assoclen\n");
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
	qm_sg_index++;
	if (ivsize) {
		dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
		qm_sg_index++;
	}
	sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
	qm_sg_index += mapped_src_nents;

	if (mapped_dst_nents > 1)
		sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
				 qm_sg_index, 0);

	qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, qm_sg_dma)) {
		dev_err(dev, "unable to map S/G table\n");
		dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->qm_sg_dma = qm_sg_dma;
	edesc->qm_sg_bytes = qm_sg_bytes;

	out_len = req->assoclen + req->cryptlen +
		  (encrypt ? ctx->authsize : (-ctx->authsize));
	in_len = 4 + ivsize + req->assoclen + req->cryptlen;

	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
	dpaa2_fl_set_final(in_fle, true);
	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
	dpaa2_fl_set_addr(in_fle, qm_sg_dma);
	dpaa2_fl_set_len(in_fle, in_len);

	if (req->dst == req->src) {
		if (mapped_src_nents == 1) {
			dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
			dpaa2_fl_set_addr(out_fle, sg_dma_address(req->src));
		} else {
			dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
			dpaa2_fl_set_addr(out_fle, qm_sg_dma +
					  (1 + !!ivsize) * sizeof(*sg_table));
		}
	} else if (mapped_dst_nents == 1) {
		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
		dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
	} else {
		dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
		dpaa2_fl_set_addr(out_fle, qm_sg_dma + qm_sg_index *
				  sizeof(*sg_table));
	}

	dpaa2_fl_set_len(out_fle, out_len);

	return edesc;
}

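/*
 * Resulting input S/G table layout (illustrative sketch): the table built
 * above is consumed through in_fle as a single frame list:
 *
 *	sg_table[0]           -> assoclen (4 bytes)
 *	sg_table[1]           -> IV (only when ivsize != 0)
 *	sg_table[next]        -> req->src (mapped_src_nents entries)
 *	sg_table[qm_sg_index] -> req->dst (only when mapped_dst_nents > 1)
 *
 * which is why in_len = 4 + ivsize + req->assoclen + req->cryptlen.
 */
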
static int chachapoly_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct device *dev = ctx->dev;
	struct caam_flc *flc;
	u32 *desc;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
			       ctx->authsize, true, true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
			       ctx->authsize, false, true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}

static int chachapoly_setauthsize(struct crypto_aead *aead,
				  unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);

	if (authsize != POLY1305_DIGEST_SIZE)
		return -EINVAL;

	ctx->authsize = authsize;
	return chachapoly_set_sh_desc(aead);
}

static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
			     unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;

	if (keylen != CHACHA_KEY_SIZE + saltlen) {
		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	ctx->cdata.key_virt = key;
	ctx->cdata.keylen = keylen - saltlen;

	return chachapoly_set_sh_desc(aead);
}

static int gcm_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_flc *flc;
	u32 *desc;
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * AES GCM encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}

static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	gcm_set_sh_desc(authenc);

	return 0;
}

static int gcm_setkey(struct crypto_aead *aead,
		      const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;

	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	memcpy(ctx->key, key, keylen);
	dma_sync_single_for_device(dev, ctx->key_dma, keylen, ctx->dir);
	ctx->cdata.keylen = keylen;

	return gcm_set_sh_desc(aead);
}

static int rfc4106_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_flc *flc;
	u32 *desc;
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	ctx->cdata.key_virt = ctx->key;

	/*
	 * RFC4106 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
				  true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
				  true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}

static int rfc4106_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	rfc4106_set_sh_desc(authenc);

	return 0;
}

static int rfc4106_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;

	if (keylen < 4)
		return -EINVAL;

	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	memcpy(ctx->key, key, keylen);
	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->cdata.keylen = keylen - 4;
	dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
				   ctx->dir);

	return rfc4106_set_sh_desc(aead);
}

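/*
 * Key material layout for rfc4106/rfc4543 (per the RFC convention, not
 * driver-specific): the blob handed to setkey is the AES key followed by a
 * 4-byte salt, and only the AES key part is counted in cdata.keylen:
 *
 *	key = { AES key (16/24/32 bytes) | salt (4 bytes) }
 */
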
static int rfc4543_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_flc *flc;
	u32 *desc;
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	ctx->cdata.key_virt = ctx->key;

	/*
	 * RFC4543 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
				  true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
				  true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}

static int rfc4543_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	rfc4543_set_sh_desc(authenc);

	return 0;
}

static int rfc4543_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;

	if (keylen < 4)
		return -EINVAL;

	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	memcpy(ctx->key, key, keylen);
	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->cdata.keylen = keylen - 4;
	dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
				   ctx->dir);

	return rfc4543_set_sh_desc(aead);
}

static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
			   unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct caam_skcipher_alg *alg =
		container_of(crypto_skcipher_alg(skcipher),
			     struct caam_skcipher_alg, skcipher);
	struct device *dev = ctx->dev;
	struct caam_flc *flc;
	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
	u32 *desc;
	u32 ctx1_iv_off = 0;
	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128) &&
			      ((ctx->cdata.algtype & OP_ALG_ALGSEL_MASK) !=
			       OP_ALG_ALGSEL_CHACHA20);
	const bool is_rfc3686 = alg->caam.rfc3686;

	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	| CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 *	| *key = {KEY, NONCE}
	 */
	if (is_rfc3686) {
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
		keylen -= CTR_RFC3686_NONCE_SIZE;
	}

	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = key;
	ctx->cdata.key_inline = true;

	/* skcipher_encrypt shared descriptor */
	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_skcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
				   ctx1_iv_off);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/* skcipher_decrypt shared descriptor */
	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_skcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
				   ctx1_iv_off);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}

static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
			       unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct device *dev = ctx->dev;
	struct caam_flc *flc;
	u32 *desc;

	if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
		dev_err(dev, "key size mismatch\n");
		crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = key;
	ctx->cdata.key_inline = true;

	/* xts_skcipher_encrypt shared descriptor */
	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_xts_skcipher_encap(desc, &ctx->cdata);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/* xts_skcipher_decrypt shared descriptor */
	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_xts_skcipher_decap(desc, &ctx->cdata);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}

static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_request *req_ctx = skcipher_request_ctx(req);
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct device *dev = ctx->dev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	struct skcipher_edesc *edesc;
	dma_addr_t iv_dma;
	u8 *iv;
	int ivsize = crypto_skcipher_ivsize(skcipher);
	int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
	struct dpaa2_sg_entry *sg_table;

	src_nents = sg_nents_for_len(req->src, req->cryptlen);
	if (unlikely(src_nents < 0)) {
		dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
			req->cryptlen);
		return ERR_PTR(src_nents);
	}

	if (unlikely(req->dst != req->src)) {
		dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
		if (unlikely(dst_nents < 0)) {
			dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
				req->cryptlen);
			return ERR_PTR(dst_nents);
		}

		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
					      DMA_TO_DEVICE);
		if (unlikely(!mapped_src_nents)) {
			dev_err(dev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}

		mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
					      DMA_FROM_DEVICE);
		if (unlikely(!mapped_dst_nents)) {
			dev_err(dev, "unable to map destination\n");
			dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(dev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}
	}

	qm_sg_ents = 1 + mapped_src_nents;
	dst_sg_idx = qm_sg_ents;

	qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
	qm_sg_bytes = qm_sg_ents * sizeof(struct dpaa2_sg_entry);
	if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
		     ivsize > CAAM_QI_MEMCACHE_SIZE)) {
		dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
			qm_sg_ents, ivsize);
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	/* allocate space for base edesc, link tables and IV */
	edesc = qi_cache_zalloc(GFP_DMA | flags);
	if (unlikely(!edesc)) {
		dev_err(dev, "could not allocate extended descriptor\n");
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	/* Make sure IV is located in a DMAable area */
	sg_table = &edesc->sgt[0];
	iv = (u8 *)(sg_table + qm_sg_ents);
	memcpy(iv, req->iv, ivsize);

	iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, iv_dma)) {
		dev_err(dev, "unable to map IV\n");
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	edesc->qm_sg_bytes = qm_sg_bytes;

	dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
	sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);

	if (mapped_dst_nents > 1)
		sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
				 dst_sg_idx, 0);

	edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, edesc->qm_sg_dma)) {
		dev_err(dev, "unable to map S/G table\n");
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
	dpaa2_fl_set_final(in_fle, true);
	dpaa2_fl_set_len(in_fle, req->cryptlen + ivsize);
	dpaa2_fl_set_len(out_fle, req->cryptlen);

	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);

	if (req->src == req->dst) {
		dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
		dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma +
				  sizeof(*sg_table));
	} else if (mapped_dst_nents > 1) {
		dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
		dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx *
				  sizeof(*sg_table));
	} else {
		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
		dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
	}

	return edesc;
}

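/*
 * Input S/G table layout built above (illustrative sketch):
 *
 *	sg_table[0]          -> IV
 *	sg_table[1..]        -> req->src (mapped_src_nents entries)
 *	sg_table[dst_sg_idx] -> req->dst (only when mapped_dst_nents > 1)
 *
 * For in-place requests the output frame-list entry points at sg_table[1],
 * i.e. qm_sg_dma + sizeof(*sg_table), skipping the IV entry.
 */
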
static void aead_unmap(struct device *dev, struct aead_edesc *edesc,
		       struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	int ivsize = crypto_aead_ivsize(aead);

	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
	dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
}

static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
			   struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	int ivsize = crypto_skcipher_ivsize(skcipher);

	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
}

static void aead_encrypt_done(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct aead_request *req = container_of(areq, struct aead_request,
						base);
	struct caam_request *req_ctx = to_caam_req(areq);
	struct aead_edesc *edesc = req_ctx->edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ecode = 0;

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

	if (unlikely(status)) {
		caam_qi2_strstatus(ctx->dev, status);
		ecode = -EIO;
	}

	aead_unmap(ctx->dev, edesc, req);
	qi_cache_free(edesc);
	aead_request_complete(req, ecode);
}

static void aead_decrypt_done(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct aead_request *req = container_of(areq, struct aead_request,
						base);
	struct caam_request *req_ctx = to_caam_req(areq);
	struct aead_edesc *edesc = req_ctx->edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ecode = 0;

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

	if (unlikely(status)) {
		caam_qi2_strstatus(ctx->dev, status);
		/*
		 * verify hw auth check passed else return -EBADMSG
		 */
		if ((status & JRSTA_CCBERR_ERRID_MASK) ==
		     JRSTA_CCBERR_ERRID_ICVCHK)
			ecode = -EBADMSG;
		else
			ecode = -EIO;
	}

	aead_unmap(ctx->dev, edesc, req);
	qi_cache_free(edesc);
	aead_request_complete(req, ecode);
}

static int aead_encrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct caam_request *caam_req = aead_request_ctx(req);
	int ret;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	caam_req->flc = &ctx->flc[ENCRYPT];
	caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
	caam_req->cbk = aead_encrypt_done;
	caam_req->ctx = &req->base;
	caam_req->edesc = edesc;
	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
	if (ret != -EINPROGRESS &&
	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
		aead_unmap(ctx->dev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}

static int aead_decrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct caam_request *caam_req = aead_request_ctx(req);
	int ret;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	caam_req->flc = &ctx->flc[DECRYPT];
	caam_req->flc_dma = ctx->flc_dma[DECRYPT];
	caam_req->cbk = aead_decrypt_done;
	caam_req->ctx = &req->base;
	caam_req->edesc = edesc;
	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
	if (ret != -EINPROGRESS &&
	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
		aead_unmap(ctx->dev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}

static int ipsec_gcm_encrypt(struct aead_request *req)
{
	if (req->assoclen < 8)
		return -EINVAL;

	return aead_encrypt(req);
}

static int ipsec_gcm_decrypt(struct aead_request *req)
{
	if (req->assoclen < 8)
		return -EINVAL;

	return aead_decrypt(req);
}

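/*
 * The checks above enforce the minimum associated data length for IPsec ESP
 * (the use case rfc4106/rfc4543 target): the AAD begins with the 4-byte SPI
 * and the 4-byte sequence number, so anything shorter than 8 bytes cannot be
 * a well-formed request.
 */
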
static void skcipher_encrypt_done(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct skcipher_request *req = skcipher_request_cast(areq);
	struct caam_request *req_ctx = to_caam_req(areq);
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct skcipher_edesc *edesc = req_ctx->edesc;
	int ecode = 0;
	int ivsize = crypto_skcipher_ivsize(skcipher);

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

	if (unlikely(status)) {
		caam_qi2_strstatus(ctx->dev, status);
		ecode = -EIO;
	}

	print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
			     edesc->src_nents > 1 ? 100 : ivsize, 1);
	caam_dump_sg(KERN_DEBUG, "dst @" __stringify(__LINE__)": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);

	skcipher_unmap(ctx->dev, edesc, req);

	/*
	 * The crypto API expects us to set the IV (req->iv) to the last
	 * ciphertext block. This is used e.g. by the CTS mode.
	 */
	scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen - ivsize,
				 ivsize, 0);

	qi_cache_free(edesc);
	skcipher_request_complete(req, ecode);
}

static void skcipher_decrypt_done(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct skcipher_request *req = skcipher_request_cast(areq);
	struct caam_request *req_ctx = to_caam_req(areq);
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct skcipher_edesc *edesc = req_ctx->edesc;
	int ecode = 0;
	int ivsize = crypto_skcipher_ivsize(skcipher);

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

	if (unlikely(status)) {
		caam_qi2_strstatus(ctx->dev, status);
		ecode = -EIO;
	}

	print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
			     edesc->src_nents > 1 ? 100 : ivsize, 1);
	caam_dump_sg(KERN_DEBUG, "dst @" __stringify(__LINE__)": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);

	skcipher_unmap(ctx->dev, edesc, req);
	qi_cache_free(edesc);
	skcipher_request_complete(req, ecode);
}

static int skcipher_encrypt(struct skcipher_request *req)
{
	struct skcipher_edesc *edesc;
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct caam_request *caam_req = skcipher_request_ctx(req);
	int ret;

	/* allocate extended descriptor */
	edesc = skcipher_edesc_alloc(req);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	caam_req->flc = &ctx->flc[ENCRYPT];
	caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
	caam_req->cbk = skcipher_encrypt_done;
	caam_req->ctx = &req->base;
	caam_req->edesc = edesc;
	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
	if (ret != -EINPROGRESS &&
	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
		skcipher_unmap(ctx->dev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}

static int skcipher_decrypt(struct skcipher_request *req)
{
	struct skcipher_edesc *edesc;
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct caam_request *caam_req = skcipher_request_ctx(req);
	int ivsize = crypto_skcipher_ivsize(skcipher);
	int ret;

	/* allocate extended descriptor */
	edesc = skcipher_edesc_alloc(req);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/*
	 * The crypto API expects us to set the IV (req->iv) to the last
	 * ciphertext block.
	 */
	scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen - ivsize,
				 ivsize, 0);

	caam_req->flc = &ctx->flc[DECRYPT];
	caam_req->flc_dma = ctx->flc_dma[DECRYPT];
	caam_req->cbk = skcipher_decrypt_done;
	caam_req->ctx = &req->base;
	caam_req->edesc = edesc;
	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
	if (ret != -EINPROGRESS &&
	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
		skcipher_unmap(ctx->dev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}

static int caam_cra_init(struct caam_ctx *ctx, struct caam_alg_entry *caam,
			 bool uses_dkp)
{
	dma_addr_t dma_addr;
	int i;

	/* copy descriptor header template value */
	ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;

	ctx->dev = caam->dev;
	ctx->dir = uses_dkp ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc,
					offsetof(struct caam_ctx, flc_dma),
					ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(ctx->dev, dma_addr)) {
		dev_err(ctx->dev, "unable to map key, shared descriptors\n");
		return -ENOMEM;
	}

	for (i = 0; i < NUM_OP; i++)
		ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
	ctx->key_dma = dma_addr + NUM_OP * sizeof(ctx->flc[0]);

	return 0;
}

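/*
 * DMA layout produced by the single mapping above (illustrative sketch):
 * the Flow Contexts and the key are mapped together, so the per-operation
 * handles are fixed offsets from the base I/O virtual address:
 *
 *	dma_addr + 0 * sizeof(ctx->flc[0])      -> ctx->flc[ENCRYPT]
 *	dma_addr + 1 * sizeof(ctx->flc[0])      -> ctx->flc[DECRYPT]
 *	dma_addr + NUM_OP * sizeof(ctx->flc[0]) -> ctx->key
 */
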
static int caam_cra_init_skcipher(struct crypto_skcipher *tfm)
{
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct caam_skcipher_alg *caam_alg =
		container_of(alg, typeof(*caam_alg), skcipher);

	crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_request));
	return caam_cra_init(crypto_skcipher_ctx(tfm), &caam_alg->caam, false);
}

static int caam_cra_init_aead(struct crypto_aead *tfm)
{
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
						      aead);

	crypto_aead_set_reqsize(tfm, sizeof(struct caam_request));
	return caam_cra_init(crypto_aead_ctx(tfm), &caam_alg->caam,
			     alg->setkey == aead_setkey);
}

static void caam_exit_common(struct caam_ctx *ctx)
{
	dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0],
			       offsetof(struct caam_ctx, flc_dma), ctx->dir,
			       DMA_ATTR_SKIP_CPU_SYNC);
}

static void caam_cra_exit(struct crypto_skcipher *tfm)
{
	caam_exit_common(crypto_skcipher_ctx(tfm));
}

static void caam_cra_exit_aead(struct crypto_aead *tfm)
{
	caam_exit_common(crypto_aead_ctx(tfm));
}

static struct caam_skcipher_alg driver_algs[] = {
	{
		.skcipher = {
			.base = {
				.cra_name = "cbc(aes)",
				.cra_driver_name = "cbc-aes-caam-qi2",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "cbc(des3_ede)",
				.cra_driver_name = "cbc-3des-caam-qi2",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "cbc(des)",
				.cra_driver_name = "cbc-des-caam-qi2",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "ctr(aes)",
				.cra_driver_name = "ctr-aes-caam-qi2",
				.cra_blocksize = 1,
			},
			.setkey = skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.chunksize = AES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_AES |
					OP_ALG_AAI_CTR_MOD128,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "rfc3686(ctr(aes))",
				.cra_driver_name = "rfc3686-ctr-aes-caam-qi2",
				.cra_blocksize = 1,
			},
			.setkey = skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE +
				       CTR_RFC3686_NONCE_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE +
				       CTR_RFC3686_NONCE_SIZE,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.chunksize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.rfc3686 = true,
		},
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "xts(aes)",
				.cra_driver_name = "xts-aes-caam-qi2",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = xts_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = 2 * AES_MIN_KEY_SIZE,
			.max_keysize = 2 * AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "chacha20",
				.cra_driver_name = "chacha20-caam-qi2",
				.cra_blocksize = 1,
			},
			.setkey = skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = CHACHA_KEY_SIZE,
			.max_keysize = CHACHA_KEY_SIZE,
			.ivsize = CHACHA_IV_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_CHACHA20,
	},
};

static struct caam_aead_alg driver_aeads[] = {
	{
		.aead = {
			.base = {
				.cra_name = "rfc4106(gcm(aes))",
				.cra_driver_name = "rfc4106-gcm-aes-caam-qi2",
				.cra_blocksize = 1,
			},
			.setkey = rfc4106_setkey,
			.setauthsize = rfc4106_setauthsize,
			.encrypt = ipsec_gcm_encrypt,
			.decrypt = ipsec_gcm_decrypt,
			.ivsize = GCM_RFC4106_IV_SIZE,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "rfc4543(gcm(aes))",
				.cra_driver_name = "rfc4543-gcm-aes-caam-qi2",
				.cra_blocksize = 1,
			},
			.setkey = rfc4543_setkey,
			.setauthsize = rfc4543_setauthsize,
			.encrypt = ipsec_gcm_encrypt,
			.decrypt = ipsec_gcm_decrypt,
			.ivsize = GCM_RFC4543_IV_SIZE,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
		},
	},
	/* Galois Counter Mode */
	{
		.aead = {
			.base = {
				.cra_name = "gcm(aes)",
				.cra_driver_name = "gcm-aes-caam-qi2",
				.cra_blocksize = 1,
			},
			.setkey = gcm_setkey,
			.setauthsize = gcm_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = GCM_AES_IV_SIZE,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
		},
	},
	/* single-pass ipsec_esp descriptor */
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(aes))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-aes-caam-qi2",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-"
						   "cbc-aes-caam-qi2",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-aes-caam-qi2",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha1-cbc-aes-caam-qi2",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-aes-caam-qi2",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha224-cbc-aes-caam-qi2",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-aes-caam-qi2",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha256-cbc-aes-"
						   "caam-qi2",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-aes-caam-qi2",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha384-cbc-aes-"
						   "caam-qi2",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-aes-caam-qi2",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha512-cbc-aes-"
						   "caam-qi2",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-des3_ede-caam-qi2",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-"
						   "cbc-des3_ede-caam-qi2",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-des3_ede-caam-qi2",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha1-"
						   "cbc-des3_ede-caam-qi2",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-des3_ede-caam-qi2",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha224-"
						   "cbc-des3_ede-caam-qi2",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-des3_ede-caam-qi2",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha256-"
						   "cbc-des3_ede-caam-qi2",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-des3_ede-caam-qi2",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha384-"
						   "cbc-des3_ede-caam-qi2",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-des3_ede-caam-qi2",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha512-"
						   "cbc-des3_ede-caam-qi2",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(des))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-des-caam-qi2",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-"
						   "cbc-des-caam-qi2",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-des-caam-qi2",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha1-cbc-des-caam-qi2",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-des-caam-qi2",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha224-cbc-des-"
						   "caam-qi2",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-des-caam-qi2",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
= aead_setkey
,
2327 .setauthsize
= aead_setauthsize
,
2328 .encrypt
= aead_encrypt
,
2329 .decrypt
= aead_decrypt
,
2330 .ivsize
= DES_BLOCK_SIZE
,
2331 .maxauthsize
= SHA256_DIGEST_SIZE
,
2334 .class1_alg_type
= OP_ALG_ALGSEL_DES
| OP_ALG_AAI_CBC
,
2335 .class2_alg_type
= OP_ALG_ALGSEL_SHA256
|
2336 OP_ALG_AAI_HMAC_PRECOMP
,
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha256-cbc-des-"
						   "caam-qi2",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-des-caam-qi2",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha384-cbc-des-"
						   "caam-qi2",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-des-caam-qi2",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha512-cbc-des-"
						   "caam-qi2",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),"
					    "rfc3686(ctr(aes)))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "rfc3686-ctr-aes-caam-qi2",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "seqiv(authenc("
					    "hmac(md5),rfc3686(ctr(aes))))",
				.cra_driver_name = "seqiv-authenc-hmac-md5-"
						   "rfc3686-ctr-aes-caam-qi2",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),"
					    "rfc3686(ctr(aes)))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "rfc3686-ctr-aes-caam-qi2",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "seqiv(authenc("
					    "hmac(sha1),rfc3686(ctr(aes))))",
				.cra_driver_name = "seqiv-authenc-hmac-sha1-"
						   "rfc3686-ctr-aes-caam-qi2",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),"
					    "rfc3686(ctr(aes)))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "rfc3686-ctr-aes-caam-qi2",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "seqiv(authenc("
					    "hmac(sha224),rfc3686(ctr(aes))))",
				.cra_driver_name = "seqiv-authenc-hmac-sha224-"
						   "rfc3686-ctr-aes-caam-qi2",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),"
					    "rfc3686(ctr(aes)))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "rfc3686-ctr-aes-caam-qi2",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "seqiv(authenc(hmac(sha256),"
					    "rfc3686(ctr(aes))))",
				.cra_driver_name = "seqiv-authenc-hmac-sha256-"
						   "rfc3686-ctr-aes-caam-qi2",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),"
					    "rfc3686(ctr(aes)))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "rfc3686-ctr-aes-caam-qi2",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "seqiv(authenc(hmac(sha384),"
					    "rfc3686(ctr(aes))))",
				.cra_driver_name = "seqiv-authenc-hmac-sha384-"
						   "rfc3686-ctr-aes-caam-qi2",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "rfc7539(chacha20,poly1305)",
				.cra_driver_name = "rfc7539-chacha20-poly1305-"
						   "caam-qi2",
				.cra_blocksize = 1,
			},
			.setkey = chachapoly_setkey,
			.setauthsize = chachapoly_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CHACHAPOLY_IV_SIZE,
			.maxauthsize = POLY1305_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
					   OP_ALG_AAI_AEAD,
			.class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
					   OP_ALG_AAI_AEAD,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "rfc7539esp(chacha20,poly1305)",
				.cra_driver_name = "rfc7539esp-chacha20-"
						   "poly1305-caam-qi2",
				.cra_blocksize = 1,
			},
			.setkey = chachapoly_setkey,
			.setauthsize = chachapoly_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = 8,
			.maxauthsize = POLY1305_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
					   OP_ALG_AAI_AEAD,
			.class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
					   OP_ALG_AAI_AEAD,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),"
					    "rfc3686(ctr(aes)))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "rfc3686-ctr-aes-caam-qi2",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "seqiv(authenc(hmac(sha512),"
					    "rfc3686(ctr(aes))))",
				.cra_driver_name = "seqiv-authenc-hmac-sha512-"
						   "rfc3686-ctr-aes-caam-qi2",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
			.geniv = true,
		},
	},
};
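
/*
 * Fill in the crypto API entry points and the generic algorithm properties
 * (module owner, priority, context size, flags) shared by all skcipher and
 * aead algorithms registered by this driver.
 */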
static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
{
	struct skcipher_alg *alg = &t_alg->skcipher;

	alg->base.cra_module = THIS_MODULE;
	alg->base.cra_priority = CAAM_CRA_PRIORITY;
	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;

	alg->init = caam_cra_init_skcipher;
	alg->exit = caam_cra_exit;
}

static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
{
	struct aead_alg *alg = &t_alg->aead;

	alg->base.cra_module = THIS_MODULE;
	alg->base.cra_priority = CAAM_CRA_PRIORITY;
	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;

	alg->init = caam_cra_init_aead;
	alg->exit = caam_cra_exit_aead;
}

/* max hash key is max split key size */
#define CAAM_MAX_HASH_KEY_SIZE (SHA512_DIGEST_SIZE * 2)

#define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE

/* caam context sizes for hashes: running digest + 8 */
#define HASH_MSG_LEN 8
#define MAX_CTX_LEN (HASH_MSG_LEN + SHA512_DIGEST_SIZE)

/*
 * caam_hash_ctx - ahash per-session context
 * @flc: Flow Contexts array
 * @flc_dma: I/O virtual addresses of the Flow Contexts
 * @dev: dpseci device
 * @ctx_len: size of Context Register
 * @adata: hashing algorithm details
 */
struct caam_hash_ctx {
	struct caam_flc flc[HASH_NUM_OP];
	dma_addr_t flc_dma[HASH_NUM_OP];
	struct device *dev;
	int ctx_len;
	struct alginfo adata;
};

/* ahash state */
struct caam_hash_state {
	struct caam_request caam_req;
	dma_addr_t buf_dma;
	dma_addr_t ctx_dma;
	int ctx_dma_len;
	u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_0;
	u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_1;
	u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
	int current_buf;
};

struct caam_export_state {
	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
	u8 caam_ctx[MAX_CTX_LEN];
	int buflen;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
};
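
/*
 * The request state keeps two bounce buffers for data smaller than a block;
 * current_buf selects the one being filled while alt_buf holds the remainder
 * carried over to the next request chunk.
 */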
static inline void switch_buf(struct caam_hash_state *state)
{
	state->current_buf ^= 1;
}

static inline u8 *current_buf(struct caam_hash_state *state)
{
	return state->current_buf ? state->buf_1 : state->buf_0;
}

static inline u8 *alt_buf(struct caam_hash_state *state)
{
	return state->current_buf ? state->buf_0 : state->buf_1;
}

static inline int *current_buflen(struct caam_hash_state *state)
{
	return state->current_buf ? &state->buflen_1 : &state->buflen_0;
}

static inline int *alt_buflen(struct caam_hash_state *state)
{
	return state->current_buf ? &state->buflen_0 : &state->buflen_1;
}

/* Map current buffer in state (if length > 0) and put it in link table */
static inline int buf_map_to_qm_sg(struct device *dev,
				   struct dpaa2_sg_entry *qm_sg,
				   struct caam_hash_state *state)
{
	int buflen = *current_buflen(state);

	if (!buflen)
		return 0;

	state->buf_dma = dma_map_single(dev, current_buf(state), buflen,
					DMA_TO_DEVICE);
	if (dma_mapping_error(dev, state->buf_dma)) {
		dev_err(dev, "unable to map buf\n");
		state->buf_dma = 0;
		return -ENOMEM;
	}

	dma_to_qm_sg_one(qm_sg, state->buf_dma, buflen, 0);

	return 0;
}

2926 /* Map state->caam_ctx, and add it to link table */
2927 static inline int ctx_map_to_qm_sg(struct device
*dev
,
2928 struct caam_hash_state
*state
, int ctx_len
,
2929 struct dpaa2_sg_entry
*qm_sg
, u32 flag
)
2931 state
->ctx_dma_len
= ctx_len
;
2932 state
->ctx_dma
= dma_map_single(dev
, state
->caam_ctx
, ctx_len
, flag
);
2933 if (dma_mapping_error(dev
, state
->ctx_dma
)) {
2934 dev_err(dev
, "unable to map ctx\n");
2939 dma_to_qm_sg_one(qm_sg
, state
->ctx_dma
, ctx_len
, 0);
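
/*
 * (Re)generate the UPDATE, UPDATE_FIRST, FINALIZE and DIGEST shared
 * descriptors for the current algorithm / key and sync them to the device.
 */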
static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
	struct caam_flc *flc;
	u32 *desc;

	/* ahash_update shared descriptor */
	flc = &ctx->flc[UPDATE];
	desc = flc->sh_desc;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
			  ctx->ctx_len, true, priv->sec_attr.era);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE],
				   desc_bytes(desc), DMA_BIDIRECTIONAL);
	print_hex_dump_debug("ahash update shdesc@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	/* ahash_update_first shared descriptor */
	flc = &ctx->flc[UPDATE_FIRST];
	desc = flc->sh_desc;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
			  ctx->ctx_len, false, priv->sec_attr.era);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE_FIRST],
				   desc_bytes(desc), DMA_BIDIRECTIONAL);
	print_hex_dump_debug("ahash update first shdesc@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	/* ahash_final shared descriptor */
	flc = &ctx->flc[FINALIZE];
	desc = flc->sh_desc;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
			  ctx->ctx_len, true, priv->sec_attr.era);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(ctx->dev, ctx->flc_dma[FINALIZE],
				   desc_bytes(desc), DMA_BIDIRECTIONAL);
	print_hex_dump_debug("ahash final shdesc@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	/* ahash_digest shared descriptor */
	flc = &ctx->flc[DIGEST];
	desc = flc->sh_desc;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
			  ctx->ctx_len, false, priv->sec_attr.era);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(ctx->dev, ctx->flc_dma[DIGEST],
				   desc_bytes(desc), DMA_BIDIRECTIONAL);
	print_hex_dump_debug("ahash digest shdesc@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	return 0;
}
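
/* Result and completion tracking for the synchronous key digest operation */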
struct split_key_sh_result {
	struct completion completion;
	int err;
	struct device *dev;
};

static void split_key_sh_done(void *cbk_ctx, u32 err)
{
	struct split_key_sh_result *res = cbk_ctx;

	dev_dbg(res->dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

	if (err)
		caam_qi2_strstatus(res->dev, err);

	res->err = err;
	complete(&res->completion);
}

/* Digest hash size if it is too large */
static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
			   u32 digestsize)
{
	struct caam_request *req_ctx;
	u32 *desc;
	struct split_key_sh_result result;
	dma_addr_t key_dma;
	struct caam_flc *flc;
	dma_addr_t flc_dma;
	int ret = -ENOMEM;
	struct dpaa2_fl_entry *in_fle, *out_fle;

	req_ctx = kzalloc(sizeof(*req_ctx), GFP_KERNEL | GFP_DMA);
	if (!req_ctx)
		return -ENOMEM;

	in_fle = &req_ctx->fd_flt[1];
	out_fle = &req_ctx->fd_flt[0];

	flc = kzalloc(sizeof(*flc), GFP_KERNEL | GFP_DMA);
	if (!flc)
		goto err_flc;

	key_dma = dma_map_single(ctx->dev, key, *keylen, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(ctx->dev, key_dma)) {
		dev_err(ctx->dev, "unable to map key memory\n");
		goto err_key_dma;
	}

	desc = flc->sh_desc;

	init_sh_desc(desc, 0);

	/* descriptor to perform unkeyed hash on key_in */
	append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
			 OP_ALG_AS_INITFINAL);
	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	flc_dma = dma_map_single(ctx->dev, flc, sizeof(flc->flc) +
				 desc_bytes(desc), DMA_TO_DEVICE);
	if (dma_mapping_error(ctx->dev, flc_dma)) {
		dev_err(ctx->dev, "unable to map shared descriptor\n");
		goto err_flc_dma;
	}

	dpaa2_fl_set_final(in_fle, true);
	dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
	dpaa2_fl_set_addr(in_fle, key_dma);
	dpaa2_fl_set_len(in_fle, *keylen);
	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
	dpaa2_fl_set_addr(out_fle, key_dma);
	dpaa2_fl_set_len(out_fle, digestsize);

	print_hex_dump_debug("key_in@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
	print_hex_dump_debug("shdesc@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	result.err = 0;
	init_completion(&result.completion);
	result.dev = ctx->dev;

	req_ctx->flc = flc;
	req_ctx->flc_dma = flc_dma;
	req_ctx->cbk = split_key_sh_done;
	req_ctx->ctx = &result;

	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
	if (ret == -EINPROGRESS) {
		/* in progress */
		wait_for_completion(&result.completion);
		ret = result.err;
		print_hex_dump_debug("digested key@" __stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, key,
				     digestsize, 1);
	}

	dma_unmap_single(ctx->dev, flc_dma, sizeof(flc->flc) + desc_bytes(desc),
			 DMA_TO_DEVICE);
err_flc_dma:
	dma_unmap_single(ctx->dev, key_dma, *keylen, DMA_BIDIRECTIONAL);
err_key_dma:
	kfree(flc);
err_flc:
	kfree(req_ctx);

	*keylen = digestsize;

	return ret;
}
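
/*
 * HMAC keys longer than the block size are first digested down to
 * digestsize bytes (see hash_digest_key() above) before being stored for
 * inlining into the shared descriptors.
 */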
static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
			unsigned int keylen)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	unsigned int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
	unsigned int digestsize = crypto_ahash_digestsize(ahash);
	int ret;
	u8 *hashed_key = NULL;

	dev_dbg(ctx->dev, "keylen %d blocksize %d\n", keylen, blocksize);

	if (keylen > blocksize) {
		hashed_key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
		if (!hashed_key)
			return -ENOMEM;
		ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize);
		if (ret)
			goto bad_free_key;
		key = hashed_key;
	}

	ctx->adata.keylen = keylen;
	ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
					      OP_ALG_ALGSEL_MASK);
	if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
		goto bad_free_key;

	ctx->adata.key_virt = key;
	ctx->adata.key_inline = true;

	ret = ahash_set_sh_desc(ahash);
	kfree(hashed_key);
	return ret;
bad_free_key:
	kfree(hashed_key);
	crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
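
/* Unmap the source scatterlist, S/G table and bounce buffer of a request */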
static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
			       struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	if (edesc->src_nents)
		dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);

	if (edesc->qm_sg_bytes)
		dma_unmap_single(dev, edesc->qm_sg_dma, edesc->qm_sg_bytes,
				 DMA_TO_DEVICE);

	if (state->buf_dma) {
		dma_unmap_single(dev, state->buf_dma, *current_buflen(state),
				 DMA_TO_DEVICE);
		state->buf_dma = 0;
	}
}

static inline void ahash_unmap_ctx(struct device *dev,
				   struct ahash_edesc *edesc,
				   struct ahash_request *req, u32 flag)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	if (state->ctx_dma) {
		dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag);
		state->ctx_dma = 0;
	}
	ahash_unmap(dev, edesc, req);
}
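
/*
 * Completion callbacks - the variants differ in the DMA direction used for
 * unmapping the context and in whether the digest is copied to req->result
 * or the internal bounce buffers are switched.
 */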
static void ahash_done(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct ahash_request *req = ahash_request_cast(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct ahash_edesc *edesc = state->caam_req.edesc;
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	int ecode = 0;

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

	if (unlikely(status)) {
		caam_qi2_strstatus(ctx->dev, status);
		ecode = -EIO;
	}

	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
	memcpy(req->result, state->caam_ctx, digestsize);
	qi_cache_free(edesc);

	print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
			     ctx->ctx_len, 1);

	req->base.complete(&req->base, ecode);
}

static void ahash_done_bi(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct ahash_request *req = ahash_request_cast(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct ahash_edesc *edesc = state->caam_req.edesc;
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int ecode = 0;

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

	if (unlikely(status)) {
		caam_qi2_strstatus(ctx->dev, status);
		ecode = -EIO;
	}

	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
	switch_buf(state);
	qi_cache_free(edesc);

	print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
			     ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump_debug("result@" __stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
				     crypto_ahash_digestsize(ahash), 1);

	req->base.complete(&req->base, ecode);
}

static void ahash_done_ctx_src(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct ahash_request *req = ahash_request_cast(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct ahash_edesc *edesc = state->caam_req.edesc;
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	int ecode = 0;

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

	if (unlikely(status)) {
		caam_qi2_strstatus(ctx->dev, status);
		ecode = -EIO;
	}

	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
	memcpy(req->result, state->caam_ctx, digestsize);
	qi_cache_free(edesc);

	print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
			     ctx->ctx_len, 1);

	req->base.complete(&req->base, ecode);
}

static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct ahash_request *req = ahash_request_cast(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct ahash_edesc *edesc = state->caam_req.edesc;
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int ecode = 0;

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

	if (unlikely(status)) {
		caam_qi2_strstatus(ctx->dev, status);
		ecode = -EIO;
	}

	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
	switch_buf(state);
	qi_cache_free(edesc);

	print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
			     ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump_debug("result@" __stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
				     crypto_ahash_digestsize(ahash), 1);

	req->base.complete(&req->base, ecode);
}
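
/*
 * Hash as many block-size multiples as possible through the UPDATE
 * descriptor, carrying the running digest in the context; the remainder is
 * buffered for the next call.
 */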
static int ahash_update_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct caam_request *req_ctx = &state->caam_req;
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = current_buf(state);
	int *buflen = current_buflen(state);
	u8 *next_buf = alt_buf(state);
	int *next_buflen = alt_buflen(state), last_buflen;
	int in_len = *buflen + req->nbytes, to_hash;
	int src_nents, mapped_nents, qm_sg_bytes, qm_sg_src_index;
	struct ahash_edesc *edesc;
	int ret = 0;

	last_buflen = *next_buflen;
	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		struct dpaa2_sg_entry *sg_table;

		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - (*next_buflen));
		if (src_nents < 0) {
			dev_err(ctx->dev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(ctx->dev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		/* allocate space for base edesc and link tables */
		edesc = qi_cache_zalloc(GFP_DMA | flags);
		if (!edesc) {
			dma_unmap_sg(ctx->dev, req->src, src_nents,
				     DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		qm_sg_src_index = 1 + (*buflen ? 1 : 0);
		qm_sg_bytes = (qm_sg_src_index + mapped_nents) *
			      sizeof(*sg_table);
		sg_table = &edesc->sgt[0];

		ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
				       DMA_BIDIRECTIONAL);
		if (ret)
			goto unmap_ctx;

		ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
		if (ret)
			goto unmap_ctx;

		if (mapped_nents) {
			sg_to_qm_sg_last(req->src, mapped_nents,
					 sg_table + qm_sg_src_index, 0);
			if (*next_buflen)
				scatterwalk_map_and_copy(next_buf, req->src,
							 to_hash - *buflen,
							 *next_buflen, 0);
		} else {
			dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1,
					   true);
		}

		edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
						  qm_sg_bytes, DMA_TO_DEVICE);
		if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
			dev_err(ctx->dev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}
		edesc->qm_sg_bytes = qm_sg_bytes;

		memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
		dpaa2_fl_set_final(in_fle, true);
		dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
		dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
		dpaa2_fl_set_len(in_fle, ctx->ctx_len + to_hash);
		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
		dpaa2_fl_set_addr(out_fle, state->ctx_dma);
		dpaa2_fl_set_len(out_fle, ctx->ctx_len);

		req_ctx->flc = &ctx->flc[UPDATE];
		req_ctx->flc_dma = ctx->flc_dma[UPDATE];
		req_ctx->cbk = ahash_done_bi;
		req_ctx->ctx = &req->base;
		req_ctx->edesc = edesc;

		ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
		if (ret != -EINPROGRESS &&
		    !(ret == -EBUSY &&
		      req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			goto unmap_ctx;
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;
		*next_buflen = last_buflen;
	}

	print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump_debug("next buf@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
			     1);

	return ret;
unmap_ctx:
	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
	qi_cache_free(edesc);
	return ret;
}
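
/* Finalize: hash the buffered remainder together with the running digest */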
static int ahash_final_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct caam_request *req_ctx = &state->caam_req;
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int buflen = *current_buflen(state);
	int qm_sg_bytes, qm_sg_src_index;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	struct dpaa2_sg_entry *sg_table;
	int ret;

	/* allocate space for base edesc and link tables */
	edesc = qi_cache_zalloc(GFP_DMA | flags);
	if (!edesc)
		return -ENOMEM;

	qm_sg_src_index = 1 + (buflen ? 1 : 0);
	qm_sg_bytes = qm_sg_src_index * sizeof(*sg_table);
	sg_table = &edesc->sgt[0];

	ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
			       DMA_BIDIRECTIONAL);
	if (ret)
		goto unmap_ctx;

	ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
	if (ret)
		goto unmap_ctx;

	dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1, true);

	edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
		dev_err(ctx->dev, "unable to map S/G table\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}
	edesc->qm_sg_bytes = qm_sg_bytes;

	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
	dpaa2_fl_set_final(in_fle, true);
	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
	dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen);
	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
	dpaa2_fl_set_len(out_fle, digestsize);

	req_ctx->flc = &ctx->flc[FINALIZE];
	req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
	req_ctx->cbk = ahash_done_ctx_src;
	req_ctx->ctx = &req->base;
	req_ctx->edesc = edesc;

	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
	if (ret == -EINPROGRESS ||
	    (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
		return ret;

unmap_ctx:
	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
	qi_cache_free(edesc);
	return ret;
}
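
/* Finup: like final, but also consumes the data still pending in req->src */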
static int ahash_finup_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct caam_request *req_ctx = &state->caam_req;
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int buflen = *current_buflen(state);
	int qm_sg_bytes, qm_sg_src_index;
	int src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	struct dpaa2_sg_entry *sg_table;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(ctx->dev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(ctx->dev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	/* allocate space for base edesc and link tables */
	edesc = qi_cache_zalloc(GFP_DMA | flags);
	if (!edesc) {
		dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	edesc->src_nents = src_nents;
	qm_sg_src_index = 1 + (buflen ? 1 : 0);
	qm_sg_bytes = (qm_sg_src_index + mapped_nents) * sizeof(*sg_table);
	sg_table = &edesc->sgt[0];

	ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
			       DMA_BIDIRECTIONAL);
	if (ret)
		goto unmap_ctx;

	ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
	if (ret)
		goto unmap_ctx;

	sg_to_qm_sg_last(req->src, mapped_nents, sg_table + qm_sg_src_index, 0);

	edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
		dev_err(ctx->dev, "unable to map S/G table\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}
	edesc->qm_sg_bytes = qm_sg_bytes;

	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
	dpaa2_fl_set_final(in_fle, true);
	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
	dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen + req->nbytes);
	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
	dpaa2_fl_set_len(out_fle, digestsize);

	req_ctx->flc = &ctx->flc[FINALIZE];
	req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
	req_ctx->cbk = ahash_done_ctx_src;
	req_ctx->ctx = &req->base;
	req_ctx->edesc = edesc;

	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
	if (ret == -EINPROGRESS ||
	    (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
		return ret;

unmap_ctx:
	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
	qi_cache_free(edesc);
	return ret;
}
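
/* One-shot digest of req->src through the DIGEST shared descriptor */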
static int ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct caam_request *req_ctx = &state->caam_req;
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int digestsize = crypto_ahash_digestsize(ahash);
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret = -ENOMEM;

	state->buf_dma = 0;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(ctx->dev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(ctx->dev, "unable to map source for DMA\n");
			return ret;
		}
	} else {
		mapped_nents = 0;
	}

	/* allocate space for base edesc and link tables */
	edesc = qi_cache_zalloc(GFP_DMA | flags);
	if (!edesc) {
		dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
		return ret;
	}

	edesc->src_nents = src_nents;
	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));

	if (mapped_nents > 1) {
		int qm_sg_bytes;
		struct dpaa2_sg_entry *sg_table = &edesc->sgt[0];

		qm_sg_bytes = mapped_nents * sizeof(*sg_table);
		sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0);
		edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
						  qm_sg_bytes, DMA_TO_DEVICE);
		if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
			dev_err(ctx->dev, "unable to map S/G table\n");
			goto unmap;
		}
		edesc->qm_sg_bytes = qm_sg_bytes;
		dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
		dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
	} else {
		dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
		dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
	}

	state->ctx_dma_len = digestsize;
	state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
					DMA_FROM_DEVICE);
	if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
		dev_err(ctx->dev, "unable to map ctx\n");
		state->ctx_dma = 0;
		goto unmap;
	}

	dpaa2_fl_set_final(in_fle, true);
	dpaa2_fl_set_len(in_fle, req->nbytes);
	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
	dpaa2_fl_set_len(out_fle, digestsize);

	req_ctx->flc = &ctx->flc[DIGEST];
	req_ctx->flc_dma = ctx->flc_dma[DIGEST];
	req_ctx->cbk = ahash_done;
	req_ctx->ctx = &req->base;
	req_ctx->edesc = edesc;
	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
	if (ret == -EINPROGRESS ||
	    (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
		return ret;

unmap:
	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
	qi_cache_free(edesc);
	return ret;
}
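
/* Final with no running digest: only the buffered bytes need hashing */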
static int ahash_final_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct caam_request *req_ctx = &state->caam_req;
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = current_buf(state);
	int buflen = *current_buflen(state);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret = -ENOMEM;

	/* allocate space for base edesc and link tables */
	edesc = qi_cache_zalloc(GFP_DMA | flags);
	if (!edesc)
		return ret;

	if (buflen) {
		state->buf_dma = dma_map_single(ctx->dev, buf, buflen,
						DMA_TO_DEVICE);
		if (dma_mapping_error(ctx->dev, state->buf_dma)) {
			dev_err(ctx->dev, "unable to map src\n");
			goto unmap;
		}
	}

	state->ctx_dma_len = digestsize;
	state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
					DMA_FROM_DEVICE);
	if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
		dev_err(ctx->dev, "unable to map ctx\n");
		state->ctx_dma = 0;
		goto unmap;
	}

	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
	dpaa2_fl_set_final(in_fle, true);
	/*
	 * crypto engine requires the input entry to be present when
	 * "frame list" FD is used.
	 * Since engine does not support FMT=2'b11 (unused entry type), leaving
	 * in_fle zeroized (except for "Final" flag) is the best option.
	 */
	if (buflen) {
		dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
		dpaa2_fl_set_addr(in_fle, state->buf_dma);
		dpaa2_fl_set_len(in_fle, buflen);
	}
	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
	dpaa2_fl_set_len(out_fle, digestsize);

	req_ctx->flc = &ctx->flc[DIGEST];
	req_ctx->flc_dma = ctx->flc_dma[DIGEST];
	req_ctx->cbk = ahash_done;
	req_ctx->ctx = &req->base;
	req_ctx->edesc = edesc;

	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
	if (ret == -EINPROGRESS ||
	    (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
		return ret;

unmap:
	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
	qi_cache_free(edesc);
	return ret;
}
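
/*
 * First update that produces a running digest: hashes through the
 * UPDATE_FIRST descriptor and switches the state to the context-carrying
 * handlers.
 */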
static int ahash_update_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct caam_request *req_ctx = &state->caam_req;
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = current_buf(state);
	int *buflen = current_buflen(state);
	u8 *next_buf = alt_buf(state);
	int *next_buflen = alt_buflen(state);
	int in_len = *buflen + req->nbytes, to_hash;
	int qm_sg_bytes, src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret = 0;

	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		struct dpaa2_sg_entry *sg_table;

		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - *next_buflen);
		if (src_nents < 0) {
			dev_err(ctx->dev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(ctx->dev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		/* allocate space for base edesc and link tables */
		edesc = qi_cache_zalloc(GFP_DMA | flags);
		if (!edesc) {
			dma_unmap_sg(ctx->dev, req->src, src_nents,
				     DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		qm_sg_bytes = (1 + mapped_nents) * sizeof(*sg_table);
		sg_table = &edesc->sgt[0];

		ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
		if (ret)
			goto unmap_ctx;

		sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0);

		if (*next_buflen)
			scatterwalk_map_and_copy(next_buf, req->src,
						 to_hash - *buflen,
						 *next_buflen, 0);

		edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
						  qm_sg_bytes, DMA_TO_DEVICE);
		if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
			dev_err(ctx->dev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}
		edesc->qm_sg_bytes = qm_sg_bytes;

		state->ctx_dma_len = ctx->ctx_len;
		state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
						ctx->ctx_len, DMA_FROM_DEVICE);
		if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
			dev_err(ctx->dev, "unable to map ctx\n");
			state->ctx_dma = 0;
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
		dpaa2_fl_set_final(in_fle, true);
		dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
		dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
		dpaa2_fl_set_len(in_fle, to_hash);
		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
		dpaa2_fl_set_addr(out_fle, state->ctx_dma);
		dpaa2_fl_set_len(out_fle, ctx->ctx_len);

		req_ctx->flc = &ctx->flc[UPDATE_FIRST];
		req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
		req_ctx->cbk = ahash_done_ctx_dst;
		req_ctx->ctx = &req->base;
		req_ctx->edesc = edesc;

		ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
		if (ret != -EINPROGRESS &&
		    !(ret == -EBUSY &&
		      req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			goto unmap_ctx;

		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;
		*next_buflen = 0;
	}

	print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump_debug("next buf@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
			     1);

	return ret;
unmap_ctx:
	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE);
	qi_cache_free(edesc);
	return ret;
}
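
/* Finup with no running digest: buffered bytes plus req->src in one shot */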
static int ahash_finup_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct caam_request *req_ctx = &state->caam_req;
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int buflen = *current_buflen(state);
	int qm_sg_bytes, src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	struct dpaa2_sg_entry *sg_table;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(ctx->dev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(ctx->dev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	/* allocate space for base edesc and link tables */
	edesc = qi_cache_zalloc(GFP_DMA | flags);
	if (!edesc) {
		dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	edesc->src_nents = src_nents;
	qm_sg_bytes = (2 + mapped_nents) * sizeof(*sg_table);
	sg_table = &edesc->sgt[0];

	ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
	if (ret)
		goto unmap;

	sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0);

	edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
		dev_err(ctx->dev, "unable to map S/G table\n");
		ret = -ENOMEM;
		goto unmap;
	}
	edesc->qm_sg_bytes = qm_sg_bytes;

	state->ctx_dma_len = digestsize;
	state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
					DMA_FROM_DEVICE);
	if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
		dev_err(ctx->dev, "unable to map ctx\n");
		state->ctx_dma = 0;
		ret = -ENOMEM;
		goto unmap;
	}

	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
	dpaa2_fl_set_final(in_fle, true);
	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
	dpaa2_fl_set_len(in_fle, buflen + req->nbytes);
	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
	dpaa2_fl_set_len(out_fle, digestsize);

	req_ctx->flc = &ctx->flc[DIGEST];
	req_ctx->flc_dma = ctx->flc_dma[DIGEST];
	req_ctx->cbk = ahash_done;
	req_ctx->ctx = &req->base;
	req_ctx->edesc = edesc;
	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
	if (ret != -EINPROGRESS &&
	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
		goto unmap;

	return ret;
unmap:
	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
	qi_cache_free(edesc);
	return -ENOMEM;
}
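
/*
 * Very first update on a request: data shorter than one block is only
 * buffered, otherwise it is hashed via UPDATE_FIRST and the remainder is
 * saved for later.
 */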
static int ahash_update_first(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct caam_request *req_ctx = &state->caam_req;
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	u8 *next_buf = alt_buf(state);
	int *next_buflen = alt_buflen(state);
	int to_hash;
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret = 0;

	*next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
				      1);
	to_hash = req->nbytes - *next_buflen;

	if (to_hash) {
		struct dpaa2_sg_entry *sg_table;

		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - (*next_buflen));
		if (src_nents < 0) {
			dev_err(ctx->dev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(ctx->dev, "unable to map source for DMA\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		/* allocate space for base edesc and link tables */
		edesc = qi_cache_zalloc(GFP_DMA | flags);
		if (!edesc) {
			dma_unmap_sg(ctx->dev, req->src, src_nents,
				     DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		sg_table = &edesc->sgt[0];

		memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
		dpaa2_fl_set_final(in_fle, true);
		dpaa2_fl_set_len(in_fle, to_hash);

		if (mapped_nents > 1) {
			int qm_sg_bytes;

			sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0);
			qm_sg_bytes = mapped_nents * sizeof(*sg_table);
			edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
							  qm_sg_bytes,
							  DMA_TO_DEVICE);
			if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
				dev_err(ctx->dev, "unable to map S/G table\n");
				ret = -ENOMEM;
				goto unmap_ctx;
			}
			edesc->qm_sg_bytes = qm_sg_bytes;
			dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
			dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
		} else {
			dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
			dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
		}

		if (*next_buflen)
			scatterwalk_map_and_copy(next_buf, req->src, to_hash,
						 *next_buflen, 0);

		state->ctx_dma_len = ctx->ctx_len;
		state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
						ctx->ctx_len, DMA_FROM_DEVICE);
		if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
			dev_err(ctx->dev, "unable to map ctx\n");
			state->ctx_dma = 0;
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
		dpaa2_fl_set_addr(out_fle, state->ctx_dma);
		dpaa2_fl_set_len(out_fle, ctx->ctx_len);

		req_ctx->flc = &ctx->flc[UPDATE_FIRST];
		req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
		req_ctx->cbk = ahash_done_ctx_dst;
		req_ctx->ctx = &req->base;
		req_ctx->edesc = edesc;

		ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
		if (ret != -EINPROGRESS &&
		    !(ret == -EBUSY && req->base.flags &
		      CRYPTO_TFM_REQ_MAY_BACKLOG))
			goto unmap_ctx;

		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		state->update = ahash_update_no_ctx;
		state->finup = ahash_finup_no_ctx;
		state->final = ahash_final_no_ctx;
		scatterwalk_map_and_copy(next_buf, req->src, 0,
					 req->nbytes, 0);
		switch_buf(state);
	}

	print_hex_dump_debug("next buf@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
			     1);

	return ret;
unmap_ctx:
	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE);
	qi_cache_free(edesc);
	return ret;
}

static int ahash_finup_first(struct ahash_request *req)
{
	return ahash_digest(req);
}

static int ahash_init(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	state->update = ahash_update_first;
	state->finup = ahash_finup_first;
	state->final = ahash_final_no_ctx;

	state->ctx_dma = 0;
	state->ctx_dma_len = 0;
	state->current_buf = 0;
	state->buf_dma = 0;
	state->buflen_0 = 0;
	state->buflen_1 = 0;

	return 0;
}

static int ahash_update(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->update(req);
}

static int ahash_finup(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->finup(req);
}

static int ahash_final(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->final(req);
}
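
/* Export / import a partial hash state so the operation can be resumed */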
static int ahash_export(struct ahash_request *req, void *out)
{
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct caam_export_state *export = out;
	int len;
	u8 *buf;

	if (state->current_buf) {
		buf = state->buf_1;
		len = state->buflen_1;
	} else {
		buf = state->buf_0;
		len = state->buflen_0;
	}

	memcpy(export->buf, buf, len);
	memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
	export->buflen = len;
	export->update = state->update;
	export->final = state->final;
	export->finup = state->finup;

	return 0;
}

static int ahash_import(struct ahash_request *req, const void *in)
{
	struct caam_hash_state *state = ahash_request_ctx(req);
	const struct caam_export_state *export = in;

	memset(state, 0, sizeof(*state));
	memcpy(state->buf_0, export->buf, export->buflen);
	memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
	state->buflen_0 = export->buflen;
	state->update = export->update;
	state->final = export->final;
	state->finup = export->finup;

	return 0;
}
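
/* Template describing both the unkeyed and the hmac variant of a hash */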
struct caam_hash_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	struct ahash_alg template_ahash;
	u32 alg_type;
};


/* ahash descriptors */
static struct caam_hash_template driver_hash[] = {
        {
                .name = "sha1",
                .driver_name = "sha1-caam-qi2",
                .hmac_name = "hmac(sha1)",
                .hmac_driver_name = "hmac-sha1-caam-qi2",
                .blocksize = SHA1_BLOCK_SIZE,
                .template_ahash = {
                        .init = ahash_init,
                        .update = ahash_update,
                        .final = ahash_final,
                        .finup = ahash_finup,
                        .digest = ahash_digest,
                        .export = ahash_export,
                        .import = ahash_import,
                        .setkey = ahash_setkey,
                        .halg = {
                                .digestsize = SHA1_DIGEST_SIZE,
                                .statesize = sizeof(struct caam_export_state),
                        },
                },
                .alg_type = OP_ALG_ALGSEL_SHA1,
        }, {
                .name = "sha224",
                .driver_name = "sha224-caam-qi2",
                .hmac_name = "hmac(sha224)",
                .hmac_driver_name = "hmac-sha224-caam-qi2",
                .blocksize = SHA224_BLOCK_SIZE,
                .template_ahash = {
                        .init = ahash_init,
                        .update = ahash_update,
                        .final = ahash_final,
                        .finup = ahash_finup,
                        .digest = ahash_digest,
                        .export = ahash_export,
                        .import = ahash_import,
                        .setkey = ahash_setkey,
                        .halg = {
                                .digestsize = SHA224_DIGEST_SIZE,
                                .statesize = sizeof(struct caam_export_state),
                        },
                },
                .alg_type = OP_ALG_ALGSEL_SHA224,
        }, {
                .name = "sha256",
                .driver_name = "sha256-caam-qi2",
                .hmac_name = "hmac(sha256)",
                .hmac_driver_name = "hmac-sha256-caam-qi2",
                .blocksize = SHA256_BLOCK_SIZE,
                .template_ahash = {
                        .init = ahash_init,
                        .update = ahash_update,
                        .final = ahash_final,
                        .finup = ahash_finup,
                        .digest = ahash_digest,
                        .export = ahash_export,
                        .import = ahash_import,
                        .setkey = ahash_setkey,
                        .halg = {
                                .digestsize = SHA256_DIGEST_SIZE,
                                .statesize = sizeof(struct caam_export_state),
                        },
                },
                .alg_type = OP_ALG_ALGSEL_SHA256,
        }, {
                .name = "sha384",
                .driver_name = "sha384-caam-qi2",
                .hmac_name = "hmac(sha384)",
                .hmac_driver_name = "hmac-sha384-caam-qi2",
                .blocksize = SHA384_BLOCK_SIZE,
                .template_ahash = {
                        .init = ahash_init,
                        .update = ahash_update,
                        .final = ahash_final,
                        .finup = ahash_finup,
                        .digest = ahash_digest,
                        .export = ahash_export,
                        .import = ahash_import,
                        .setkey = ahash_setkey,
                        .halg = {
                                .digestsize = SHA384_DIGEST_SIZE,
                                .statesize = sizeof(struct caam_export_state),
                        },
                },
                .alg_type = OP_ALG_ALGSEL_SHA384,
        }, {
                .name = "sha512",
                .driver_name = "sha512-caam-qi2",
                .hmac_name = "hmac(sha512)",
                .hmac_driver_name = "hmac-sha512-caam-qi2",
                .blocksize = SHA512_BLOCK_SIZE,
                .template_ahash = {
                        .init = ahash_init,
                        .update = ahash_update,
                        .final = ahash_final,
                        .finup = ahash_finup,
                        .digest = ahash_digest,
                        .export = ahash_export,
                        .import = ahash_import,
                        .setkey = ahash_setkey,
                        .halg = {
                                .digestsize = SHA512_DIGEST_SIZE,
                                .statesize = sizeof(struct caam_export_state),
                        },
                },
                .alg_type = OP_ALG_ALGSEL_SHA512,
        }, {
                .name = "md5",
                .driver_name = "md5-caam-qi2",
                .hmac_name = "hmac(md5)",
                .hmac_driver_name = "hmac-md5-caam-qi2",
                .blocksize = MD5_BLOCK_WORDS * 4,
                .template_ahash = {
                        .init = ahash_init,
                        .update = ahash_update,
                        .final = ahash_final,
                        .finup = ahash_finup,
                        .digest = ahash_digest,
                        .export = ahash_export,
                        .import = ahash_import,
                        .setkey = ahash_setkey,
                        .halg = {
                                .digestsize = MD5_DIGEST_SIZE,
                                .statesize = sizeof(struct caam_export_state),
                        },
                },
                .alg_type = OP_ALG_ALGSEL_MD5,
        }
};
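
/*
 * caam_hash_alg - an ahash algorithm instance built from a driver_hash
 * template and registered with the Crypto API; linked into hash_list so it
 * can be unregistered and freed at remove time.
 */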
struct caam_hash_alg {
        struct list_head entry;
        struct device *dev;
        int alg_type;
        struct ahash_alg ahash_alg;
};
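
/*
 * One-time transform setup: DMA-map the flow contexts (shared descriptors),
 * record the MDHA algorithm selector and the matching running-digest length,
 * size the per-request context, then build the shared descriptors.
 */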
static int caam_hash_cra_init(struct crypto_tfm *tfm)
{
        struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
        struct crypto_alg *base = tfm->__crt_alg;
        struct hash_alg_common *halg =
                container_of(base, struct hash_alg_common, base);
        struct ahash_alg *alg =
                container_of(halg, struct ahash_alg, halg);
        struct caam_hash_alg *caam_hash =
                container_of(alg, struct caam_hash_alg, ahash_alg);
        struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
        /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
        static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
                                         HASH_MSG_LEN + SHA1_DIGEST_SIZE,
                                         HASH_MSG_LEN + SHA224_DIGEST_SIZE,
                                         HASH_MSG_LEN + SHA256_DIGEST_SIZE,
                                         HASH_MSG_LEN + SHA384_DIGEST_SIZE,
                                         HASH_MSG_LEN + SHA512_DIGEST_SIZE };
        dma_addr_t dma_addr;
        int i;

        ctx->dev = caam_hash->dev;

        dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc, sizeof(ctx->flc),
                                        DMA_BIDIRECTIONAL,
                                        DMA_ATTR_SKIP_CPU_SYNC);
        if (dma_mapping_error(ctx->dev, dma_addr)) {
                dev_err(ctx->dev, "unable to map shared descriptors\n");
                return -ENOMEM;
        }

        for (i = 0; i < HASH_NUM_OP; i++)
                ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);

        /* copy descriptor header template value */
        ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;

        ctx->ctx_len = runninglen[(ctx->adata.algtype &
                                   OP_ALG_ALGSEL_SUBMASK) >>
                                  OP_ALG_ALGSEL_SHIFT];

        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct caam_hash_state));

        return ahash_set_sh_desc(ahash);
}

static void caam_hash_cra_exit(struct crypto_tfm *tfm)
{
        struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);

        dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0], sizeof(ctx->flc),
                               DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC);
}
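
/*
 * Allocate a caam_hash_alg from a driver_hash template. For keyed == true
 * the hmac names are used; for the unkeyed variant the plain names are used
 * and .setkey is cleared, since unkeyed hashes accept no key.
 */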
static struct caam_hash_alg *caam_hash_alloc(struct device *dev,
        struct caam_hash_template *template, bool keyed)
{
        struct caam_hash_alg *t_alg;
        struct ahash_alg *halg;
        struct crypto_alg *alg;

        t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
        if (!t_alg)
                return ERR_PTR(-ENOMEM);

        t_alg->ahash_alg = template->template_ahash;
        halg = &t_alg->ahash_alg;
        alg = &halg->halg.base;

        if (keyed) {
                snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
                         template->hmac_name);
                snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
                         template->hmac_driver_name);
        } else {
                snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
                         template->name);
                snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
                         template->driver_name);
                t_alg->ahash_alg.setkey = NULL;
        }
        alg->cra_module = THIS_MODULE;
        alg->cra_init = caam_hash_cra_init;
        alg->cra_exit = caam_hash_cra_exit;
        alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
        alg->cra_priority = CAAM_CRA_PRIORITY;
        alg->cra_blocksize = template->blocksize;
        alg->cra_alignmask = 0;
        alg->cra_flags = CRYPTO_ALG_ASYNC;

        t_alg->alg_type = template->alg_type;
        t_alg->dev = dev;

        return t_alg;
}

static void dpaa2_caam_fqdan_cb(struct dpaa2_io_notification_ctx *nctx)
{
        struct dpaa2_caam_priv_per_cpu *ppriv;

        ppriv = container_of(nctx, struct dpaa2_caam_priv_per_cpu, nctx);
        napi_schedule_irqoff(&ppriv->napi);
}
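
/*
 * Set up one DPIO portal notification context and dequeue store per queue
 * pair, each affine to a different online CPU: an FQDAN on the response FQ
 * ends up scheduling that CPU's NAPI instance via dpaa2_caam_fqdan_cb().
 */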
static int __cold dpaa2_dpseci_dpio_setup(struct dpaa2_caam_priv *priv)
{
        struct device *dev = priv->dev;
        struct dpaa2_io_notification_ctx *nctx;
        struct dpaa2_caam_priv_per_cpu *ppriv;
        int err, i = 0, cpu;

        for_each_online_cpu(cpu) {
                ppriv = per_cpu_ptr(priv->ppriv, cpu);
                ppriv->priv = priv;
                nctx = &ppriv->nctx;
                nctx->is_cdan = 0;
                nctx->id = ppriv->rsp_fqid;
                nctx->desired_cpu = cpu;
                nctx->cb = dpaa2_caam_fqdan_cb;

                /* Register notification callbacks */
                ppriv->dpio = dpaa2_io_service_select(cpu);
                err = dpaa2_io_service_register(ppriv->dpio, nctx, dev);
                if (unlikely(err)) {
                        dev_dbg(dev, "No affine DPIO for cpu %d\n", cpu);
                        nctx->cb = NULL;
                        /*
                         * If no affine DPIO for this core, there's probably
                         * none available for next cores either. Signal we want
                         * to retry later, in case the DPIO devices weren't
                         * probed yet.
                         */
                        err = -EPROBE_DEFER;
                        goto err;
                }

                ppriv->store = dpaa2_io_store_create(DPAA2_CAAM_STORE_SIZE,
                                                     dev);
                if (unlikely(!ppriv->store)) {
                        dev_err(dev, "dpaa2_io_store_create() failed\n");
                        err = -ENOMEM;
                        goto err;
                }

                if (++i == priv->num_pairs)
                        break;
        }

        return 0;

err:
        for_each_online_cpu(cpu) {
                ppriv = per_cpu_ptr(priv->ppriv, cpu);
                if (!ppriv->nctx.cb)
                        break;
                dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx, dev);
        }

        for_each_online_cpu(cpu) {
                ppriv = per_cpu_ptr(priv->ppriv, cpu);
                if (!ppriv->store)
                        break;
                dpaa2_io_store_destroy(ppriv->store);
        }

        return err;
}

static void __cold dpaa2_dpseci_dpio_free(struct dpaa2_caam_priv *priv)
{
        struct dpaa2_caam_priv_per_cpu *ppriv;
        int i = 0, cpu;

        for_each_online_cpu(cpu) {
                ppriv = per_cpu_ptr(priv->ppriv, cpu);
                dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx,
                                            priv->dev);
                dpaa2_io_store_destroy(ppriv->store);

                if (++i == priv->num_pairs)
                        return;
        }
}

static int dpaa2_dpseci_bind(struct dpaa2_caam_priv *priv)
{
        struct dpseci_rx_queue_cfg rx_queue_cfg;
        struct device *dev = priv->dev;
        struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
        struct dpaa2_caam_priv_per_cpu *ppriv;
        int err = 0, i = 0, cpu;

        /* Configure Rx queues */
        for_each_online_cpu(cpu) {
                ppriv = per_cpu_ptr(priv->ppriv, cpu);

                rx_queue_cfg.options = DPSECI_QUEUE_OPT_DEST |
                                       DPSECI_QUEUE_OPT_USER_CTX;
                rx_queue_cfg.order_preservation_en = 0;
                rx_queue_cfg.dest_cfg.dest_type = DPSECI_DEST_DPIO;
                rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
                /*
                 * Rx priority (WQ) doesn't really matter, since we use
                 * pull mode, i.e. volatile dequeues from specific FQs
                 */
                rx_queue_cfg.dest_cfg.priority = 0;
                rx_queue_cfg.user_ctx = ppriv->nctx.qman64;

                err = dpseci_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
                                          &rx_queue_cfg);
                if (err) {
                        dev_err(dev, "dpseci_set_rx_queue() failed with err %d\n",
                                err);
                        return err;
                }

                if (++i == priv->num_pairs)
                        break;
        }

        return err;
}

static void dpaa2_dpseci_congestion_free(struct dpaa2_caam_priv *priv)
{
        struct device *dev = priv->dev;

        if (!priv->cscn_mem)
                return;

        dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
        kfree(priv->cscn_mem);
}

static void dpaa2_dpseci_free(struct dpaa2_caam_priv *priv)
{
        struct device *dev = priv->dev;
        struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);

        dpaa2_dpseci_congestion_free(priv);
        dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
}
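
/*
 * Process one response frame descriptor: recover the originating
 * caam_request from FD[ADDR] and complete it by calling its callback with
 * the status reported in FD[FRC].
 */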
static void dpaa2_caam_process_fd(struct dpaa2_caam_priv *priv,
                                  const struct dpaa2_fd *fd)
{
        struct caam_request *req;
        u32 fd_err;

        if (dpaa2_fd_get_format(fd) != dpaa2_fd_list) {
                dev_err(priv->dev, "Only Frame List FD format is supported!\n");
                return;
        }

        fd_err = dpaa2_fd_get_ctrl(fd) & FD_CTRL_ERR_MASK;
        if (unlikely(fd_err))
                dev_err(priv->dev, "FD error: %08x\n", fd_err);

        /*
         * FD[ADDR] is guaranteed to be valid, irrespective of errors reported
         * in FD[ERR] or FD[FRC].
         */
        req = dpaa2_caam_iova_to_virt(priv, dpaa2_fd_get_addr(fd));
        dma_unmap_single(priv->dev, req->fd_flt_dma, sizeof(req->fd_flt),
                         DMA_BIDIRECTIONAL);
        req->cbk(req->ctx, dpaa2_fd_get_frc(fd));
}

static int dpaa2_caam_pull_fq(struct dpaa2_caam_priv_per_cpu *ppriv)
{
        int err;

        /* Retry while portal is busy */
        do {
                err = dpaa2_io_service_pull_fq(ppriv->dpio, ppriv->rsp_fqid,
                                               ppriv->store);
        } while (err == -EBUSY);

        if (unlikely(err))
                dev_err(ppriv->priv->dev, "dpaa2_io_service_pull err %d", err);

        return err;
}

static int dpaa2_caam_store_consume(struct dpaa2_caam_priv_per_cpu *ppriv)
{
        struct dpaa2_dq *dq;
        int cleaned = 0, is_last;

        do {
                dq = dpaa2_io_store_next(ppriv->store, &is_last);
                if (unlikely(!dq)) {
                        if (unlikely(!is_last)) {
                                dev_dbg(ppriv->priv->dev,
                                        "FQ %d returned no valid frames\n",
                                        ppriv->rsp_fqid);
                                /*
                                 * MUST retry until we get some sort of
                                 * valid response token (be it "empty dequeue"
                                 * or a valid frame).
                                 */
                                continue;
                        }
                        break;
                }

                /* Process FD */
                dpaa2_caam_process_fd(ppriv->priv, dpaa2_dq_fd(dq));
                cleaned++;
        } while (!is_last);

        return cleaned;
}
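
/*
 * NAPI poll callback: alternate volatile dequeues (pull mode) with store
 * consumption until the FQ drains or the budget is nearly exhausted, then
 * complete NAPI and re-arm notifications.
 */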
static int dpaa2_dpseci_poll(struct napi_struct *napi, int budget)
{
        struct dpaa2_caam_priv_per_cpu *ppriv;
        struct dpaa2_caam_priv *priv;
        int err, cleaned = 0, store_cleaned;

        ppriv = container_of(napi, struct dpaa2_caam_priv_per_cpu, napi);
        priv = ppriv->priv;

        if (unlikely(dpaa2_caam_pull_fq(ppriv)))
                return 0;

        do {
                store_cleaned = dpaa2_caam_store_consume(ppriv);
                cleaned += store_cleaned;

                if (store_cleaned == 0 ||
                    cleaned > budget - DPAA2_CAAM_STORE_SIZE)
                        break;

                /* Try to dequeue some more */
                err = dpaa2_caam_pull_fq(ppriv);
                if (unlikely(err))
                        break;
        } while (1);

        if (cleaned < budget) {
                napi_complete_done(napi, cleaned);
                err = dpaa2_io_service_rearm(ppriv->dpio, &ppriv->nctx);
                if (unlikely(err))
                        dev_err(priv->dev, "Notification rearm failed: %d\n",
                                err);
        }

        return cleaned;
}
static int dpaa2_dpseci_congestion_setup(struct dpaa2_caam_priv *priv,
                                         u16 token)
{
        struct dpseci_congestion_notification_cfg cong_notif_cfg = { 0 };
        struct device *dev = priv->dev;
        int err;

        /*
         * Congestion group feature supported starting with DPSECI API v5.1
         * and only when object has been created with this capability.
         */
        if ((DPSECI_VER(priv->major_ver, priv->minor_ver) < DPSECI_VER(5, 1)) ||
            !(priv->dpseci_attr.options & DPSECI_OPT_HAS_CG))
                return 0;

        priv->cscn_mem = kzalloc(DPAA2_CSCN_SIZE + DPAA2_CSCN_ALIGN,
                                 GFP_KERNEL | GFP_DMA);
        if (!priv->cscn_mem)
                return -ENOMEM;

        priv->cscn_mem_aligned = PTR_ALIGN(priv->cscn_mem, DPAA2_CSCN_ALIGN);
        priv->cscn_dma = dma_map_single(dev, priv->cscn_mem_aligned,
                                        DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, priv->cscn_dma)) {
                dev_err(dev, "Error mapping CSCN memory area\n");
                err = -ENOMEM;
                goto err_dma_map;
        }

        cong_notif_cfg.units = DPSECI_CONGESTION_UNIT_BYTES;
        cong_notif_cfg.threshold_entry = DPAA2_SEC_CONG_ENTRY_THRESH;
        cong_notif_cfg.threshold_exit = DPAA2_SEC_CONG_EXIT_THRESH;
        cong_notif_cfg.message_ctx = (uintptr_t)priv;
        cong_notif_cfg.message_iova = priv->cscn_dma;
        cong_notif_cfg.notification_mode = DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER |
                                           DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT |
                                           DPSECI_CGN_MODE_COHERENT_WRITE;

        err = dpseci_set_congestion_notification(priv->mc_io, 0, token,
                                                 &cong_notif_cfg);
        if (err) {
                dev_err(dev, "dpseci_set_congestion_notification failed\n");
                goto err_set_cong;
        }

        return 0;

err_set_cong:
        dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
err_dma_map:
        kfree(priv->cscn_mem);

        return err;
}

static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
{
        struct device *dev = &ls_dev->dev;
        struct dpaa2_caam_priv *priv;
        struct dpaa2_caam_priv_per_cpu *ppriv;
        int err, cpu;
        u8 i;

        priv = dev_get_drvdata(dev);

        priv->dev = dev;
        priv->dpsec_id = ls_dev->obj_desc.id;

        /* Get a handle for the DPSECI this interface is associate with */
        err = dpseci_open(priv->mc_io, 0, priv->dpsec_id, &ls_dev->mc_handle);
        if (err) {
                dev_err(dev, "dpseci_open() failed: %d\n", err);
                goto err_open;
        }

        err = dpseci_get_api_version(priv->mc_io, 0, &priv->major_ver,
                                     &priv->minor_ver);
        if (err) {
                dev_err(dev, "dpseci_get_api_version() failed\n");
                goto err_get_vers;
        }

        dev_info(dev, "dpseci v%d.%d\n", priv->major_ver, priv->minor_ver);

        err = dpseci_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
                                    &priv->dpseci_attr);
        if (err) {
                dev_err(dev, "dpseci_get_attributes() failed\n");
                goto err_get_vers;
        }

        err = dpseci_get_sec_attr(priv->mc_io, 0, ls_dev->mc_handle,
                                  &priv->sec_attr);
        if (err) {
                dev_err(dev, "dpseci_get_sec_attr() failed\n");
                goto err_get_vers;
        }

        err = dpaa2_dpseci_congestion_setup(priv, ls_dev->mc_handle);
        if (err) {
                dev_err(dev, "setup_congestion() failed\n");
                goto err_get_vers;
        }

        priv->num_pairs = min(priv->dpseci_attr.num_rx_queues,
                              priv->dpseci_attr.num_tx_queues);
        if (priv->num_pairs > num_online_cpus()) {
                dev_warn(dev, "%d queues won't be used\n",
                         priv->num_pairs - num_online_cpus());
                priv->num_pairs = num_online_cpus();
        }

        for (i = 0; i < priv->dpseci_attr.num_rx_queues; i++) {
                err = dpseci_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
                                          &priv->rx_queue_attr[i]);
                if (err) {
                        dev_err(dev, "dpseci_get_rx_queue() failed\n");
                        goto err_get_rx_queue;
                }
        }

        for (i = 0; i < priv->dpseci_attr.num_tx_queues; i++) {
                err = dpseci_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
                                          &priv->tx_queue_attr[i]);
                if (err) {
                        dev_err(dev, "dpseci_get_tx_queue() failed\n");
                        goto err_get_rx_queue;
                }
        }

        i = 0;
        for_each_online_cpu(cpu) {
                u8 j;

                j = i % priv->num_pairs;

                ppriv = per_cpu_ptr(priv->ppriv, cpu);
                ppriv->req_fqid = priv->tx_queue_attr[j].fqid;

                /*
                 * Allow all cores to enqueue, while only some of them
                 * will take part in dequeuing.
                 */
                if (++i > priv->num_pairs)
                        continue;

                ppriv->rsp_fqid = priv->rx_queue_attr[j].fqid;
                ppriv->prio = j;

                dev_dbg(dev, "pair %d: rx queue %d, tx queue %d\n", j,
                        priv->rx_queue_attr[j].fqid,
                        priv->tx_queue_attr[j].fqid);

                ppriv->net_dev.dev = *dev;
                INIT_LIST_HEAD(&ppriv->net_dev.napi_list);
                netif_napi_add(&ppriv->net_dev, &ppriv->napi, dpaa2_dpseci_poll,
                               DPAA2_CAAM_NAPI_WEIGHT);
        }

        return 0;

err_get_rx_queue:
        dpaa2_dpseci_congestion_free(priv);
err_get_vers:
        dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
err_open:
        return err;
}

static int dpaa2_dpseci_enable(struct dpaa2_caam_priv *priv)
{
        struct device *dev = priv->dev;
        struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
        struct dpaa2_caam_priv_per_cpu *ppriv;
        int i;

        for (i = 0; i < priv->num_pairs; i++) {
                ppriv = per_cpu_ptr(priv->ppriv, i);
                napi_enable(&ppriv->napi);
        }

        return dpseci_enable(priv->mc_io, 0, ls_dev->mc_handle);
}

static int __cold dpaa2_dpseci_disable(struct dpaa2_caam_priv *priv)
{
        struct device *dev = priv->dev;
        struct dpaa2_caam_priv_per_cpu *ppriv;
        struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
        int i, err = 0, enabled;

        err = dpseci_disable(priv->mc_io, 0, ls_dev->mc_handle);
        if (err) {
                dev_err(dev, "dpseci_disable() failed\n");
                return err;
        }

        err = dpseci_is_enabled(priv->mc_io, 0, ls_dev->mc_handle, &enabled);
        if (err) {
                dev_err(dev, "dpseci_is_enabled() failed\n");
                return err;
        }

        dev_dbg(dev, "disable: %s\n", enabled ? "false" : "true");

        for (i = 0; i < priv->num_pairs; i++) {
                ppriv = per_cpu_ptr(priv->ppriv, i);
                napi_disable(&ppriv->napi);
                netif_napi_del(&ppriv->napi);
        }

        return 0;
}

static struct list_head hash_list;
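
/*
 * Probe: set up the MC portal and DPSECI object, per-CPU DPIO portals and
 * NAPI contexts, bind and enable the queues, then register every skcipher,
 * AEAD and ahash algorithm that this SEC instance can accelerate.
 */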
static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev)
{
        struct device *dev;
        struct dpaa2_caam_priv *priv;
        int i, err = 0;
        bool registered = false;

        /*
         * There is no way to get CAAM endianness - there is no direct register
         * space access and MC f/w does not provide this attribute.
         * All DPAA2-based SoCs have little endian CAAM, thus hard-code this
         * property.
         */
        caam_little_end = true;

        caam_imx = false;

        dev = &dpseci_dev->dev;

        priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        dev_set_drvdata(dev, priv);

        priv->domain = iommu_get_domain_for_dev(dev);

        qi_cache = kmem_cache_create("dpaa2_caamqicache", CAAM_QI_MEMCACHE_SIZE,
                                     0, SLAB_CACHE_DMA, NULL);
        if (!qi_cache) {
                dev_err(dev, "Can't allocate SEC cache\n");
                return -ENOMEM;
        }

        err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49));
        if (err) {
                dev_err(dev, "dma_set_mask_and_coherent() failed\n");
                goto err_dma_mask;
        }

        /* Obtain a MC portal */
        err = fsl_mc_portal_allocate(dpseci_dev, 0, &priv->mc_io);
        if (err) {
                if (err == -ENXIO)
                        err = -EPROBE_DEFER;
                else
                        dev_err(dev, "MC portal allocation failed\n");

                goto err_dma_mask;
        }

        priv->ppriv = alloc_percpu(*priv->ppriv);
        if (!priv->ppriv) {
                dev_err(dev, "alloc_percpu() failed\n");
                err = -ENOMEM;
                goto err_alloc_ppriv;
        }

        /* DPSECI initialization */
        err = dpaa2_dpseci_setup(dpseci_dev);
        if (err) {
                dev_err(dev, "dpaa2_dpseci_setup() failed\n");
                goto err_dpseci_setup;
        }

        /* DPIO */
        err = dpaa2_dpseci_dpio_setup(priv);
        if (err) {
                if (err != -EPROBE_DEFER)
                        dev_err(dev, "dpaa2_dpseci_dpio_setup() failed\n");
                goto err_dpio_setup;
        }

        /* DPSECI binding to DPIO */
        err = dpaa2_dpseci_bind(priv);
        if (err) {
                dev_err(dev, "dpaa2_dpseci_bind() failed\n");
                goto err_bind;
        }

        /* DPSECI enable */
        err = dpaa2_dpseci_enable(priv);
        if (err) {
                dev_err(dev, "dpaa2_dpseci_enable() failed\n");
                goto err_bind;
        }

        /* register crypto algorithms the device supports */
        for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
                struct caam_skcipher_alg *t_alg = driver_algs + i;
                u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK;

                /* Skip DES algorithms if not supported by device */
                if (!priv->sec_attr.des_acc_num &&
                    (alg_sel == OP_ALG_ALGSEL_3DES ||
                     alg_sel == OP_ALG_ALGSEL_DES))
                        continue;

                /* Skip AES algorithms if not supported by device */
                if (!priv->sec_attr.aes_acc_num &&
                    alg_sel == OP_ALG_ALGSEL_AES)
                        continue;

                /* Skip CHACHA20 algorithms if not supported by device */
                if (alg_sel == OP_ALG_ALGSEL_CHACHA20 &&
                    !priv->sec_attr.ccha_acc_num)
                        continue;

                t_alg->caam.dev = dev;
                caam_skcipher_alg_init(t_alg);

                err = crypto_register_skcipher(&t_alg->skcipher);
                if (err) {
                        dev_warn(dev, "%s alg registration failed: %d\n",
                                 t_alg->skcipher.base.cra_driver_name, err);
                        continue;
                }

                t_alg->registered = true;
                registered = true;
        }

        for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
                struct caam_aead_alg *t_alg = driver_aeads + i;
                u32 c1_alg_sel = t_alg->caam.class1_alg_type &
                                 OP_ALG_ALGSEL_MASK;
                u32 c2_alg_sel = t_alg->caam.class2_alg_type &
                                 OP_ALG_ALGSEL_MASK;

                /* Skip DES algorithms if not supported by device */
                if (!priv->sec_attr.des_acc_num &&
                    (c1_alg_sel == OP_ALG_ALGSEL_3DES ||
                     c1_alg_sel == OP_ALG_ALGSEL_DES))
                        continue;

                /* Skip AES algorithms if not supported by device */
                if (!priv->sec_attr.aes_acc_num &&
                    c1_alg_sel == OP_ALG_ALGSEL_AES)
                        continue;

                /* Skip CHACHA20 algorithms if not supported by device */
                if (c1_alg_sel == OP_ALG_ALGSEL_CHACHA20 &&
                    !priv->sec_attr.ccha_acc_num)
                        continue;

                /* Skip POLY1305 algorithms if not supported by device */
                if (c2_alg_sel == OP_ALG_ALGSEL_POLY1305 &&
                    !priv->sec_attr.ptha_acc_num)
                        continue;

                /*
                 * Skip algorithms requiring message digests
                 * if MD not supported by device.
                 */
                if ((c2_alg_sel & ~OP_ALG_ALGSEL_SUBMASK) == 0x40 &&
                    !priv->sec_attr.md_acc_num)
                        continue;

                t_alg->caam.dev = dev;
                caam_aead_alg_init(t_alg);

                err = crypto_register_aead(&t_alg->aead);
                if (err) {
                        dev_warn(dev, "%s alg registration failed: %d\n",
                                 t_alg->aead.base.cra_driver_name, err);
                        continue;
                }

                t_alg->registered = true;
                registered = true;
        }
        if (registered)
                dev_info(dev, "algorithms registered in /proc/crypto\n");

        /* register hash algorithms the device supports */
        INIT_LIST_HEAD(&hash_list);

        /*
         * Skip registration of any hashing algorithms if MD block
         * is not present.
         */
        if (!priv->sec_attr.md_acc_num)
                return 0;

        for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
                struct caam_hash_alg *t_alg;
                struct caam_hash_template *alg = driver_hash + i;

                /* register hmac version */
                t_alg = caam_hash_alloc(dev, alg, true);
                if (IS_ERR(t_alg)) {
                        err = PTR_ERR(t_alg);
                        dev_warn(dev, "%s hash alg allocation failed: %d\n",
                                 alg->driver_name, err);
                        continue;
                }

                err = crypto_register_ahash(&t_alg->ahash_alg);
                if (err) {
                        dev_warn(dev, "%s alg registration failed: %d\n",
                                 t_alg->ahash_alg.halg.base.cra_driver_name,
                                 err);
                        kfree(t_alg);
                } else
                        list_add_tail(&t_alg->entry, &hash_list);

                /* register unkeyed version */
                t_alg = caam_hash_alloc(dev, alg, false);
                if (IS_ERR(t_alg)) {
                        err = PTR_ERR(t_alg);
                        dev_warn(dev, "%s alg allocation failed: %d\n",
                                 alg->driver_name, err);
                        continue;
                }

                err = crypto_register_ahash(&t_alg->ahash_alg);
                if (err) {
                        dev_warn(dev, "%s alg registration failed: %d\n",
                                 t_alg->ahash_alg.halg.base.cra_driver_name,
                                 err);
                        kfree(t_alg);
                } else
                        list_add_tail(&t_alg->entry, &hash_list);
        }
        if (!list_empty(&hash_list))
                dev_info(dev, "hash algorithms registered in /proc/crypto\n");

        return err;

err_bind:
        dpaa2_dpseci_dpio_free(priv);
err_dpio_setup:
        dpaa2_dpseci_free(priv);
err_dpseci_setup:
        free_percpu(priv->ppriv);
err_alloc_ppriv:
        fsl_mc_portal_free(priv->mc_io);
err_dma_mask:
        kmem_cache_destroy(qi_cache);

        return err;
}
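
/*
 * Teardown mirrors the probe path: unregister every algorithm that was
 * registered with the Crypto API, then release the DPSECI, DPIO and MC
 * portal resources.
 */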
static int __cold dpaa2_caam_remove(struct fsl_mc_device *ls_dev)
{
        struct device *dev;
        struct dpaa2_caam_priv *priv;
        int i;

        dev = &ls_dev->dev;
        priv = dev_get_drvdata(dev);

        for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
                struct caam_aead_alg *t_alg = driver_aeads + i;

                if (t_alg->registered)
                        crypto_unregister_aead(&t_alg->aead);
        }

        for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
                struct caam_skcipher_alg *t_alg = driver_algs + i;

                if (t_alg->registered)
                        crypto_unregister_skcipher(&t_alg->skcipher);
        }

        if (hash_list.next) {
                struct caam_hash_alg *t_hash_alg, *p;

                list_for_each_entry_safe(t_hash_alg, p, &hash_list, entry) {
                        crypto_unregister_ahash(&t_hash_alg->ahash_alg);
                        list_del(&t_hash_alg->entry);
                        kfree(t_hash_alg);
                }
        }

        dpaa2_dpseci_disable(priv);
        dpaa2_dpseci_dpio_free(priv);
        dpaa2_dpseci_free(priv);
        free_percpu(priv->ppriv);
        fsl_mc_portal_free(priv->mc_io);
        kmem_cache_destroy(qi_cache);

        return 0;
}
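
/**
 * dpaa2_caam_enqueue - Enqueue a crypto request to the hardware
 * @dev: device of the DPSECI object the request is sent to
 * @req: caam_request with the frame list table already filled in
 *
 * Returns -EINPROGRESS if the frame was enqueued, -EBUSY if the DPSECI
 * object is congested, or -EIO on DMA mapping / enqueue failure.
 */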
int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req)
{
        struct dpaa2_fd fd;
        struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
        struct dpaa2_caam_priv_per_cpu *ppriv;
        int err = 0, i;

        if (IS_ERR(req))
                return PTR_ERR(req);

        if (priv->cscn_mem) {
                dma_sync_single_for_cpu(priv->dev, priv->cscn_dma,
                                        DPAA2_CSCN_SIZE,
                                        DMA_FROM_DEVICE);
                if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem_aligned))) {
                        dev_dbg_ratelimited(dev, "Dropping request\n");
                        return -EBUSY;
                }
        }

        dpaa2_fl_set_flc(&req->fd_flt[1], req->flc_dma);

        req->fd_flt_dma = dma_map_single(dev, req->fd_flt, sizeof(req->fd_flt),
                                         DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, req->fd_flt_dma)) {
                dev_err(dev, "DMA mapping error for QI enqueue request\n");
                goto err_out;
        }

        memset(&fd, 0, sizeof(fd));
        dpaa2_fd_set_format(&fd, dpaa2_fd_list);
        dpaa2_fd_set_addr(&fd, req->fd_flt_dma);
        dpaa2_fd_set_len(&fd, dpaa2_fl_get_len(&req->fd_flt[1]));
        dpaa2_fd_set_flc(&fd, req->flc_dma);

        ppriv = this_cpu_ptr(priv->ppriv);
        for (i = 0; i < (priv->dpseci_attr.num_tx_queues << 1); i++) {
                err = dpaa2_io_service_enqueue_fq(ppriv->dpio, ppriv->req_fqid,
                                                  &fd);
                if (err != -EBUSY)
                        break;

                cpu_relax();
        }

        if (unlikely(err)) {
                dev_err_ratelimited(dev, "Error enqueuing frame: %d\n", err);
                goto err_out;
        }

        return -EINPROGRESS;

err_out:
        dma_unmap_single(dev, req->fd_flt_dma, sizeof(req->fd_flt),
                         DMA_BIDIRECTIONAL);
        return -EIO;
}
EXPORT_SYMBOL(dpaa2_caam_enqueue);

static const struct fsl_mc_device_id dpaa2_caam_match_id_table[] = {
        {
                .vendor = FSL_MC_VENDOR_FREESCALE,
                .obj_type = "dpseci",
        },
        { .vendor = 0x0 }
};

static struct fsl_mc_driver dpaa2_caam_driver = {
        .driver = {
                .name = KBUILD_MODNAME,
                .owner = THIS_MODULE,
        },
        .probe = dpaa2_caam_probe,
        .remove = dpaa2_caam_remove,
        .match_id_table = dpaa2_caam_match_id_table
};

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Freescale DPAA2 CAAM Driver");

module_fsl_mc_driver(dpaa2_caam_driver);
);